void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
  assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
  SmallVector<Use*, Threshold> Worklist;
  SmallSet<Use*, Threshold> Visited;
  int Count = 0;

  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    // If there are lots of uses, conservatively say that the value
    // is captured to avoid taking too much compile time.
    if (Count++ >= Threshold)
      return Tracker->tooManyUses();

    Use *U = &UI.getUse();
    if (!Tracker->shouldExplore(U))
      continue;
    Visited.insert(U);
    Worklist.push_back(U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    V = U->get();

    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() &&
          I->getType()->isVoidTy())
        break;

      // Not captured if only passed via 'nocapture' arguments. Note that
      // calling a function pointer does not in itself cause the pointer to
      // be captured. This is a subtle point considering that (for example)
      // the callee might return its own address. It is analogous to saying
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
      for (CallSite::arg_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.doesNotCapture(A - B))
          // The parameter is not marked 'nocapture' - captured.
          if (Tracker->captured(U))
            return;
      break;
    }
    case Instruction::Load:
      // Loading from a pointer does not cause it to be captured.
      break;
    case Instruction::VAArg:
      // "va-arg" from a pointer does not cause it to be captured.
      break;
    case Instruction::Store:
      if (V == I->getOperand(0))
        // Stored the pointer - conservatively assume it may be captured.
        if (Tracker->captured(U))
          return;
      // Storing to the pointee does not cause the pointer to be captured.
      break;
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
      // The original value is not captured via this if the new value isn't.
      for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
           UI != UE; ++UI) {
        Use *U = &UI.getUse();
        if (Visited.insert(U))
          if (Tracker->shouldExplore(U))
            Worklist.push_back(U);
      }
      break;
    case Instruction::ICmp:
      // Don't count comparisons of a no-alias return value against null as
      // captures. This allows us to ignore comparisons of malloc results
      // with null, for example.
      if (ConstantPointerNull *CPN =
            dyn_cast<ConstantPointerNull>(I->getOperand(1)))
        if (CPN->getType()->getAddressSpace() == 0)
          if (isNoAliasCall(V->stripPointerCasts()))
            break;
      // Otherwise, be conservative. There are crazy ways to capture pointers
      // using comparisons.
      if (Tracker->captured(U))
        return;
      break;
    default:
      // Something else - be conservative and say it is captured.
      if (Tracker->captured(U))
        return;
      break;
    }
  }

  // All uses examined.
}
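// A minimal sketch (illustrative caller code, not part of this file) of a
// CaptureTracker driving the traversal above. FlagCaptureTracker and
// pointerEscapes are hypothetical names; the shape mirrors the simple
// flag-setting tracker behind the boolean PointerMayBeCaptured overload.
// Any capturing use (or exceeding the use threshold) sets a flag, and
// returning true from captured() stops the walk early. Outside this file it
// would also need llvm/Analysis/CaptureTracking.h.
namespace {
  struct FlagCaptureTracker : public CaptureTracker {
    FlagCaptureTracker() : Captured(false) {}

    // Too many uses to explore cheaply; conservatively assume a capture.
    virtual void tooManyUses() { Captured = true; }

    // Explore every use; a real client could prune the search here.
    virtual bool shouldExplore(Use *U) { return true; }

    // Record the capture and return true to terminate the traversal.
    virtual bool captured(Use *U) {
      Captured = true;
      return true;
    }

    bool Captured;
  };
}

// Usage: true if the pointer may escape through any of its transitive uses.
static bool pointerEscapes(const Value *V) {
  FlagCaptureTracker Tracker;
  PointerMayBeCaptured(V, &Tracker);
  return Tracker.Captured;
}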
bool ObjCARCContract::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTree>();

  PA.setAA(&getAnalysis<AliasAnalysis>());

  // Track whether it's ok to mark objc_storeStrong calls with the "tail"
  // keyword. Be conservative if the function has variadic arguments.
  // It seems that functions which "return twice" are also unsafe for the
  // "tail" argument, because they call setjmp, which could need to
  // return to an earlier stack state.
  bool TailOkForStoreStrongs = !F.isVarArg() &&
                               !F.callsFunctionThatReturnsTwice();

  // For ObjC library calls which return their argument, replace uses of the
  // argument with uses of the call return value, if it dominates the use. This
  // reduces register pressure.
  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "ObjCARCContract: Visiting: " << *Inst << "\n");

    // Only these library routines return their argument. In particular,
    // objc_retainBlock does not necessarily return its argument.
    InstructionClass Class = GetBasicInstructionClass(Inst);
    switch (Class) {
    case IC_Retain:
    case IC_FusedRetainAutorelease:
    case IC_FusedRetainAutoreleaseRV:
      break;
    case IC_Autorelease:
    case IC_AutoreleaseRV:
      if (ContractAutorelease(F, Inst, Class, DependingInstructions, Visited))
        continue;
      break;
    case IC_RetainRV: {
      // If we're compiling for a target which needs a special inline-asm
      // marker to do the retainAutoreleasedReturnValue optimization,
      // insert it now.
      if (!RetainRVMarker)
        break;
      BasicBlock::iterator BBI = Inst;
      BasicBlock *InstParent = Inst->getParent();

      // Step up to see if the call immediately precedes the RetainRV call.
      // If it's an invoke, we have to cross a block boundary. And we have
      // to carefully dodge no-op instructions.
      do {
        if (&*BBI == InstParent->begin()) {
          BasicBlock *Pred = InstParent->getSinglePredecessor();
          if (!Pred)
            goto decline_rv_optimization;
          BBI = Pred->getTerminator();
          break;
        }
        --BBI;
      } while (IsNoopInstruction(BBI));

      if (&*BBI == GetObjCArg(Inst)) {
        DEBUG(dbgs() << "ObjCARCContract: Adding inline asm marker for "
                        "retainAutoreleasedReturnValue optimization.\n");
        Changed = true;
        InlineAsm *IA =
          InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
                                           /*isVarArg=*/false),
                         RetainRVMarker->getString(),
                         /*Constraints=*/"", /*hasSideEffects=*/true);
        CallInst::Create(IA, "", Inst);
      }
    decline_rv_optimization:
      break;
    }
    case IC_InitWeak: {
      // objc_initWeak(p, null) => *p = null
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(1))) {
        Value *Null =
          ConstantPointerNull::get(cast<PointerType>(CI->getType()));
        Changed = true;
        new StoreInst(Null, CI->getArgOperand(0), CI);

        DEBUG(dbgs() << "ObjCARCContract: Old = " << *CI << "\n"
                     << "                 New = " << *Null << "\n");

        CI->replaceAllUsesWith(Null);
        CI->eraseFromParent();
      }
      continue;
    }
    case IC_Release:
      ContractRelease(Inst, I);
      continue;
    case IC_User:
      // Be conservative if the function has any alloca instructions.
      // Technically we only care about escaping alloca instructions,
      // but this is sufficient to handle some interesting cases.
      if (isa<AllocaInst>(Inst))
        TailOkForStoreStrongs = false;
      continue;
    case IC_IntrinsicUser:
      // Remove calls to @clang.arc.use(...).
      Inst->eraseFromParent();
      continue;
    default:
      continue;
    }

    DEBUG(dbgs() << "ObjCARCContract: Finished List.\n\n");

    // Don't use GetObjCArg because we don't want to look through bitcasts
    // and such; to do the replacement, the argument must have type i8*.
    const Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
    for (;;) {
      // If we're compiling bugpointed code, don't get in trouble.
      if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
        break;
      // Look through the uses of the pointer.
      for (Value::const_use_iterator UI = Arg->use_begin(),
           UE = Arg->use_end(); UI != UE; ) {
        Use &U = UI.getUse();
        unsigned OperandNo = UI.getOperandNo();
        ++UI; // Increment UI now, because we may unlink its element.

        // If the call's return value dominates a use of the call's argument
        // value, rewrite the use to use the return value. We check for
        // reachability here because an unreachable call is considered to
        // trivially dominate itself, which would lead us to rewriting its
        // argument in terms of its return value, which would lead to
        // infinite loops in GetObjCArg.
        if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) {
          Changed = true;
          Instruction *Replacement = Inst;
          Type *UseTy = U.get()->getType();
          if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
            // For PHI nodes, insert the bitcast in the predecessor block.
            unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
            BasicBlock *BB = PHI->getIncomingBlock(ValNo);
            if (Replacement->getType() != UseTy)
              Replacement = new BitCastInst(Replacement, UseTy, "",
                                            &BB->back());
            // While we're here, rewrite all edges for this PHI, rather
            // than just one use at a time, to minimize the number of
            // bitcasts we emit.
            for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
              if (PHI->getIncomingBlock(i) == BB) {
                // Keep the UI iterator valid.
                if (&PHI->getOperandUse(
                      PHINode::getOperandNumForIncomingValue(i)) ==
                    &UI.getUse())
                  ++UI;
                PHI->setIncomingValue(i, Replacement);
              }
          } else {
            if (Replacement->getType() != UseTy)
              Replacement = new BitCastInst(Replacement, UseTy, "",
                                            cast<Instruction>(U.getUser()));
            U.set(Replacement);
          }
        }
      }

      // If Arg is a no-op casted pointer, strip one level of casts and
      // iterate.
      if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
        Arg = BI->getOperand(0);
      else if (isa<GEPOperator>(Arg) &&
               cast<GEPOperator>(Arg)->hasAllZeroIndices())
        Arg = cast<GEPOperator>(Arg)->getPointerOperand();
      else if (isa<GlobalAlias>(Arg) &&
               !cast<GlobalAlias>(Arg)->mayBeOverridden())
        Arg = cast<GlobalAlias>(Arg)->getAliasee();
      else
        break;
    }
  }

  // If this function has no escaping allocas or suspicious vararg usage,
  // objc_storeStrong calls can be marked with the "tail" keyword.
  if (TailOkForStoreStrongs)
    for (SmallPtrSet<CallInst *, 8>::iterator I = StoreStrongCalls.begin(),
         E = StoreStrongCalls.end(); I != E; ++I)
      (*I)->setTailCall();
  StoreStrongCalls.clear();

  return Changed;
}
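// A minimal sketch (assumed driver code, not part of this pass) of running
// ObjCARCContract standalone over a module with the legacy pass manager of
// this LLVM era. contractModule is a hypothetical helper; the header paths
// and factory functions are the ones this era exposes (createObjCARCContractPass
// in llvm/Transforms/ObjCARC.h, createBasicAliasAnalysisPass in
// llvm/Analysis/Passes.h). BasicAliasAnalysis is added explicitly so the
// pass's AliasAnalysis requirement resolves to something better than no-AA;
// DominatorTree is scheduled automatically by the pass manager.
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Transforms/ObjCARC.h"

static bool contractModule(llvm::Module &M) {
  llvm::PassManager PM;
  PM.add(llvm::createBasicAliasAnalysisPass());
  PM.add(llvm::createObjCARCContractPass());
  return PM.run(M); // True if any function was modified.
}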