bool IRTranslator::translateCall(const CallInst &CI) {
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function &F = *CI.getCalledFunction();
  Intrinsic::ID ID = F.getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(&F));

  assert(ID != Intrinsic::not_intrinsic && "FIXME: support real calls");

  // Need types (starting with return) & args.
  SmallVector<LLT, 4> Tys;
  Tys.emplace_back(*CI.getType());
  for (auto &Arg : CI.arg_operands())
    Tys.emplace_back(*Arg->getType());

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(Tys, ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // ConstantInt arguments are encoded as immediate operands; everything
    // else is passed in a vreg. (Renamed from the shadowing inner "CI".)
    if (ConstantInt *CImm = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CImm->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}
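
// A standalone sketch of the arg_operands() classification used above;
// countImmediateArgs is an invented helper for illustration, not part of the
// translator. A Use converts implicitly to Value *, which is why isa<> and
// dyn_cast<> can be applied to the loop variable directly.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static unsigned countImmediateArgs(const CallInst &CI) {
  unsigned NumImms = 0;
  for (const Use &Arg : CI.arg_operands())
    if (isa<ConstantInt>(Arg)) // Would be encoded as an immediate operand.
      ++NumImms;
  return NumImms;
}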
bool CallLowering::lowerCall(
    MachineIRBuilder &MIRBuilder, const CallInst &CI, unsigned ResReg,
    ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CI.getParent()->getParent()->getParent()->getDataLayout();

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  for (auto &Arg : CI.arg_operands()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{}};
    setArgFlags(OrigArg, i + 1, DL, CI);
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Direct calls carry the callee as a global address; indirect calls
  // materialize the callee register on demand via the callback.
  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (Function *F = CI.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResReg, CI.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeSet::ReturnIndex, DL, CI);

  return lowerCall(MIRBuilder, Callee, OrigRet, OrigArgs);
}
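
// For context, a sketch of the caller side of this interface: the GlobalISel
// IRTranslator of this era drove lowerCall() by collecting one vreg per IR
// argument and deferring the callee vreg to the callback, so direct calls
// never materialize a register for the callee. Quoted from memory as a
// sketch, not verbatim:
//
//   SmallVector<unsigned, 8> Args;
//   for (auto &Arg : CI.arg_operands())
//     Args.push_back(getOrCreateVReg(*Arg));
//
//   return CLI->lowerCall(MIRBuilder, CI, Res, Args,
//                         [&]() { return getOrCreateVReg(*CI.getCalledValue()); });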
void GCInvariantVerifier::visitCallInst(CallInst &CI) {
  CallingConv::ID CC = CI.getCallingConv();
  if (CC == JLCALL_CC || CC == JLCALL_F_CC) {
    // Arguments passed through the jlcall conventions must be GC-tracked
    // pointers.
    for (Value *Arg : CI.arg_operands()) {
      Type *Ty = Arg->getType();
      Check(Ty->isPointerTy() &&
                cast<PointerType>(Ty)->getAddressSpace() ==
                    AddressSpace::Tracked,
            "Invalid derived pointer in jlcall", &CI);
    }
  }
}
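
// A minimal refactoring sketch of the predicate the loop above applies
// (isTrackedPointer is an invented name; AddressSpace::Tracked is Julia's
// GC-tracked address space, assumed in scope as in the verifier above):
static bool isTrackedPointer(Type *Ty) {
  auto *PTy = dyn_cast<PointerType>(Ty);
  return PTy && PTy->getAddressSpace() == AddressSpace::Tracked;
}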
static bool markTails(Function &F, bool &AllCallsAreTailCalls) {
  if (F.callsFunctionThatReturnsTwice())
    return false;
  AllCallsAreTailCalls = true;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks
  // that contain the escaping instruction will be marked as being visited
  // without an escaped alloca, since that is how the block began.
  enum VisitType { UNVISITED, UNESCAPED, ESCAPED };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this,
  // we maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an
  // alloca through other means if the block turns out to be reachable after an
  // escape point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      if (!CI || CI->isTailCall())
        continue;

      bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things
        // computed outside this function can be marked tail. Even if you
        // stored the alloca address into a global, a readnone function can't
        // load the global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->arg_operands()) {
          // Test the operand value itself, not Arg.getUser(): the user of an
          // argument Use is always the call instruction, so the original
          // getUser()-based test could never match.
          if (isa<Constant>(Arg))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          emitOptimizationRemark(
              F.getContext(), "tailcallelim", F, CI->getDebugLoc(),
              "marked this readnone call a tail call candidate");
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
        DeferredTails.push_back(CI);
      } else {
        AllCallsAreTailCalls = false;
      }
    }

    for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      emitOptimizationRemark(F.getContext(), "tailcallelim", F,
                             CI->getDebugLoc(),
                             "marked this call a tail call candidate");
      CI->setTailCall();
      Modified = true;
    } else {
      AllCallsAreTailCalls = false;
    }
  }

  return Modified;
}
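
// A condensed, self-contained sketch of the two-worklist propagation that
// markTails' comments describe, on a toy CFG of integer node ids instead of
// BasicBlocks. All names here are invented for the illustration; Succs must
// contain an entry (possibly an empty vector) for every reachable node. The
// per-node state only ever increases (UNVISITED -> UNESCAPED -> ESCAPED), and
// escaped work is drained first, so a node is re-processed at most once when
// its state is raised.
#include <map>
#include <set>
#include <vector>

enum ToyVisitType { TOY_UNVISITED, TOY_UNESCAPED, TOY_ESCAPED };

std::map<int, ToyVisitType>
propagateEscapes(const std::map<int, std::vector<int>> &Succs, int Entry,
                 const std::set<int> &EscapePoints) {
  std::map<int, ToyVisitType> Visited; // Defaults to TOY_UNVISITED (0).
  std::vector<int> WorklistEscaped, WorklistUnescaped;
  int BB = Entry;
  ToyVisitType Escaped = TOY_UNESCAPED;
  bool Done = false;
  while (!Done) {
    // "Executing" the block: an escape point raises the state for the rest of
    // this block and everything reachable from it.
    if (EscapePoints.count(BB))
      Escaped = TOY_ESCAPED;
    for (int Succ : Succs.at(BB)) {
      ToyVisitType &State = Visited[Succ];
      if (State < Escaped) {
        State = Escaped; // Monotone: states are only ever raised.
        if (State == TOY_ESCAPED)
          WorklistEscaped.push_back(Succ);
        else
          WorklistUnescaped.push_back(Succ);
      }
    }
    // Prefer escaped work so unescaped visits aren't invalidated later.
    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.back();
      WorklistEscaped.pop_back();
      Escaped = TOY_ESCAPED;
    } else {
      Done = true;
      while (!WorklistUnescaped.empty()) {
        int Next = WorklistUnescaped.back();
        WorklistUnescaped.pop_back();
        // Skip stale entries that were raised to ESCAPED in the meantime.
        if (Visited[Next] == TOY_UNESCAPED) {
          BB = Next;
          Escaped = TOY_UNESCAPED;
          Done = false;
          break;
        }
      }
    }
  }
  return Visited;
}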