// Mark the given LLVM-C call instruction as a musttail call.
// The caller guarantees call_ins wraps a CallInst.
void
mono_llvm_set_must_tail (LLVMValueRef call_ins)
{
	CallInst *call = static_cast<CallInst *> (unwrap (call_ins));
	call->setTailCallKind (CallInst::TailCallKind::TCK_MustTail);
}
// Calls to setjmp(p) are lowered to _setjmp3(p, 0) by the frontend. // The idea behind _setjmp3 is that it takes an optional number of personality // specific parameters to indicate how to restore the personality-specific frame // state when longjmp is initiated. Typically, the current TryLevel is saved. void WinEHStatePass::rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F, CallSite CS, Value *State) { // Don't rewrite calls with a weird number of arguments. if (CS.getNumArgOperands() != 2) return; Instruction *Inst = CS.getInstruction(); SmallVector<OperandBundleDef, 1> OpBundles; CS.getOperandBundlesAsDefs(OpBundles); SmallVector<Value *, 3> OptionalArgs; if (Personality == EHPersonality::MSVC_CXX) { OptionalArgs.push_back(CxxLongjmpUnwind); OptionalArgs.push_back(State); OptionalArgs.push_back(emitEHLSDA(Builder, &F)); } else if (Personality == EHPersonality::MSVC_X86SEH) { OptionalArgs.push_back(SehLongjmpUnwind); OptionalArgs.push_back(State); if (UseStackGuard) OptionalArgs.push_back(Cookie); } else { llvm_unreachable("unhandled personality!"); } SmallVector<Value *, 5> Args; Args.push_back( Builder.CreateBitCast(CS.getArgOperand(0), Builder.getInt8PtrTy())); Args.push_back(Builder.getInt32(OptionalArgs.size())); Args.append(OptionalArgs.begin(), OptionalArgs.end()); CallSite NewCS; if (CS.isCall()) { auto *CI = cast<CallInst>(Inst); CallInst *NewCI = Builder.CreateCall(SetJmp3, Args, OpBundles); NewCI->setTailCallKind(CI->getTailCallKind()); NewCS = NewCI; } else { auto *II = cast<InvokeInst>(Inst); NewCS = Builder.CreateInvoke( SetJmp3, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles); } NewCS.setCallingConv(CS.getCallingConv()); NewCS.setAttributes(CS.getAttributes()); NewCS->setDebugLoc(CS->getDebugLoc()); Instruction *NewInst = NewCS.getInstruction(); NewInst->takeName(Inst); Inst->replaceAllUsesWith(NewInst); Inst->eraseFromParent(); }
// Emit an ABI-correct call to Target.
//
// Reader          - IR generator providing the builder, temporaries, and
//                   type helpers.
// Target          - call target as a target-pointer-sized integer (asserted
//                   below); converted to a function pointer before the call.
// MayThrow        - forwarded to the call-construction helpers.
// Args            - the normal (non-special) argument values.
// IndirectionCell - optional extra machine-word argument for virtual-stub
//                   dispatch; nullptr when absent.
// IsJmp           - true when emitting a CIL jmp; forces musttail and reuses
//                   the caller's indirect-result/argument pointers.
// CallNode        - out-parameter receiving the emitted call instruction.
//
// Returns the value node representing the call's result (the call itself
// for void results).
Value *ABICallSignature::emitCall(GenIR &Reader, Value *Target, bool MayThrow,
                                  ArrayRef<Value *> Args,
                                  Value *IndirectionCell, bool IsJmp,
                                  Value **CallNode) const {
  assert(Target->getType()->isIntegerTy(Reader.TargetPointerSizeInBits));

  LLVMContext &Context = *Reader.JitContext->LLVMContext;

  // Compute the function type
  bool HasIndirectResult = Result.getKind() == ABIArgInfo::Indirect;
  bool HasIndirectionCell = IndirectionCell != nullptr;
  bool IsUnmanagedCall =
      Signature.getCallingConvention() != CORINFO_CALLCONV_DEFAULT;
  bool CallerHasSecretParameter = Reader.MethodSignature.hasSecretParameter();
  bool IsJmpWithSecretParam = IsJmp && CallerHasSecretParameter;
  // At most one of these special-argument cases may apply to a single call.
  assert(((HasIndirectionCell ? 1 : 0) + (IsUnmanagedCall ? 1 : 0) +
          (IsJmpWithSecretParam ? 1 : 0)) <= 1);
  uint32_t NumSpecialArgs = 0;
  if (HasIndirectionCell || IsJmpWithSecretParam) {
    NumSpecialArgs = 1;
  }

  uint32_t NumExtraArgs = (HasIndirectResult ? 1 : 0) + NumSpecialArgs;
  const uint32_t NumArgs = Args.size() + NumExtraArgs;

  Value *ResultNode = nullptr;
  SmallVector<Type *, 16> ArgumentTypes(NumArgs);
  SmallVector<Value *, 16> Arguments(NumArgs);
  // NOTE(review): Attrs is pre-sized with NumArgs + 1 default (empty)
  // AttributeSets and later appended to with push_back; presumably the empty
  // leading sets are ignored when merged by AttributeSet::get below — confirm.
  SmallVector<AttributeSet, 16> Attrs(NumArgs + 1);
  IRBuilder<> &Builder = *Reader.LLVMBuilder;

  // Check for calls with special args.
  //
  // Any special arguments are passed immediately preceeding the normal
  // arguments. The backend will place these arguments in the appropriate
  // registers according to the calling convention. Each special argument
  // should be machine-word-sized.
  if (HasIndirectionCell) {
    assert(IndirectionCell->getType()->isIntegerTy(
        Reader.TargetPointerSizeInBits));
    ArgumentTypes[0] = IndirectionCell->getType();
    Arguments[0] = IndirectionCell;
  } else if (IsJmpWithSecretParam) {
    // Forward the caller's secret parameter to the jmp target.
    Arguments[0] = Reader.secretParam();
    ArgumentTypes[0] = Arguments[0]->getType();
  }

  // Position of the indirect-result pointer, if any: after the special
  // args and after "this" (when present); -1 means no indirect result.
  int32_t ResultIndex = -1;
  if (HasIndirectResult) {
    ResultIndex = (int32_t)NumSpecialArgs + (Signature.hasThis() ? 1 : 0);
    Type *ResultTy = Result.getType();

    // Jmp target signature has to match the caller's signature. Since we type
    // the caller's indirect result parameters as managed pointers, jmp
    // target's indirect result parameters also have to be typed as managed
    // pointers.
    ArgumentTypes[ResultIndex] =
        IsJmp ? Reader.getManagedPointerType(ResultTy)
              : Reader.getUnmanagedPointerType(ResultTy);

    if (IsJmp) {
      // When processing jmp, pass the pointer that we got from the caller
      // rather than a pointer to a copy in the current frame.
      Arguments[ResultIndex] = ResultNode = Reader.IndirectResult;
    } else {
      Arguments[ResultIndex] = ResultNode = Reader.createTemporary(ResultTy);
    }

    if (ResultTy->isStructTy()) {
      Reader.setValueRepresentsStruct(ResultNode);
    }
  } else {
    // Direct result: attach zext/sext return attributes when the ABI
    // requires widening the return value.
    AttrBuilder RetAttrs;
    if (Result.getKind() == ABIArgInfo::ZeroExtend) {
      RetAttrs.addAttribute(Attribute::ZExt);
    } else if (Result.getKind() == ABIArgInfo::SignExtend) {
      RetAttrs.addAttribute(Attribute::SExt);
    }
    if (RetAttrs.hasAttributes()) {
      Attrs.push_back(
          AttributeSet::get(Context, AttributeSet::ReturnIndex, RetAttrs));
    }
  }

  // I indexes the emitted argument list (skipping over the indirect-result
  // slot); J indexes the ABI info for the incoming Args.
  uint32_t I = NumSpecialArgs, J = 0;
  for (auto Arg : Args) {
    AttrBuilder ArgAttrs;

    // Skip the slot reserved for the indirect-result pointer.
    if (ResultIndex >= 0 && I == (uint32_t)ResultIndex) {
      I++;
    }

    const ABIArgInfo &ArgInfo = this->Args[J];
    Type *ArgType = Arg->getType();

    if (ArgInfo.getKind() == ABIArgInfo::Indirect) {
      // TODO: byval attribute support
      if (IsJmp) {
        // When processing jmp pass the pointer that we got from the caller
        // rather than a pointer to a copy in the current frame.
        Arguments[I] = Arg;
        ArgumentTypes[I] = ArgType;
      } else {
        // Pass a pointer to a frame-local copy of the argument.
        Value *Temp = nullptr;
        if (Reader.doesValueRepresentStruct(Arg)) {
          // Arg is already a pointer to struct storage; copy the struct.
          StructType *ArgStructTy =
              cast<StructType>(ArgType->getPointerElementType());
          ArgumentTypes[I] = ArgType;
          Temp = Reader.createTemporary(ArgStructTy);
          const bool IsVolatile = false;
          Reader.copyStruct(ArgStructTy, Temp, Arg, IsVolatile);
        } else {
          // Spill the scalar value to a temporary and pass its address.
          ArgumentTypes[I] = ArgType->getPointerTo();
          Temp = Reader.createTemporary(ArgType);
          Builder.CreateStore(Arg, Temp);
        }
        Arguments[I] = Temp;
      }
    } else {
      // Direct argument: coerce to the ABI-specified type and attach any
      // required extension attributes.
      ArgumentTypes[I] = ArgInfo.getType();
      Arguments[I] = coerce(Reader, ArgInfo.getType(), Arg);

      if (ArgInfo.getKind() == ABIArgInfo::ZeroExtend) {
        ArgAttrs.addAttribute(Attribute::ZExt);
      } else if (ArgInfo.getKind() == ABIArgInfo::SignExtend) {
        ArgAttrs.addAttribute(Attribute::SExt);
      }
      if (ArgAttrs.hasAttributes()) {
        const unsigned Idx = I + 1; // Add one to accomodate the return attrs.
        Attrs.push_back(AttributeSet::get(Context, Idx, ArgAttrs));
      }
    }

    I++, J++;
  }

  // Build the function pointer type and convert the integer target to it.
  const bool IsVarArg = false;
  Type *FunctionTy =
      FunctionType::get(FuncResultType, ArgumentTypes, IsVarArg);
  Type *FunctionPtrTy = Reader.getUnmanagedPointerType(FunctionTy);

  Target = Builder.CreateIntToPtr(Target, FunctionPtrTy);

  // The most straightforward way to satisfy the constraints imposed by the GC
  // on threads that are executing unmanaged code is to make the transition to
  // and from unmanaged code immediately preceeding and following the machine
  // call instruction, respectively. Unfortunately, there is no way to express
  // this in "standard" LLVM IR, hence the intrinsic. This intrinsic is also
  // a special GC statepoint that forces any GC pointers in callee-saved
  // registers to be spilled to the stack.
  CallSite Call;
  Value *UnmanagedCallResult = nullptr;
  if (IsUnmanagedCall) {
    Call = emitUnmanagedCall(Reader, Target, MayThrow, Arguments,
                             UnmanagedCallResult);
  } else {
    Call = Reader.makeCall(Target, MayThrow, Arguments);
  }

  // Select the calling convention for the emitted call.
  CallingConv::ID CC;
  if (HasIndirectionCell) {
    assert(Signature.getCallingConvention() == CORINFO_CALLCONV_DEFAULT);
    CC = CallingConv::CLR_VirtualDispatchStub;
  } else if (IsJmpWithSecretParam) {
    assert(Signature.getCallingConvention() == CORINFO_CALLCONV_DEFAULT);
    CC = CallingConv::CLR_SecretParameter;
  } else {
    bool Unused;
    CC = getLLVMCallingConv(getNormalizedCallingConvention(Signature), Unused);
  }
  Call.setCallingConv(CC);

  if (Attrs.size() > 0) {
    Call.setAttributes(AttributeSet::get(Context, Attrs));
  }

  if (ResultNode == nullptr) {
    // Direct result: coerce the raw call result to the signature's type,
    // unless the signature returns void.
    assert(!HasIndirectResult);
    const CallArgType &SigResultType = Signature.getResultType();
    Type *Ty = Reader.getType(SigResultType.CorType, SigResultType.Class);

    if (!Ty->isVoidTy()) {
      ResultNode = coerce(Reader, Ty, IsUnmanagedCall ? UnmanagedCallResult
                                                      : Call.getInstruction());
    } else {
      ResultNode = Call.getInstruction();
    }
  } else {
    // Indirect result: load the value back unless it is struct-valued (in
    // which case the pointer itself represents the struct).
    if (!Reader.doesValueRepresentStruct(ResultNode)) {
      ResultNode = Builder.CreateLoad(ResultNode);
    }
  }

  *CallNode = Call.getInstruction();

  if (IsJmp) {
    // jmp must be emitted as a musttail call.
    CallInst *TheCallInst = cast<CallInst>(*CallNode);
    TheCallInst->setTailCallKind(CallInst::TailCallKind::TCK_MustTail);
  }

  return ResultNode;
}