/// Wrap this SIL type in Optional<T> and return the lowered SIL type of
/// the resulting optional, using the abstraction pattern of F's generic
/// signature.
SILType SILType::wrapAnyOptionalType(SILFunction &F) const {
  SILModule &Mod = F.getModule();

  // Build the bound generic type Optional<RValueType>.
  EnumDecl *OptDecl = Mod.getASTContext().getOptionalDecl(OTK_Optional);
  BoundGenericType *OptionalTy =
      BoundGenericType::get(OptDecl, Type(), {getSwiftRValueType()});

  // Lower the optional under the function's generic signature.
  auto Sig = F.getLoweredFunctionType()->getGenericSignature();
  AbstractionPattern AbstractionPat(Sig, OptionalTy->getCanonicalType());
  return Mod.Types.getLoweredType(AbstractionPat, OptionalTy);
}
// Returns true if the callee contains a partial apply instruction, // whose substitutions list would contain opened existentials after // inlining. static bool calleeHasPartialApplyWithOpenedExistentials(FullApplySite AI) { if (!AI.hasSubstitutions()) return false; SILFunction *Callee = AI.getReferencedFunction(); auto Subs = AI.getSubstitutions(); // Bail if there are no open existentials in the list of substitutions. bool HasNoOpenedExistentials = true; for (auto Sub : Subs) { if (Sub.getReplacement()->hasOpenedExistential()) { HasNoOpenedExistentials = false; break; } } if (HasNoOpenedExistentials) return false; auto SubsMap = Callee->getLoweredFunctionType() ->getGenericSignature()->getSubstitutionMap(Subs); for (auto &BB : *Callee) { for (auto &I : BB) { if (auto PAI = dyn_cast<PartialApplyInst>(&I)) { auto PAISubs = PAI->getSubstitutions(); if (PAISubs.empty()) continue; // Check if any of substitutions would contain open existentials // after inlining. auto PAISubMap = PAI->getOrigCalleeType() ->getGenericSignature()->getSubstitutionMap(PAISubs); PAISubMap = PAISubMap.subst(SubsMap); if (PAISubMap.hasOpenedExistential()) return true; } } } return false; }
/// Compute the new lowered function type after the signature optimizations
/// recorded in ArgumentDescList / ResultDescList have been applied.
CanSILFunctionType FunctionSignatureTransform::createOptimizedSILFunctionType() {
  CanSILFunctionType OrigFTy = F->getLoweredFunctionType();

  // The only way that we modify the arity of function parameters is here for
  // dead arguments. Doing anything else is unsafe since by definition non-dead
  // arguments will have SSA uses in the function. We would need to be smarter
  // in our moving to handle such cases.
  llvm::SmallVector<SILParameterInfo, 8> NewParams;
  for (auto &ArgDesc : ArgumentDescList)
    computeOptimizedArgInterface(ArgDesc, NewParams);

  // ResultDescs only covers the direct results; we currently can't ever
  // change an indirect result. Piece the modified direct result information
  // back into the all-results list.
  llvm::SmallVector<SILResultInfo, 8> NewResults;
  for (SILResultInfo OrigResult : OrigFTy->getResults()) {
    // A direct result whose epilogue retain was matched is rewritten from
    // @owned to @unowned; everything else is carried over unchanged.
    bool StripOwnership =
        OrigResult.isFormalDirect() && !ResultDescList[0].CalleeRetain.empty();
    if (StripOwnership) {
      ++NumOwnedConvertedToNotOwnedResult;
      NewResults.push_back(
          SILResultInfo(OrigResult.getType(), ResultConvention::Unowned));
    } else {
      NewResults.push_back(OrigResult);
    }
  }

  // Don't use a method representation if we modified self.
  auto NewExtInfo = OrigFTy->getExtInfo();
  if (shouldModifySelfArgument)
    NewExtInfo =
        NewExtInfo.withRepresentation(SILFunctionTypeRepresentation::Thin);

  return SILFunctionType::get(OrigFTy->getGenericSignature(), NewExtInfo,
                              OrigFTy->getCalleeConvention(), NewParams,
                              NewResults, OrigFTy->getOptionalErrorResult(),
                              F->getModule().getASTContext());
}
bool FunctionSignatureTransform::OwnedToGuaranteedAnalyzeResults() { auto FTy = F->getLoweredFunctionType(); // For now, only do anything if there's a single direct result. if (FTy->getDirectResults().size() != 1) return false; bool SignatureOptimize = false; if (ResultDescList[0].hasConvention(ResultConvention::Owned)) { auto &RI = ResultDescList[0]; // We have an @owned return value, find the epilogue retains now. ConsumedResultToEpilogueRetainMatcher ReturnRetainMap(RCIA->get(F), AA, F); auto Retains = ReturnRetainMap.getEpilogueRetains(); // We do not need to worry about the throw block, as the return value is only // going to be used in the return block/normal block of the try_apply // instruction. if (!Retains.empty()) { RI.CalleeRetain = Retains; SignatureOptimize = true; RI.OwnedToGuaranteed = true; } } return SignatureOptimize; }
/// \brief We only know how to simulate reference call effects for unary /// function calls that take their argument @owned or @guaranteed and return an /// @owned value. static bool knowHowToEmitReferenceCountInsts(ApplyInst *Call) { if (Call->getNumArguments() != 1) return false; FunctionRefInst *FRI = cast<FunctionRefInst>(Call->getCallee()); SILFunction *F = FRI->getReferencedFunction(); auto FnTy = F->getLoweredFunctionType(); // Look at the result type. if (FnTy->getNumAllResults() != 1) return false; auto ResultInfo = FnTy->getAllResults()[0]; if (ResultInfo.getConvention() != ResultConvention::Owned) return false; // Look at the parameter. auto Params = FnTy->getParameters(); (void) Params; assert(Params.size() == 1 && "Expect one parameter"); auto ParamConv = FnTy->getParameters()[0].getConvention(); return ParamConv == ParameterConvention::Direct_Owned || ParamConv == ParameterConvention::Direct_Guaranteed; }
CanSILFunctionType FunctionSignatureTransformDescriptor::createOptimizedSILFunctionType() { SILFunction *F = OriginalFunction; CanSILFunctionType FTy = F->getLoweredFunctionType(); auto ExpectedFTy = F->getLoweredType().castTo<SILFunctionType>(); auto HasGenericSignature = FTy->getGenericSignature() != nullptr; // The only way that we modify the arity of function parameters is here for // dead arguments. Doing anything else is unsafe since by definition non-dead // arguments will have SSA uses in the function. We would need to be smarter // in our moving to handle such cases. llvm::SmallVector<SILParameterInfo, 8> InterfaceParams; for (auto &ArgDesc : ArgumentDescList) { computeOptimizedArgInterface(ArgDesc, InterfaceParams); } // ResultDescs only covers the direct results; we currently can't ever // change an indirect result. Piece the modified direct result information // back into the all-results list. llvm::SmallVector<SILResultInfo, 8> InterfaceResults; for (SILResultInfo InterfaceResult : FTy->getResults()) { if (InterfaceResult.isFormalDirect()) { auto &RV = ResultDescList[0]; if (!RV.CalleeRetain.empty()) { ++NumOwnedConvertedToNotOwnedResult; InterfaceResults.push_back(SILResultInfo(InterfaceResult.getType(), ResultConvention::Unowned)); continue; } } InterfaceResults.push_back(InterfaceResult); } llvm::SmallVector<SILYieldInfo, 8> InterfaceYields; for (SILYieldInfo InterfaceYield : FTy->getYields()) { // For now, don't touch the yield types. InterfaceYields.push_back(InterfaceYield); } bool UsesGenerics = false; if (HasGenericSignature) { // Not all of the generic type parameters are used by the function // parameters. // Check which of the generic type parameters are not used and check if they // are used anywhere in the function body. If this is not the case, we can // remove the unused generic type parameters from the generic signature. 
// This makes the code both smaller and faster, because no implicit // parameters for type metadata and conformances need to be passed to the // callee at the LLVM IR level. // TODO: Implement a more precise analysis, so that we can eliminate only // those generic parameters which are not used. UsesGenerics = usesGenerics(F, InterfaceParams, InterfaceResults); // The set of used archetypes is complete now. if (!UsesGenerics) { // None of the generic type parameters are used. LLVM_DEBUG(llvm::dbgs() << "None of generic parameters are used by " << F->getName() << "\n"; llvm::dbgs() << "Interface params:\n"; for (auto Param : InterfaceParams) { Param.getType().dump(); } llvm::dbgs() << "Interface results:\n"; for (auto Result : InterfaceResults) { Result.getType().dump(); }); }
/// \brief Make sure that all parameters are passed with a reference count
/// neutral parameter convention except for self.
///
/// Validates, per semantic-call kind, that the call has the expected argument
/// count, argument triviality, and self-parameter convention before the call
/// is considered safe to hoist. Kinds not listed in the switch are accepted
/// as-is.
bool swift::ArraySemanticsCall::isValidSignature() {
  assert(SemanticsCall && getKind() != ArrayCallKind::kNone &&
         "Need an array semantic call");
  FunctionRefInst *FRI = cast<FunctionRefInst>(SemanticsCall->getCallee());
  SILFunction *F = FRI->getReferencedFunction();
  auto FnTy = F->getLoweredFunctionType();
  auto &Mod = F->getModule();

  // Check whether we have a valid signature for semantic calls that we hoist.
  switch (getKind()) {
  // All other calls can be consider valid.
  default: break;
  case ArrayCallKind::kArrayPropsIsNativeTypeChecked: {
    // @guaranteed/@owned Self
    if (SemanticsCall->getNumArguments() != 1)
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kCheckIndex: {
    // Int, @guaranteed/@owned Self
    if (SemanticsCall->getNumArguments() != 2 ||
        !SemanticsCall->getArgument(0)->getType().isTrivial(Mod))
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kCheckSubscript: {
    // Int, Bool, Self — both non-self arguments must be trivial.
    if (SemanticsCall->getNumArguments() != 3 ||
        !SemanticsCall->getArgument(0)->getType().isTrivial(Mod))
      return false;
    if (!SemanticsCall->getArgument(1)->getType().isTrivial(Mod))
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kMakeMutable: {
    // Self must be passed @inout.
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Indirect_Inout;
  }
  case ArrayCallKind::kArrayUninitialized: {
    // Make sure that if we are a _adoptStorage call that our storage is
    // uniquely referenced by us.
    SILValue Arg0 = SemanticsCall->getArgument(0);
    if (Arg0->getType().isExistentialType()) {
      auto *AllocBufferAI = dyn_cast<ApplyInst>(Arg0);
      if (!AllocBufferAI)
        return false;
      // The storage must come directly from swift_bufferAllocate and have no
      // other (non-debug) uses.
      auto *AllocFn = AllocBufferAI->getReferencedFunction();
      if (!AllocFn || AllocFn->getName() != "swift_bufferAllocate" ||
          !hasOneNonDebugUse(AllocBufferAI))
        return false;
    }
    return true;
  }
  case ArrayCallKind::kWithUnsafeMutableBufferPointer: {
    // Exactly one indirect result, three arguments, and @inout self.
    SILFunctionConventions origConv(SemanticsCall->getOrigCalleeType(), Mod);
    if (origConv.getNumIndirectSILResults() != 1 ||
        SemanticsCall->getNumArguments() != 3)
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Indirect_Inout;
  }
  }

  return true;
}
/// Emit the body of a materializeForSet: compute the address of the storage,
/// return it as a Builtin.RawPointer together with an optional callback
/// function, packaged in a tuple (element 1 of the result tuple is the
/// optional callback slot).
void MaterializeForSetEmitter::emit(SILGenFunction &gen, ManagedValue self,
                                    SILValue resultBuffer,
                                    SILValue callbackBuffer,
                                    ArrayRef<ManagedValue> indices) {
  SILLocation loc = Witness;
  loc.markAutoGenerated();

  // If there's an abstraction difference, we always need to use the
  // get/set pattern.
  AccessStrategy strategy;
  if (WitnessStorage->getType()->is<ReferenceStorageType>() ||
      (Conformance && RequirementStorageType != WitnessStorageType)) {
    strategy = AccessStrategy::DispatchToAccessor;
  } else {
    strategy = WitnessStorage->getAccessStrategy(TheAccessSemantics,
                                                 AccessKind::ReadWrite);
  }

  // Handle the indices (only subscripts have them).
  RValue indicesRV;
  if (isa<SubscriptDecl>(WitnessStorage)) {
    indicesRV = collectIndicesFromParameters(gen, loc, indices);
  } else {
    assert(indices.empty() && "indices for a non-subscript?");
  }

  // As above, assume that we don't need to reabstract 'self'.

  // Choose the right implementation. The emitUsing* helpers may set
  // callbackFn when a writeback callback is required.
  SILValue address;
  SILFunction *callbackFn = nullptr;
  switch (strategy) {
  case AccessStrategy::Storage:
    address = emitUsingStorage(gen, loc, self, std::move(indicesRV));
    break;

  case AccessStrategy::Addressor:
    address = emitUsingAddressor(gen, loc, self, std::move(indicesRV),
                                 callbackBuffer, callbackFn);
    break;

  case AccessStrategy::DirectToAccessor:
  case AccessStrategy::DispatchToAccessor:
    address = emitUsingGetterSetter(gen, loc, self, std::move(indicesRV),
                                    resultBuffer, callbackBuffer, callbackFn);
    break;
  }

  // Return the address as a Builtin.RawPointer.
  SILType rawPointerTy = SILType::getRawPointerType(gen.getASTContext());
  address = gen.B.createAddressToPointer(loc, address, rawPointerTy);

  // The function's lowered result is the (RawPointer, Optional<...>) tuple;
  // map it into the function's context to get the concrete tuple type.
  SILType resultTupleTy = gen.F.mapTypeIntoContext(
                 gen.F.getLoweredFunctionType()->getResult().getSILType());
  SILType optCallbackTy = resultTupleTy.getTupleElementType(1);

  // Form the callback.
  SILValue callback;
  if (callbackFn) {
    // Make a reference to the function.
    callback = gen.B.createFunctionRef(loc, callbackFn);

    // If it's polymorphic, cast to RawPointer and then back to the
    // right monomorphic type. The safety of this cast relies on some
    // assumptions about what exactly IRGen can reconstruct from the
    // callback's thick type argument.
    if (callbackFn->getLoweredFunctionType()->isPolymorphic()) {
      callback = gen.B.createThinFunctionToPointer(loc, callback, rawPointerTy);
      OptionalTypeKind optKind;
      auto callbackTy = optCallbackTy.getAnyOptionalObjectType(SGM.M, optKind);
      callback = gen.B.createPointerToThinFunction(loc, callback, callbackTy);
    }

    callback = gen.B.createOptionalSome(loc, callback, optCallbackTy);
  } else {
    callback = gen.B.createOptionalNone(loc, optCallbackTy);
  }

  // Form the result and return.
  auto result = gen.B.createTuple(loc, resultTupleTy, { address, callback });
  gen.Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc));
  gen.B.createReturn(loc, result);
}
/// In this function we create the actual cloned function and its proper cloned /// type. But we do not create any body. This implies that the creation of the /// actual arguments in the function is in populateCloned. /// /// \arg PAUser The function that is being passed the partial apply. /// \arg PAI The partial apply that is being passed to PAUser. /// \arg ClosureIndex The index of the partial apply in PAUser's function /// signature. /// \arg ClonedName The name of the cloned function that we will create. SILFunction * ClosureSpecCloner::initCloned(SILOptFunctionBuilder &FunctionBuilder, const CallSiteDescriptor &CallSiteDesc, StringRef ClonedName) { SILFunction *ClosureUser = CallSiteDesc.getApplyCallee(); // This is the list of new interface parameters of the cloned function. llvm::SmallVector<SILParameterInfo, 4> NewParameterInfoList; // First add to NewParameterInfoList all of the SILParameterInfo in the // original function except for the closure. CanSILFunctionType ClosureUserFunTy = ClosureUser->getLoweredFunctionType(); auto ClosureUserConv = ClosureUser->getConventions(); unsigned Index = ClosureUserConv.getSILArgIndexOfFirstParam(); for (auto ¶m : ClosureUserConv.getParameters()) { if (Index != CallSiteDesc.getClosureIndex()) NewParameterInfoList.push_back(param); ++Index; } // Then add any arguments that are captured in the closure to the function's // argument type. Since they are captured, we need to pass them directly into // the new specialized function. SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee(); auto ClosedOverFunConv = ClosedOverFun->getConventions(); SILModule &M = ClosureUser->getModule(); // Captured parameters are always appended to the function signature. If the // type of the captured argument is: // - direct and trivial, pass the argument as Direct_Unowned. // - direct and non-trivial, pass the argument as Direct_Owned. // - indirect, pass the argument using the same parameter convention as in the // original closure. 
// // We use the type of the closure here since we allow for the closure to be an // external declaration. unsigned NumTotalParams = ClosedOverFunConv.getNumParameters(); unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments(); for (auto &PInfo : ClosedOverFunConv.getParameters().slice(NumNotCaptured)) { ParameterConvention ParamConv; if (PInfo.isFormalIndirect()) { ParamConv = PInfo.getConvention(); assert(!SILModuleConventions(M).useLoweredAddresses() || ParamConv == ParameterConvention::Indirect_Inout || ParamConv == ParameterConvention::Indirect_InoutAliasable); } else { ParamConv = ClosedOverFunConv.getSILType(PInfo).isTrivial(M) ? ParameterConvention::Direct_Unowned : ParameterConvention::Direct_Owned; } SILParameterInfo NewPInfo(PInfo.getType(), ParamConv); NewParameterInfoList.push_back(NewPInfo); } // The specialized function is always a thin function. This is important // because we may add additional parameters after the Self parameter of // witness methods. In this case the new function is not a method anymore. auto ExtInfo = ClosureUserFunTy->getExtInfo(); ExtInfo = ExtInfo.withRepresentation(SILFunctionTypeRepresentation::Thin); auto ClonedTy = SILFunctionType::get( ClosureUserFunTy->getGenericSignature(), ExtInfo, ClosureUserFunTy->getCoroutineKind(), ClosureUserFunTy->getCalleeConvention(), NewParameterInfoList, ClosureUserFunTy->getYields(), ClosureUserFunTy->getResults(), ClosureUserFunTy->getOptionalErrorResult(), M.getASTContext()); // We make this function bare so we don't have to worry about decls in the // SILArgument. auto *Fn = FunctionBuilder.createFunction( // It's important to use a shared linkage for the specialized function // and not the original linkage. // Otherwise the new function could have an external linkage (in case the // original function was de-serialized) and would not be code-gen'd. 
// It's also important to disconnect this specialized function from any // classes (the classSubclassScope), because that may incorrectly // influence the linkage. getSpecializedLinkage(ClosureUser, ClosureUser->getLinkage()), ClonedName, ClonedTy, ClosureUser->getGenericEnvironment(), ClosureUser->getLocation(), IsBare, ClosureUser->isTransparent(), CallSiteDesc.isSerialized(), IsNotDynamic, ClosureUser->getEntryCount(), ClosureUser->isThunk(), /*classSubclassScope=*/SubclassScope::NotApplicable, ClosureUser->getInlineStrategy(), ClosureUser->getEffectsKind(), ClosureUser, ClosureUser->getDebugScope()); if (!ClosureUser->hasQualifiedOwnership()) { Fn->setUnqualifiedOwnership(); } for (auto &Attr : ClosureUser->getSemanticsAttrs()) Fn->addSemanticsAttr(Attr); return Fn; }
/// Bridge argument types and adjust retain count conventions for an ObjC thunk.
///
/// Emits the thunk's SIL arguments (indirect results, the foreign error slot
/// if any, and the formal parameters), bridges each parameter from its ObjC
/// representation to the native Swift representation, appends the bridged
/// values to \p args, and returns the substituted ObjC function type.
static SILFunctionType *emitObjCThunkArguments(SILGenFunction &gen,
                                               SILLocation loc,
                                               SILDeclRef thunk,
                                               SmallVectorImpl<SILValue> &args,
                                               SILValue &foreignErrorSlot,
                              Optional<ForeignErrorConvention> &foreignError) {
  // The native (non-foreign) entry point this thunk forwards to.
  SILDeclRef native = thunk.asForeign(false);

  auto mod = gen.SGM.M.getSwiftModule();
  auto subs = gen.F.getForwardingSubstitutions();

  auto objcInfo = gen.SGM.Types.getConstantInfo(thunk);
  auto objcFnTy = objcInfo.SILFnType->substGenericArgs(gen.SGM.M, mod, subs);

  auto swiftInfo = gen.SGM.Types.getConstantInfo(native);
  auto swiftFnTy = swiftInfo.SILFnType->substGenericArgs(gen.SGM.M, mod, subs);

  // We must have the same context archetypes as the unthunked function.
  assert(objcInfo.ContextGenericParams == swiftInfo.ContextGenericParams);

  SmallVector<ManagedValue, 8> bridgedArgs;
  bridgedArgs.reserve(objcFnTy->getParameters().size());

  SILFunction *orig = gen.SGM.getFunction(native, NotForDefinition);

  // Find the foreign error convention if we have one.
  if (orig->getLoweredFunctionType()->hasErrorResult()) {
    auto func = cast<AbstractFunctionDecl>(thunk.getDecl());
    foreignError = func->getForeignErrorConvention();
    assert(foreignError && "couldn't find foreign error convention!");
  }

  // Emit the indirect result arguments, if any.
  // FIXME: we're just assuming that these match up exactly?
  for (auto indirectResult : objcFnTy->getIndirectResults()) {
    SILType argTy = gen.F.mapTypeIntoContext(indirectResult.getSILType());
    auto arg = new (gen.F.getModule()) SILArgument(gen.F.begin(), argTy);
    args.push_back(arg);
  }

  // Emit the other arguments, taking ownership of arguments if necessary.
  // The ObjC parameter list may include one extra slot for the foreign
  // error parameter, which has no native counterpart.
  auto inputs = objcFnTy->getParameters();
  auto nativeInputs = swiftFnTy->getParameters();
  assert(inputs.size() ==
           nativeInputs.size() + unsigned(foreignError.hasValue()));
  for (unsigned i = 0, e = inputs.size(); i < e; ++i) {
    SILType argTy = gen.F.mapTypeIntoContext(inputs[i].getSILType());
    SILValue arg = new(gen.F.getModule()) SILArgument(gen.F.begin(), argTy);

    // If this parameter is the foreign error slot, pull it out.
    // It does not correspond to a native argument.
    if (foreignError && i == foreignError->getErrorParameterIndex()) {
      foreignErrorSlot = arg;
      continue;
    }

    // If this parameter is deallocating, emit an unmanaged rvalue and
    // continue. The object has the deallocating bit set so retain, release is
    // irrelevant.
    if (inputs[i].isDeallocating()) {
      bridgedArgs.push_back(ManagedValue::forUnmanaged(arg));
      continue;
    }

    // If the argument is a block, copy it.
    if (argTy.isBlockPointerCompatible()) {
      auto copy = gen.B.createCopyBlock(loc, arg);
      // If the argument is consumed, we're still responsible for releasing the
      // original.
      if (inputs[i].isConsumed())
        gen.emitManagedRValueWithCleanup(arg);
      arg = copy;
    }
    // Convert the argument to +1 if necessary.
    else if (!inputs[i].isConsumed()) {
      arg = emitObjCUnconsumedArgument(gen, loc, arg);
    }

    auto managedArg = gen.emitManagedRValueWithCleanup(arg);
    bridgedArgs.push_back(managedArg);
  }

  assert(bridgedArgs.size() + unsigned(foreignError.hasValue()) ==
           objcFnTy->getParameters().size() &&
         "objc inputs don't match number of arguments?!");
  assert(bridgedArgs.size() == swiftFnTy->getNumSILArguments() &&
         "swift inputs don't match number of arguments?!");
  assert((foreignErrorSlot || !foreignError) &&
         "didn't find foreign error slot");

  // Bridge the input types.
  Scope scope(gen.Cleanups, CleanupLocation::get(loc));
  assert(bridgedArgs.size() == nativeInputs.size());
  for (unsigned i = 0, size = bridgedArgs.size(); i < size; ++i) {
    SILType argTy = gen.F.mapTypeIntoContext(
                      swiftFnTy->getParameters()[i].getSILType());
    ManagedValue native =
      gen.emitBridgedToNativeValue(loc,
                                   bridgedArgs[i],
                                   SILFunctionTypeRepresentation::ObjCMethod,
                                   argTy.getSwiftType());
    SILValue argValue;

    // Forward consumed arguments to the callee; pass the rest at +0 and let
    // the enclosing scope clean them up.
    if (nativeInputs[i].isConsumed())
      argValue = native.forward(gen);
    else
      argValue = native.getValue();

    args.push_back(argValue);
  }

  return objcFnTy;
}
/// \brief Populate the body of the cloned closure, modifying instructions as
/// necessary. This is where we create the actual specialized BB Arguments.
void ClosureSpecCloner::populateCloned() {
  SILFunction *Cloned = getCloned();
  SILFunction *ClosureUser = CallSiteDesc.getApplyCallee();

  // Create arguments for the entry block.
  SILBasicBlock *ClosureUserEntryBB = &*ClosureUser->begin();
  SILBasicBlock *ClonedEntryBB = Cloned->createBasicBlock();

  // Remove the closure argument: every original entry argument is re-created
  // in the clone except the one at the closure index, which we remember.
  SILArgument *ClosureArg = nullptr;
  for (size_t i = 0, e = ClosureUserEntryBB->args_size(); i != e; ++i) {
    SILArgument *Arg = ClosureUserEntryBB->getArgument(i);
    if (i == CallSiteDesc.getClosureIndex()) {
      ClosureArg = Arg;
      continue;
    }

    // Otherwise, create a new argument which copies the original argument
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(Arg->getType(), Arg->getDecl());
    ValueMap.insert(std::make_pair(Arg, MappedValue));
  }

  // Next we need to add in any arguments that are not captured as arguments to
  // the cloned function.
  //
  // We do not insert the new mapped arguments into the value map since there by
  // definition is nothing in the partial apply user function that references
  // such arguments. After this pass is done the only thing that will reference
  // the arguments is the partial apply that we will create.
  SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee();
  CanSILFunctionType ClosedOverFunTy = ClosedOverFun->getLoweredFunctionType();
  unsigned NumTotalParams = ClosedOverFunTy->getParameters().size();
  unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments();
  llvm::SmallVector<SILValue, 4> NewPAIArgs;
  for (auto &PInfo : ClosedOverFunTy->getParameters().slice(NumNotCaptured)) {
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(PInfo.getSILType());
    NewPAIArgs.push_back(MappedValue);
  }

  SILBuilder &Builder = getBuilder();
  Builder.setInsertionPoint(ClonedEntryBB);

  // Clone FRI and PAI, and replace usage of the removed closure argument
  // with result of cloned PAI.
  SILValue FnVal =
      Builder.createFunctionRef(CallSiteDesc.getLoc(), ClosedOverFun);
  auto *NewClosure = CallSiteDesc.createNewClosure(Builder, FnVal, NewPAIArgs);
  ValueMap.insert(std::make_pair(ClosureArg, SILValue(NewClosure)));

  BBMap.insert(std::make_pair(ClosureUserEntryBB, ClonedEntryBB));
  // Recursively visit original BBs in depth-first preorder, starting with the
  // entry block, cloning all instructions other than terminators.
  visitSILBasicBlock(ClosureUserEntryBB);

  // Now iterate over the BBs and fix up the terminators.
  for (auto BI = BBMap.begin(), BE = BBMap.end(); BI != BE; ++BI) {
    Builder.setInsertionPoint(BI->second);
    visit(BI->first->getTerminator());
  }

  // Then insert a release in all non failure exit BBs if our partial apply was
  // guaranteed. This is b/c it was passed at +0 originally and we need to
  // balance the initial increment of the newly created closure.
  if (CallSiteDesc.isClosureGuaranteed() &&
      CallSiteDesc.closureHasRefSemanticContext()) {
    for (SILBasicBlock *BB : CallSiteDesc.getNonFailureExitBBs()) {
      SILBasicBlock *OpBB = BBMap[BB];

      TermInst *TI = OpBB->getTerminator();
      auto Loc = CleanupLocation::get(NewClosure->getLoc());

      // If we have a return, we place the release right before it so we know
      // that it will be executed at the end of the epilogue.
      if (isa<ReturnInst>(TI)) {
        Builder.setInsertionPoint(TI);
        Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                   Atomicity::Atomic);
        continue;
      }

      // We use casts where findAllNonFailureExitBBs should have made sure that
      // this is true. This will ensure that the code is updated when we hit the
      // cast failure in debug builds.
      auto *Unreachable = cast<UnreachableInst>(TI);
      auto PrevIter = std::prev(SILBasicBlock::iterator(Unreachable));
      auto NoReturnApply = FullApplySite::isa(&*PrevIter);

      // We insert the release value right before the no return apply so that if
      // the partial apply is passed into the no-return function as an @owned
      // value, we will retain the partial apply before we release it and
      // potentially eliminate it.
      Builder.setInsertionPoint(NoReturnApply.getInstruction());
      Builder.createReleaseValue(Loc, SILValue(NewClosure), Atomicity::Atomic);
    }
  }
}
/// In this function we create the actual cloned function and its proper cloned /// type. But we do not create any body. This implies that the creation of the /// actual arguments in the function is in populateCloned. /// /// \arg PAUser The function that is being passed the partial apply. /// \arg PAI The partial apply that is being passed to PAUser. /// \arg ClosureIndex The index of the partial apply in PAUser's function /// signature. /// \arg ClonedName The name of the cloned function that we will create. SILFunction * ClosureSpecCloner::initCloned(const CallSiteDescriptor &CallSiteDesc, StringRef ClonedName) { SILFunction *ClosureUser = CallSiteDesc.getApplyCallee(); // This is the list of new interface parameters of the cloned function. llvm::SmallVector<SILParameterInfo, 4> NewParameterInfoList; // First add to NewParameterInfoList all of the SILParameterInfo in the // original function except for the closure. CanSILFunctionType ClosureUserFunTy = ClosureUser->getLoweredFunctionType(); unsigned Index = ClosureUserFunTy->getNumIndirectResults(); for (auto ¶m : ClosureUserFunTy->getParameters()) { if (Index != CallSiteDesc.getClosureIndex()) NewParameterInfoList.push_back(param); ++Index; } // Then add any arguments that are captured in the closure to the function's // argument type. Since they are captured, we need to pass them directly into // the new specialized function. SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee(); CanSILFunctionType ClosedOverFunTy = ClosedOverFun->getLoweredFunctionType(); SILModule &M = ClosureUser->getModule(); // Captured parameters are always appended to the function signature. If the // type of the captured argument is trivial, pass the argument as // Direct_Unowned. Otherwise pass it as Direct_Owned. // // We use the type of the closure here since we allow for the closure to be an // external declaration. 
unsigned NumTotalParams = ClosedOverFunTy->getParameters().size(); unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments(); for (auto &PInfo : ClosedOverFunTy->getParameters().slice(NumNotCaptured)) { if (PInfo.getSILType().isTrivial(M)) { SILParameterInfo NewPInfo(PInfo.getType(), ParameterConvention::Direct_Unowned); NewParameterInfoList.push_back(NewPInfo); continue; } SILParameterInfo NewPInfo(PInfo.getType(), ParameterConvention::Direct_Owned); NewParameterInfoList.push_back(NewPInfo); } // The specialized function is always a thin function. This is important // because we may add additional parameters after the Self parameter of // witness methods. In this case the new function is not a method anymore. auto ExtInfo = ClosureUserFunTy->getExtInfo(); ExtInfo = ExtInfo.withRepresentation(SILFunctionTypeRepresentation::Thin); auto ClonedTy = SILFunctionType::get( ClosureUserFunTy->getGenericSignature(), ExtInfo, ClosureUserFunTy->getCalleeConvention(), NewParameterInfoList, ClosureUserFunTy->getAllResults(), ClosureUserFunTy->getOptionalErrorResult(), M.getASTContext()); // We make this function bare so we don't have to worry about decls in the // SILArgument. auto *Fn = M.createFunction( // It's important to use a shared linkage for the specialized function // and not the original linkage. // Otherwise the new function could have an external linkage (in case the // original function was de-serialized) and would not be code-gen'd. 
getSpecializedLinkage(ClosureUser, ClosureUser->getLinkage()), ClonedName, ClonedTy, ClosureUser->getGenericEnvironment(), ClosureUser->getLocation(), IsBare, ClosureUser->isTransparent(), CallSiteDesc.isFragile(), ClosureUser->isThunk(), ClosureUser->getClassVisibility(), ClosureUser->getInlineStrategy(), ClosureUser->getEffectsKind(), ClosureUser, ClosureUser->getDebugScope()); Fn->setDeclCtx(ClosureUser->getDeclContext()); if (ClosureUser->hasUnqualifiedOwnership()) { Fn->setUnqualifiedOwnership(); } for (auto &Attr : ClosureUser->getSemanticsAttrs()) Fn->addSemanticsAttr(Attr); return Fn; }
/// Specialize a partial_apply by promoting the parameters indicated by
/// indices. We expect these parameters to be replaced by stack address
/// references.
///
/// Looks up (or clones) the promoted callee, rewrites the promoted box
/// arguments into box-projection addresses, inserts compensating releases of
/// each promoted box at the partial_apply's lifetime frontier, and returns
/// the replacement partial_apply. \p CFGChanged is OR-ed with whether the
/// lifetime-frontier computation modified the CFG.
static PartialApplyInst *
specializePartialApply(PartialApplyInst *PartialApply,
                       ParamIndexList &PromotedParamIndices,
                       bool &CFGChanged) {
  auto *FRI = cast<FunctionRefInst>(PartialApply->getCallee());
  assert(FRI && "Expected a direct partial_apply!");
  auto *F = FRI->getReferencedFunction();
  assert(F && "Expected a referenced function!");

  std::string ClonedName = getClonedName(F, PromotedParamIndices);

  auto &M = PartialApply->getModule();

  // Reuse a previously created specialization if one exists.
  SILFunction *ClonedFn;
  if (auto *PrevFn = M.lookUpFunction(ClonedName)) {
    ClonedFn = PrevFn;
  } else {
    // Clone the function the existing partial_apply references.
    PromotedParamCloner Cloner(F, PromotedParamIndices, ClonedName);
    Cloner.populateCloned();
    ClonedFn = Cloner.getCloned();
  }

  // Now create the new partial_apply using the cloned function.
  llvm::SmallVector<SILValue, 16> Args;

  // Computed lazily, the first time a promoted argument is seen.
  ValueLifetimeAnalysis::Frontier PAFrontier;

  // Promote the arguments that need promotion.
  for (auto &O : PartialApply->getArgumentOperands()) {
    auto ParamIndex = getParameterIndexForOperand(&O);
    if (!std::count(PromotedParamIndices.begin(), PromotedParamIndices.end(),
                    ParamIndex)) {
      Args.push_back(O.get());
      continue;
    }

    // If this argument is promoted, it is a box that we're
    // turning into an address because we've proven we can
    // keep this value on the stack. The partial_apply had ownership
    // of this box so we must now release it explicitly when the
    // partial_apply is released.
    auto box = cast<AllocBoxInst>(O.get());

    // If the box address has a MUI, route accesses through it so DI still
    // works.
    SILInstruction *promoted = nullptr;
    int numAddrUses = 0;
    for (Operand *BoxUse : box->getUses()) {
      if (auto *PBI = dyn_cast<ProjectBoxInst>(BoxUse->getUser())) {
        for (auto PBIUse : PBI->getUses()) {
          numAddrUses++;
          if (auto MUI = dyn_cast<MarkUninitializedInst>(PBIUse->getUser()))
            promoted = MUI;
        }
      }
    }
    assert((!promoted || numAddrUses == 1) &&
           "box value used by mark_uninitialized but not exclusively!");

    // We only reuse an existing project_box if it directly follows the
    // alloc_box. This makes sure that the project_box dominates the
    // partial_apply.
    if (!promoted)
      promoted = getOrCreateProjectBox(box);

    Args.push_back(promoted);

    if (PAFrontier.empty()) {
      ValueLifetimeAnalysis VLA(PartialApply);
      CFGChanged |= !VLA.computeFrontier(PAFrontier,
                                     ValueLifetimeAnalysis::AllowToModifyCFG);
      assert(!PAFrontier.empty() && "partial_apply must have at least one use "
                                    "to release the returned function");
    }

    // Insert releases after each point where the partial_apply becomes dead.
    for (SILInstruction *FrontierInst : PAFrontier) {
      SILBuilderWithScope Builder(FrontierInst);
      Builder.emitStrongReleaseAndFold(PartialApply->getLoc(), O.get());
    }
  }

  SILBuilderWithScope Builder(PartialApply);

  // Build the function_ref and partial_apply.
  SILValue FunctionRef =
      Builder.createFunctionRef(PartialApply->getLoc(), ClonedFn);
  CanSILFunctionType CanFnTy = ClonedFn->getLoweredFunctionType();
  auto const &Subs = PartialApply->getSubstitutions();
  CanSILFunctionType SubstCalleeTy =
      CanFnTy->substGenericArgs(M, M.getSwiftModule(), Subs);
  return Builder.createPartialApply(
      PartialApply->getLoc(), FunctionRef,
      SILType::getPrimitiveObjectType(SubstCalleeTy),
      PartialApply->getSubstitutions(), Args, PartialApply->getType());
}
/// Return the llvm::Function implementing one accessor (getter, setter,
/// index-equals, or index-hash) of a computed key path component.
///
/// For a non-generic accessor the SIL function's address is returned
/// directly. For a generic one, a private thunk is emitted that unmarshals
/// the generic environment from the component argument buffer and forwards
/// to the real accessor.
static llvm::Function *
getAccessorForComputedComponent(IRGenModule &IGM,
                                const KeyPathPatternComponent &component,
                                KeyPathAccessor whichAccessor,
                                GenericEnvironment *genericEnv,
                                ArrayRef<GenericRequirement> requirements) {
  // Select the SIL accessor that implements the requested operation.
  SILFunction *accessor;
  switch (whichAccessor) {
  case Getter:
    accessor = component.getComputedPropertyGetter();
    break;
  case Setter:
    accessor = component.getComputedPropertySetter();
    break;
  case Equals:
    accessor = component.getSubscriptIndexEquals();
    break;
  case Hash:
    accessor = component.getSubscriptIndexHash();
    break;
  }

  auto accessorFn = IGM.getAddrOfSILFunction(accessor, NotForDefinition);

  // If the accessor is not generic, we can use it as is.
  if (requirements.empty())
    return accessorFn;

  auto origFnTy = cast<llvm::FunctionType>(
      accessorFn->getType()->getPointerElementType());

  // Otherwise, we need a thunk to unmarshal the generic environment from the
  // argument area. It'd be nice to have a good way to represent this
  // directly in SIL, of course...
  const char *thunkName;
  unsigned numArgsToForward;
  switch (whichAccessor) {
  case Getter:
    thunkName = "keypath_get";
    numArgsToForward = 2;
    break;
  case Setter:
    thunkName = "keypath_set";
    numArgsToForward = 2;
    break;
  case Equals:
    thunkName = "keypath_equals";
    numArgsToForward = 2;
    break;
  case Hash:
    thunkName = "keypath_hash";
    numArgsToForward = 1;
    break;
  }

  // The thunk signature: the accessor's leading parameters, then (for
  // getter/setter) the component argument buffer, then the buffer size.
  SmallVector<llvm::Type *, 4> thunkParamTys;
  for (unsigned i = 0; i < numArgsToForward; ++i)
    thunkParamTys.push_back(origFnTy->getParamType(i));

  switch (whichAccessor) {
  case Getter:
  case Setter:
    thunkParamTys.push_back(IGM.Int8PtrTy);
    break;
  case Equals:
  case Hash:
    break;
  }
  thunkParamTys.push_back(IGM.SizeTy);

  auto thunkType = llvm::FunctionType::get(origFnTy->getReturnType(),
                                           thunkParamTys, /*vararg*/ false);
  auto accessorThunk =
      llvm::Function::Create(thunkType, llvm::GlobalValue::PrivateLinkage,
                             thunkName, IGM.getModule());
  accessorThunk->setAttributes(IGM.constructInitialAttributes());
  accessorThunk->setCallingConv(IGM.SwiftCC);

  switch (whichAccessor) {
  case Getter:
    // Original accessor's args should be @in or @out, meaning they won't be
    // captured or aliased.
    accessorThunk->addAttribute(1, llvm::Attribute::NoCapture);
    accessorThunk->addAttribute(1, llvm::Attribute::NoAlias);
    accessorThunk->addAttribute(2, llvm::Attribute::NoCapture);
    accessorThunk->addAttribute(2, llvm::Attribute::NoAlias);
    // Output is sret.
    accessorThunk->addAttribute(1, llvm::Attribute::StructRet);
    break;
  case Setter:
    // Original accessor's args should be @in or @out, meaning they won't be
    // captured or aliased.
    accessorThunk->addAttribute(1, llvm::Attribute::NoCapture);
    accessorThunk->addAttribute(1, llvm::Attribute::NoAlias);
    accessorThunk->addAttribute(2, llvm::Attribute::NoCapture);
    accessorThunk->addAttribute(2, llvm::Attribute::NoAlias);
    break;
  case Equals:
  case Hash:
    break;
  }

  // Emit the thunk body: rebind the generic environment from the component
  // argument buffer, then call through to the real accessor.
  {
    IRGenFunction IGF(IGM, accessorThunk);
    if (IGM.DebugInfo)
      IGM.DebugInfo->emitArtificialFunction(IGF, accessorThunk);

    auto params = IGF.collectParameters();
    Explosion forwardedArgs;
    forwardedArgs.add(params.claim(numArgsToForward));

    llvm::Value *componentArgsBuf;
    switch (whichAccessor) {
    case Getter:
    case Setter:
      // The component arguments are passed alongside the base being projected.
      componentArgsBuf = params.claimNext();
      // Pass the argument pointer down to the underlying function only when
      // the component actually has subscript indices.
      if (!component.getSubscriptIndices().empty())
        forwardedArgs.add(componentArgsBuf);
      break;
    case Equals:
    case Hash:
      // We're operating directly on the component argument buffer.
      componentArgsBuf = forwardedArgs.getAll()[0];
      break;
    }
    auto componentArgsBufSize = params.claimNext();
    bindPolymorphicArgumentsFromComponentIndices(IGF, component, genericEnv,
                                                 requirements, componentArgsBuf,
                                                 componentArgsBufSize);

    // Use the bound generic metadata to form a call to the original generic
    // accessor.
    WitnessMetadata ignoreWitnessMetadata;
    auto forwardingSubs = genericEnv->getGenericSignature()->getSubstitutionMap(
        genericEnv->getForwardingSubstitutions());
    emitPolymorphicArguments(IGF, accessor->getLoweredFunctionType(),
                             forwardingSubs, &ignoreWitnessMetadata,
                             forwardedArgs);
    auto fnPtr = FunctionPointer::forDirect(IGM, accessorFn,
                                            accessor->getLoweredFunctionType());
    auto call = IGF.Builder.CreateCall(fnPtr, forwardedArgs.claimAll());

    if (call->getType()->isVoidTy())
      IGF.Builder.CreateRetVoid();
    else
      IGF.Builder.CreateRet(call);
  }

  return accessorThunk;
}
void FunctionSignatureTransform::createFunctionSignatureOptimizedFunction() { // Create the optimized function ! SILModule &M = F->getModule(); std::string Name = getUniqueName(createOptimizedSILFunctionName(), M); NewF = M.createFunction( F->getLinkage(), Name, createOptimizedSILFunctionType(), nullptr, F->getLocation(), F->isBare(), F->isTransparent(), F->isFragile(), F->isThunk(), F->getClassVisibility(), F->getInlineStrategy(), F->getEffectsKind(), 0, F->getDebugScope(), F->getDeclContext()); // Then we transfer the body of F to NewF. NewF->spliceBody(F); NewF->setDeclCtx(F->getDeclContext()); // Array semantic clients rely on the signature being as in the original // version. for (auto &Attr : F->getSemanticsAttrs()) { if (!StringRef(Attr).startswith("array.")) NewF->addSemanticsAttr(Attr); } // Do the last bit of work to the newly created optimized function. ArgumentExplosionFinalizeOptimizedFunction(); DeadArgumentFinalizeOptimizedFunction(); // Create the thunk body ! F->setThunk(IsThunk); // The thunk now carries the information on how the signature is // optimized. If we inline the thunk, we will get the benefit of calling // the signature optimized function without additional setup on the // caller side. F->setInlineStrategy(AlwaysInline); SILBasicBlock *ThunkBody = F->createBasicBlock(); for (auto &ArgDesc : ArgumentDescList) { ThunkBody->createBBArg(ArgDesc.Arg->getType(), ArgDesc.Decl); } SILLocation Loc = ThunkBody->getParent()->getLocation(); SILBuilder Builder(ThunkBody); Builder.setCurrentDebugScope(ThunkBody->getParent()->getDebugScope()); FunctionRefInst *FRI = Builder.createFunctionRef(Loc, NewF); // Create the args for the thunk's apply, ignoring any dead arguments. llvm::SmallVector<SILValue, 8> ThunkArgs; for (auto &ArgDesc : ArgumentDescList) { addThunkArgument(ArgDesc, Builder, ThunkBody, ThunkArgs); } // We are ignoring generic functions and functions with out parameters for // now. 
SILValue ReturnValue; SILType LoweredType = NewF->getLoweredType(); SILType ResultType = LoweredType.getFunctionInterfaceResultType(); auto FunctionTy = LoweredType.castTo<SILFunctionType>(); if (FunctionTy->hasErrorResult()) { // We need a try_apply to call a function with an error result. SILFunction *Thunk = ThunkBody->getParent(); SILBasicBlock *NormalBlock = Thunk->createBasicBlock(); ReturnValue = NormalBlock->createBBArg(ResultType, 0); SILBasicBlock *ErrorBlock = Thunk->createBasicBlock(); SILType ErrorProtocol = SILType::getPrimitiveObjectType(FunctionTy->getErrorResult().getType()); auto *ErrorArg = ErrorBlock->createBBArg(ErrorProtocol, 0); Builder.createTryApply(Loc, FRI, LoweredType, ArrayRef<Substitution>(), ThunkArgs, NormalBlock, ErrorBlock); Builder.setInsertionPoint(ErrorBlock); Builder.createThrow(Loc, ErrorArg); Builder.setInsertionPoint(NormalBlock); } else { ReturnValue = Builder.createApply(Loc, FRI, LoweredType, ResultType, ArrayRef<Substitution>(), ThunkArgs, false); } // Set up the return results. if (NewF->getLoweredFunctionType()->isNoReturn()) { Builder.createUnreachable(Loc); } else { Builder.createReturn(Loc, ReturnValue); } // Do the last bit work to finalize the thunk. OwnedToGuaranteedFinalizeThunkFunction(Builder, F); assert(F->getDebugScope()->Parent != NewF->getDebugScope()->Parent); }