Example #1
void SILGenFunction::emitInjectOptionalValueInto(SILLocation loc,
                                                 ArgumentSource &&value,
                                                 SILValue dest,
                                                 const TypeLowering &optTL) {
  SILType optType = optTL.getLoweredType();
  assert(dest->getType() == optType.getAddressType());
  auto loweredPayloadTy = optType.getAnyOptionalObjectType();
  assert(loweredPayloadTy);

  // Project out the payload area.
  auto someDecl = getASTContext().getOptionalSomeDecl();
  auto destPayload =
    B.createInitEnumDataAddr(loc, dest, someDecl,
                             loweredPayloadTy.getAddressType());
  
  // Emit the value into the payload area.
  TemporaryInitialization emitInto(destPayload, CleanupHandle::invalid());
  std::move(value).forwardInto(*this, &emitInto);
  
  // Inject the tag.
  B.createInjectEnumAddr(loc, dest, someDecl);
}
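
The three steps above (project the payload address, emit the value into it, inject the tag) follow the general rule for initializing a tagged union: fill the payload first, flip the discriminant last. Below is a minimal, self-contained C++ sketch of that ordering with a hypothetical hand-rolled optional; it illustrates the pattern only and uses none of the SILGen APIs.

#include <cassert>
#include <new>

// Toy stand-in for an optional's storage: a raw payload buffer plus a tag.
struct ToyOptionalInt {
  alignas(int) unsigned char Payload[sizeof(int)];
  bool HasValue = false;
};

static void injectValueInto(int value, ToyOptionalInt &dest) {
  // "Project out the payload area" and emit the value into it.
  new (dest.Payload) int(value);
  // "Inject the tag" only once the payload is fully initialized.
  dest.HasValue = true;
}

int main() {
  ToyOptionalInt opt;
  injectValueInto(42, opt);
  assert(opt.HasValue &&
         *std::launder(reinterpret_cast<int *>(opt.Payload)) == 42);
}
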
Example #2
static void splitDestructure(SILBuilder &B, SILInstruction *I, SILValue Op) {
  assert((isa<DestructureStructInst>(I) || isa<DestructureTupleInst>(I)) &&
         "Only destructure operations can be passed to splitDestructure");

  SILModule &M = I->getModule();
  SILLocation Loc = I->getLoc();
  SILType OpType = Op->getType();

  llvm::SmallVector<Projection, 8> Projections;
  Projection::getFirstLevelProjections(OpType, M, Projections);
  assert(Projections.size() == I->getNumResults());

  llvm::SmallVector<SILValue, 8> NewValues;
  for (unsigned i : indices(Projections)) {
    const auto &Proj = Projections[i];
    NewValues.push_back(Proj.createObjectProjection(B, Loc, Op).get());
    assert(NewValues.back()->getType() == I->getResults()[i]->getType() &&
           "Expected created projections and results to be the same types");
  }

  I->replaceAllUsesPairwiseWith(NewValues);
  I->eraseFromParent();
}
Example #3
void CallSiteDescriptor::extendArgumentLifetime(
    SILValue Arg, SILArgumentConvention ArgConvention) const {
  assert(!CInfo->LifetimeFrontier.empty() &&
         "Need a post-dominating release(s)");

  auto ArgTy = Arg->getType();

  // Extend the lifetime of a captured argument to cover the callee.
  SILBuilderWithScope Builder(getClosure());

  // Indirect non-inout arguments are not supported yet.
  assert(!isNonInoutIndirectSILArgument(Arg, ArgConvention));

  if (ArgTy.isObject()) {
    Builder.createRetainValue(getClosure()->getLoc(), Arg,
                              Builder.getDefaultAtomicity());
    for (auto *I : CInfo->LifetimeFrontier) {
      Builder.setInsertionPoint(I);
      Builder.createReleaseValue(getClosure()->getLoc(), Arg,
                                 Builder.getDefaultAtomicity());
    }
  }
}
Example #4
/// Compute the subelement number indicated by the specified pointer (which is
/// derived from the root by a series of tuple/struct element addresses) by
/// treating the type as a linearized namespace with sequential elements.  For
/// example, given:
///
///   root = alloc { a: { c: i64, d: i64 }, b: (i64, i64) }
///   tmp1 = struct_element_addr root, 1
///   tmp2 = tuple_element_addr tmp1, 0
///
/// This will return a subelement number of 2.
///
/// If this pointer points into an existential projection, it returns ~0U.
///
static unsigned computeSubelement(SILValue Pointer, SILInstruction *RootInst) {
  unsigned SubEltNumber = 0;
  SILModule &M = RootInst->getModule();
  
  while (1) {
    // If we got to the root, we're done.
    if (RootInst == Pointer.getDef())
      return SubEltNumber;
    
    auto *Inst = cast<SILInstruction>(Pointer);
    if (auto *TEAI = dyn_cast<TupleElementAddrInst>(Inst)) {
      SILType TT = TEAI->getOperand().getType();
      
      // Keep track of what subelement is being referenced.
      for (unsigned i = 0, e = TEAI->getFieldNo(); i != e; ++i) {
        SubEltNumber += getNumSubElements(TT.getTupleElementType(i), M);
      }
      Pointer = TEAI->getOperand();
    } else if (auto *SEAI = dyn_cast<StructElementAddrInst>(Inst)) {
      SILType ST = SEAI->getOperand().getType();
      
      // Keep track of what subelement is being referenced.
      StructDecl *SD = SEAI->getStructDecl();
      for (auto *D : SD->getStoredProperties()) {
        if (D == SEAI->getField()) break;
        SubEltNumber += getNumSubElements(ST.getFieldType(D, M), M);
      }
      
      Pointer = SEAI->getOperand();
    } else {
      assert((isa<InitExistentialAddrInst>(Inst) ||
              isa<InjectEnumAddrInst>(Inst)) &&
             "Unknown access path instruction");
      // Cannot promote loads and stores from within an existential projection.
      return ~0U;
    }
  }
}
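
The doc comment's example can be reproduced with a toy model of the linearization: a leaf counts as one subelement, an aggregate's count is the sum over its fields, and a projection's index is the sum of the subelement counts of everything to its left. A self-contained sketch (hypothetical ToyType, not SILType or the real getNumSubElements):

#include <cassert>
#include <cstdio>
#include <vector>

// Toy aggregate type: no children means a leaf (one subelement);
// otherwise the subelement count is the sum over the fields.
struct ToyType {
  std::vector<ToyType> Children;
};

static unsigned numSubElements(const ToyType &T) {
  if (T.Children.empty())
    return 1;
  unsigned N = 0;
  for (const ToyType &C : T.Children)
    N += numSubElements(C);
  return N;
}

// Subelement number of a path of field indices, mirroring the loops over
// getNumSubElements above: skip everything to the left at each level.
static unsigned subelementOfPath(const ToyType &Root,
                                 const std::vector<unsigned> &Path) {
  unsigned SubElt = 0;
  const ToyType *T = &Root;
  for (unsigned Field : Path) {
    for (unsigned i = 0; i != Field; ++i)
      SubElt += numSubElements(T->Children[i]);
    T = &T->Children[Field];
  }
  return SubElt;
}

int main() {
  // root = { a: { c: i64, d: i64 }, b: (i64, i64) } from the comment.
  ToyType Leaf;
  ToyType A{{Leaf, Leaf}}, B{{Leaf, Leaf}};
  ToyType Root{{A, B}};
  // tmp1 = element 1 (b); tmp2 = element 0 of tmp1 => subelement 2.
  assert(subelementOfPath(Root, {1, 0}) == 2);
  std::printf("subelement = %u\n", subelementOfPath(Root, {1, 0}));
}
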
Example #5
AccessedStorage::Kind AccessedStorage::classify(SILValue base) {
  switch (base->getKind()) {
  // An AllocBox is a fully identified memory location.
  case ValueKind::AllocBoxInst:
    return Box;
  // An AllocStack is a fully identified memory location, which may occur
  // after inlining code already subjected to stack promotion.
  case ValueKind::AllocStackInst:
    return Stack;
  case ValueKind::GlobalAddrInst:
    return Global;
  case ValueKind::ApplyInst: {
    FullApplySite apply(cast<ApplyInst>(base));
    if (auto *funcRef = apply.getReferencedFunction()) {
      if (getVariableOfGlobalInit(funcRef))
        return Global;
    }
    return Unidentified;
  }
  case ValueKind::RefElementAddrInst:
    return Class;
  // A yield is effectively a nested access, enforced independently in
  // the caller and callee.
  case ValueKind::BeginApplyResult:
    return Yield;
  // A function argument is effectively a nested access, enforced
  // independently in the caller and callee.
  case ValueKind::SILFunctionArgument:
    return Argument;
  // View the outer begin_access as a separate location because nested
  // accesses do not conflict with each other.
  case ValueKind::BeginAccessInst:
    return Nested;
  default:
    return Unidentified;
  }
}
Example #6
static void mapOperands(SILInstruction *I,
                        const llvm::DenseMap<ValueBase *, SILValue> &ValueMap) {
  for (auto &Opd : I->getAllOperands()) {
    SILValue OrigVal = Opd.get();
    ValueBase *OrigDef = OrigVal.getDef();
    auto Found = ValueMap.find(OrigDef);
    if (Found != ValueMap.end()) {
      SILValue MappedVal = Found->second;
      unsigned ResultIdx = OrigVal.getResultNumber();
      // All mapped instructions have their result number set to zero, except
      // for arguments that we followed along one edge to their incoming value
      // on that edge.
      if (isa<SILArgument>(OrigDef))
        ResultIdx = MappedVal.getResultNumber();
      Opd.set(SILValue(MappedVal.getDef(), ResultIdx));
    }
  }
}
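
The same remap-on-the-fly pattern, reduced to a toy: operands whose definition appears in the map are redirected to the mapped value, and everything else is left alone. All names below are hypothetical; this sketches the idea without the SIL value/operand classes and without the result-index bookkeeping:

#include <cstdio>
#include <unordered_map>
#include <vector>

// Toy values and a toy remap: while cloning, operands that refer to an
// already-remapped def are redirected through the map.
struct ToyValue {
  int Id;
};

static void mapOperands(std::vector<ToyValue *> &Operands,
                        const std::unordered_map<ToyValue *, ToyValue *> &Map) {
  for (ToyValue *&Op : Operands) {
    auto Found = Map.find(Op);
    if (Found != Map.end())
      Op = Found->second; // like Opd.set(MappedVal)
  }
}

int main() {
  ToyValue Orig{1}, Clone{2}, Other{3};
  std::vector<ToyValue *> Operands = {&Orig, &Other};
  mapOperands(Operands, {{&Orig, &Clone}});
  std::printf("%d %d\n", Operands[0]->Id, Operands[1]->Id); // prints: 2 3
}
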
Example #7
NullablePtr<SILInstruction>
Projection::
createValueProjection(SILBuilder &B, SILLocation Loc, SILValue Base) const {
  // Grab Base's type.
  SILType BaseTy = Base.getType();

  // If BaseTy is not an object type, bail.
  if (!BaseTy.isObject())
    return nullptr;

  // If this projection is associated with an address type, convert its type
  // to an object type.
  //
  // We explicitly do not convert Type to be an object if it is a local
  // storage type since we want it to fail.
  SILType Ty = Type.isAddress() ? Type.getObjectType() : Type;

  if (!Ty.isObject())
    return nullptr;

  // Ok, we now know that the type of Base and the type represented by the
  // base of this projection match and that this projection can be
  // represented as a value. Create the instruction if we can. Otherwise,
  // return nullptr.
  switch (getKind()) {
  case ProjectionKind::Struct:
    return B.createStructExtract(Loc, Base, cast<VarDecl>(getDecl()));
  case ProjectionKind::Tuple:
    return B.createTupleExtract(Loc, Base, getIndex());
  case ProjectionKind::Index:
    return nullptr;
  case ProjectionKind::Enum:
    return B.createUncheckedEnumData(Loc, Base,
                                     cast<EnumElementDecl>(getDecl()));
  case ProjectionKind::Class:
    return nullptr;
  }
}
Example #8
void MemoryToRegisters::removeSingleBlockAllocation(AllocStackInst *ASI) {
  DEBUG(llvm::dbgs() << "*** Promoting in-block: " << *ASI);

  SILBasicBlock *BB = ASI->getParent();

  // The default value of the AllocStack is NULL because we don't have
  // uninitialized variables in Swift.
  SILValue RunningVal = SILValue();

  // For all instructions in the block.
  for (auto BBI = BB->begin(), E = BB->end(); BBI != E;) {
    SILInstruction *Inst = &*BBI;
    ++BBI;

    // Remove instructions that we are loading from. Replace the loaded value
    // with our running value.
    if (isLoadFromStack(Inst, ASI)) {
      if (!RunningVal) {
        assert(ASI->getElementType().isVoid() &&
               "Expected initialization of non-void type!");
        RunningVal = SILUndef::get(ASI->getElementType(), ASI->getModule());
      }
      replaceLoad(cast<LoadInst>(Inst), RunningVal, ASI);
      NumInstRemoved++;
      continue;
    }

    // Remove stores and record the value that we are saving as the running
    // value.
    if (auto *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getDest() == ASI) {
        RunningVal = SI->getSrc();
        Inst->eraseFromParent();
        NumInstRemoved++;
        continue;
      }
    }

    // Replace debug_value_addr with debug_value of the promoted value.
    if (auto *DVAI = dyn_cast<DebugValueAddrInst>(Inst)) {
      if (DVAI->getOperand() == ASI) {
        if (RunningVal) {
          promoteDebugValueAddr(DVAI, RunningVal, B);
        } else {
          // Drop debug_value_addr of uninitialized void values.
          assert(ASI->getElementType().isVoid() &&
                 "Expected initialization of non-void type!");
          DVAI->eraseFromParent();
        }
      }
      continue;
    }

    // Replace destroys with a release of the value.
    if (auto *DAI = dyn_cast<DestroyAddrInst>(Inst)) {
      if (DAI->getOperand() == ASI) {
        replaceDestroy(DAI, RunningVal);
      }
      continue;
    }

    // Remove deallocation.
    if (auto *DSI = dyn_cast<DeallocStackInst>(Inst)) {
      if (DSI->getOperand() == ASI) {
        Inst->eraseFromParent();
        NumInstRemoved++;
        // No need to continue scanning after deallocation.
        break;
      }
    }

    SILValue InstVal = Inst;
    
    // Remove dead address instructions that may be uses of the allocation.
    while (InstVal->use_empty() && (isa<StructElementAddrInst>(InstVal) ||
                                    isa<TupleElementAddrInst>(InstVal))) {
      SILInstruction *I = cast<SILInstruction>(InstVal);
      InstVal = I->getOperand(0);
      I->eraseFromParent();
      NumInstRemoved++;
    }
  }
}
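
The core of the in-block promotion is the single forward walk with a running value: stores update it and disappear, loads are replaced by it, and nothing after the dealloc matters. A toy straight-line model of just that walk (hypothetical Op/OpKind, not the SIL instruction classes; -1 stands in for SILUndef):

#include <cstdio>
#include <vector>

enum class OpKind { Store, Load, Dealloc };
struct Op {
  OpKind Kind;
  int Value; // operand of a Store; unused otherwise
};

int main() {
  // A straight-line block touching one stack slot.
  std::vector<Op> Block = {{OpKind::Store, 7},
                           {OpKind::Load, 0},
                           {OpKind::Store, 9},
                           {OpKind::Load, 0},
                           {OpKind::Dealloc, 0}};
  int RunningVal = -1; // -1 plays the role of SILUndef
  for (const Op &I : Block) {
    if (I.Kind == OpKind::Store) {
      RunningVal = I.Value; // record the value; the store itself dies
    } else if (I.Kind == OpKind::Load) {
      std::printf("load -> %d\n", RunningVal); // replaceLoad
    } else {
      break; // no need to continue scanning after deallocation
    }
  }
}
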
Example #9
/// \brief Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that the callee function is actually
/// determinable from the SIL), or nullptr otherwise. This assumes that the
/// SIL is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
static SILFunction *
getCalleeFunction(FullApplySite AI, bool &IsThick,
                  SmallVectorImpl<SILValue>& CaptureArgs,
                  SmallVectorImpl<SILValue>& FullArgs,
                  PartialApplyInst *&PartialApply,
                  SILModule::LinkingMode Mode) {
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    if (!ABI)
      return nullptr;
    assert(LI->getOperand().getResultNumber() == 1);

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (static_cast<SILInstruction*>(I) == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest().getDef() == ABI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the alloc other than loads, retains, releases
        // and dealloc stacks
        for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE;
             ++UI)
          if (UI.getUser() != SI && !isa<LoadInst>(UI.getUser()) &&
              !isa<StrongRetainInst>(UI.getUser()) &&
              !isa<StrongReleaseInst>(UI.getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instruction, since those are the patterns
  // generated when using auto closures.
  if (PartialApplyInst *PAI =
        dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    for (const auto &Arg : PAI->getArguments()) {
      CaptureArgs.push_back(Arg);
      FullArgs.push_back(Arg);
    }

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (ThinToThickFunctionInst *TTTFI =
               dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue);

  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;
    
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If CalleeFunction is a declaration, see if we can load it. If we fail to
  // load it, bail.
  if (CalleeFunction->empty()
      && !AI.getModule().linkFunction(CalleeFunction, Mode))
    return nullptr;
  return CalleeFunction;
}
Example #10
/// Generate a new apply of a function_ref to replace an apply of a
/// witness_method when we've determined the actual function we'll end
/// up calling.
static ApplySite devirtualizeWitnessMethod(ApplySite AI, SILFunction *F,
                                           ArrayRef<Substitution> Subs) {
  // We know the witness thunk and the corresponding set of substitutions
  // required to invoke the protocol method at this point.
  auto &Module = AI.getModule();

  // Collect all the required substitutions.
  //
  // The complete set of substitutions may be different, e.g. because the found
  // witness thunk F may have been created by a specialization pass and have
  // additional generic parameters.
  SmallVector<Substitution, 16> NewSubstList(Subs.begin(), Subs.end());

  // Add the non-self-derived substitutions from the original application.
  ArrayRef<Substitution> SubstList =
      AI.getSubstitutionsWithoutSelfSubstitution();

  for (auto &origSub : SubstList)
    if (!origSub.getArchetype()->isSelfDerived())
      NewSubstList.push_back(origSub);

  // Figure out the exact bound type of the function to be called by
  // applying all substitutions.
  auto CalleeCanType = F->getLoweredFunctionType();
  auto SubstCalleeCanType = CalleeCanType->substGenericArgs(
    Module, Module.getSwiftModule(), NewSubstList);

  // Collect arguments from the apply instruction.
  SmallVector<SILValue, 4> Arguments;

  auto ParamTypes = SubstCalleeCanType->getParameterSILTypes();

  // Iterate over the non-self arguments and add them to the
  // new argument list, upcasting when required.
  SILBuilderWithScope B(AI.getInstruction());
  for (unsigned ArgN = 0, ArgE = AI.getNumArguments(); ArgN != ArgE; ++ArgN) {
    SILValue A = AI.getArgument(ArgN);
    auto ParamType = ParamTypes[ParamTypes.size() - AI.getNumArguments() + ArgN];
    if (A.getType() != ParamType)
      A = B.createUpcast(AI.getLoc(), A, ParamType);

    Arguments.push_back(A);
  }

  // Replace old apply instruction by a new apply instruction that invokes
  // the witness thunk.
  SILBuilderWithScope Builder(AI.getInstruction());
  SILLocation Loc = AI.getLoc();
  FunctionRefInst *FRI = Builder.createFunctionRef(Loc, F);

  auto SubstCalleeSILType = SILType::getPrimitiveObjectType(SubstCalleeCanType);
  auto ResultSILType = SubstCalleeCanType->getSILResult();
  ApplySite SAI;

  if (auto *A = dyn_cast<ApplyInst>(AI))
    SAI = Builder.createApply(Loc, FRI, SubstCalleeSILType,
                              ResultSILType, NewSubstList, Arguments,
                              A->isNonThrowing());
  if (auto *TAI = dyn_cast<TryApplyInst>(AI))
    SAI = Builder.createTryApply(Loc, FRI, SubstCalleeSILType,
                                 NewSubstList, Arguments,
                                 TAI->getNormalBB(), TAI->getErrorBB());
  if (auto *PAI = dyn_cast<PartialApplyInst>(AI))
    SAI = Builder.createPartialApply(Loc, FRI, SubstCalleeSILType,
                                     NewSubstList, Arguments, PAI->getType());

  NumWitnessDevirt++;
  return SAI;
}
Example #11
/// \brief Devirtualize an apply of a class method.
///
/// \p AI is the apply to devirtualize.
/// \p ClassOrMetatype is a class value or metatype value that is the
///    self argument of the apply we will devirtualize.
/// \return the result value of the new ApplyInst if one was created, or null
///         otherwise.
DevirtualizationResult swift::devirtualizeClassMethod(FullApplySite AI,
                                                     SILValue ClassOrMetatype) {
  DEBUG(llvm::dbgs() << "    Trying to devirtualize : " << *AI.getInstruction());

  SILModule &Mod = AI.getModule();
  auto *CMI = cast<ClassMethodInst>(AI.getCallee());
  auto ClassOrMetatypeType = ClassOrMetatype.getType();
  auto *F = getTargetClassMethod(Mod, ClassOrMetatypeType, CMI->getMember());

  CanSILFunctionType GenCalleeType = F->getLoweredFunctionType();

  auto Subs = getSubstitutionsForCallee(Mod, GenCalleeType,
                                        ClassOrMetatypeType, AI);
  CanSILFunctionType SubstCalleeType = GenCalleeType;
  if (GenCalleeType->isPolymorphic())
    SubstCalleeType = GenCalleeType->substGenericArgs(Mod, Mod.getSwiftModule(), Subs);

  SILBuilderWithScope B(AI.getInstruction());
  FunctionRefInst *FRI = B.createFunctionRef(AI.getLoc(), F);

  // Create the argument list for the new apply, casting when needed
  // in order to handle covariant indirect return types and
  // contravariant argument types.
  llvm::SmallVector<SILValue, 8> NewArgs;
  auto Args = AI.getArguments();
  auto ParamTypes = SubstCalleeType->getParameterSILTypes();

  for (unsigned i = 0, e = Args.size() - 1; i != e; ++i)
    NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(), Args[i],
                                                   Args[i].getType(),
                                                   ParamTypes[i]).getValue());

  // Add the self argument, upcasting if required because we're
  // calling a base class's method.
  auto SelfParamTy = SubstCalleeType->getSelfParameter().getSILType();
  NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(),
                                                 ClassOrMetatype,
                                                 ClassOrMetatypeType,
                                                 SelfParamTy).getValue());

  // If we have a direct return type, make sure we use the subst callee return
  // type. If we have an indirect return type, AI's return type of the empty
  // tuple should be ok.
  SILType ResultTy = AI.getType();
  if (!SubstCalleeType->hasIndirectResult()) {
    ResultTy = SubstCalleeType->getSILResult();
  }

  SILType SubstCalleeSILType =
    SILType::getPrimitiveObjectType(SubstCalleeType);
  FullApplySite NewAI;

  SILBasicBlock *ResultBB = nullptr;
  SILBasicBlock *NormalBB = nullptr;
  SILValue ResultValue;
  bool ResultCastRequired = false;
  SmallVector<Operand *, 4> OriginalResultUses;

  if (!isa<TryApplyInst>(AI)) {
    NewAI = B.createApply(AI.getLoc(), FRI, SubstCalleeSILType, ResultTy,
                          Subs, NewArgs, cast<ApplyInst>(AI)->isNonThrowing());
    ResultValue = SILValue(NewAI.getInstruction(), 0);
  } else {
    auto *TAI = cast<TryApplyInst>(AI);
    // Create new normal and error BBs only if:
    // - re-using a BB would create a critical edge
    // - or, the result of the new apply would be of different
    //   type than the argument of the original normal BB.
    if (TAI->getNormalBB()->getSinglePredecessor())
      ResultBB = TAI->getNormalBB();
    else {
      ResultBB = B.getFunction().createBasicBlock();
      ResultBB->createBBArg(ResultTy);
    }

    NormalBB = TAI->getNormalBB();

    SILBasicBlock *ErrorBB = nullptr;
    if (TAI->getErrorBB()->getSinglePredecessor())
      ErrorBB = TAI->getErrorBB();
    else {
      ErrorBB = B.getFunction().createBasicBlock();
      ErrorBB->createBBArg(TAI->getErrorBB()->getBBArg(0)->getType());
    }

    NewAI = B.createTryApply(AI.getLoc(), FRI, SubstCalleeSILType,
                             Subs, NewArgs,
                             ResultBB, ErrorBB);
    if (ErrorBB != TAI->getErrorBB()) {
      B.setInsertionPoint(ErrorBB);
      B.createBranch(TAI->getLoc(), TAI->getErrorBB(),
                     {ErrorBB->getBBArg(0)});
    }

    // Does the result value need to be cast?
    ResultCastRequired = ResultTy != NormalBB->getBBArg(0)->getType();

    if (ResultBB != NormalBB)
      B.setInsertionPoint(ResultBB);
    else if (ResultCastRequired) {
      B.setInsertionPoint(NormalBB->begin());
      // Collect all uses, before casting.
      for (auto *Use : NormalBB->getBBArg(0)->getUses()) {
        OriginalResultUses.push_back(Use);
      }
      NormalBB->getBBArg(0)->replaceAllUsesWith(SILUndef::get(AI.getType(), Mod));
      NormalBB->replaceBBArg(0, ResultTy, nullptr);
    }

    // The result value is passed as a parameter to the normal block.
    ResultValue = ResultBB->getBBArg(0);
  }

  // Check if any casting is required for the return value.
  ResultValue = castValueToABICompatibleType(&B, NewAI.getLoc(), ResultValue,
                                             ResultTy, AI.getType()).getValue();

  DEBUG(llvm::dbgs() << "        SUCCESS: " << F->getName() << "\n");
  NumClassDevirt++;

  if (NormalBB) {
    if (NormalBB != ResultBB) {
      // If an artificial normal BB was introduced, branch
      // to the original normal BB.
      B.createBranch(NewAI.getLoc(), NormalBB, { ResultValue });
    } else if (ResultCastRequired) {
      // Update all original uses with the new value.
      for (auto *Use : OriginalResultUses) {
        Use->set(ResultValue);
      }
    }
    return std::make_pair(NewAI.getInstruction(), NewAI);
  }

  // We need to return a pair of values here:
  // - the first one is the actual result of the devirtualized call, possibly
  //   casted into an appropriate type. This SILValue may be a BB arg, if it
  //   was a cast between optional types.
  // - the second one is the new apply site.
  return std::make_pair(ResultValue.getDef(), NewAI);
}
Example #12
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            const ArrayRef<ProtocolConformance *> &conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F) {
  // Mark the needed conformances as used.
  for (auto *conformance : conformances)
    SGM.useConformance(conformance);

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype.getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto box = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           concreteTL.getLoweredType(),
                                           conformances);
    auto existential = box->getExistentialResult();
    auto valueAddr = box->getValueAddressResult();

    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }
    
    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTL.getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
Example #13
static bool isNonInoutIndirectSILArgument(SILValue Arg,
                                          SILArgumentConvention ArgConvention) {
  return !Arg->getType().isObject() && ArgConvention.isIndirectConvention() &&
         ArgConvention != SILArgumentConvention::Indirect_Inout &&
         ArgConvention != SILArgumentConvention::Indirect_InoutAliasable;
}
Example #14
// Attempt to remove the array allocated at NewArrayValue and release its
// refcounted elements.
//
// This is tightly coupled with the implementation of array.uninitialized.
// The call to allocate an uninitialized array returns two values:
// (Array<E> ArrayBase, UnsafeMutablePointer<E> ArrayElementStorage)
//
// TODO: This relies on the lowest level array.uninitialized not being
// inlined. To do better we could either run this pass before semantic inlining,
// or we could also handle calls to array.init.
static bool removeAndReleaseArray(SILValue NewArrayValue) {
  TupleExtractInst *ArrayDef = nullptr;
  TupleExtractInst *StorageAddress = nullptr;
  for (auto *Op : NewArrayValue->getUses()) {
    auto *TupleElt = dyn_cast<TupleExtractInst>(Op->getUser());
    if (!TupleElt)
      return false;
    switch (TupleElt->getFieldNo()) {
    default:
      return false;
    case 0:
      ArrayDef = TupleElt;
      break;
    case 1:
      StorageAddress = TupleElt;
      break;
    }
  }
  if (!ArrayDef)
    return false; // No Array object to delete.

  assert(!ArrayDef->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array object uses.
  DeadObjectAnalysis DeadArray(ArrayDef);
  if (!DeadArray.analyze())
    return false;

  // Require all stores to be into the array storage not the array object,
  // otherwise bail.
  bool HasStores = false;
  DeadArray.visitStoreLocations([&](ArrayRef<StoreInst*>){ HasStores = true; });
  if (HasStores)
    return false;

  // Remove references to empty arrays.
  if (!StorageAddress) {
    removeInstructions(DeadArray.getAllUsers());
    return true;
  }
  assert(StorageAddress->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array storage uses.
  DeadObjectAnalysis DeadStorage(StorageAddress);
  if (!DeadStorage.analyze())
    return false;

  // Find array object lifetime.
  ValueLifetimeAnalysis VLA(ArrayDef);
  ValueLifetime Lifetime = VLA.computeFromUserList(DeadArray.getAllUsers());

  // Check that all storage users are in the Array's live blocks and never the
  // last user.
  for (auto *User : DeadStorage.getAllUsers()) {
    auto *BB = User->getParent();
    if (!VLA.successorHasLiveIn(BB) &&
        VLA.findLastSpecifiedUseInBlock(BB) == User) {
      return false;
    }
  }
  // For each store location, insert releases.
  // This makes a strong assumption that the allocated object is released on all
  // paths in which some object initialization occurs.
  SILSSAUpdater SSAUp;
  DeadStorage.visitStoreLocations([&] (ArrayRef<StoreInst*> Stores) {
      insertReleases(Stores, Lifetime.getLastUsers(), SSAUp);
    });

  // Delete all uses of the dead array and its storage address.
  removeInstructions(DeadArray.getAllUsers());
  removeInstructions(DeadStorage.getAllUsers());

  return true;
}
Example #15
/// Find all closures that may be propagated into the given function-type value.
///
/// Searches the use-def chain from the given value upward until a partial_apply
/// is reached. Populates `results` with the set of partial_apply instructions.
///
/// `funcVal` may be either a function type or an Optional function type. This
/// might be called on a directly applied value or on a call argument, which may
/// in turn be applied within the callee.
void swift::findClosuresForFunctionValue(
    SILValue funcVal, TinyPtrVector<PartialApplyInst *> &results) {

  SILType funcTy = funcVal->getType();
  // Handle `Optional<@convention(block) @noescape (_)->(_)>`
  if (auto optionalObjTy = funcTy.getOptionalObjectType())
    funcTy = optionalObjTy;
  assert(funcTy.is<SILFunctionType>());

  SmallVector<SILValue, 4> worklist;
  // Avoid exponential path exploration and prevent duplicate results.
  llvm::SmallDenseSet<SILValue, 8> visited;
  auto worklistInsert = [&](SILValue V) {
    if (visited.insert(V).second)
      worklist.push_back(V);
  };
  worklistInsert(funcVal);

  while (!worklist.empty()) {
    SILValue V = worklist.pop_back_val();

    if (auto *I = V->getDefiningInstruction()) {
      // Look through copies, borrows, and conversions.
      //
      // Handle copy_block and copy_block_without_actually_escaping before
      // calling findClosureStoredIntoBlock.
      if (SingleValueInstruction *SVI = getSingleValueCopyOrCast(I)) {
        worklistInsert(SVI->getOperand(0));
        continue;
      }
    }
    // Look through Optionals.
    if (V->getType().getOptionalObjectType()) {
      auto *EI = dyn_cast<EnumInst>(V);
      if (EI && EI->hasOperand()) {
        worklistInsert(EI->getOperand());
      }
      // Ignore the .None case.
      continue;
    }
    // Look through Phis.
    //
    // This should be done before calling findClosureStoredIntoBlock.
    if (auto *arg = dyn_cast<SILPhiArgument>(V)) {
      SmallVector<std::pair<SILBasicBlock *, SILValue>, 2> blockArgs;
      arg->getIncomingPhiValues(blockArgs);
      for (auto &blockAndArg : blockArgs)
        worklistInsert(blockAndArg.second);

      continue;
    }
    // Look through ObjC closures.
    auto fnType = V->getType().getAs<SILFunctionType>();
    if (fnType
        && fnType->getRepresentation() == SILFunctionTypeRepresentation::Block) {
      if (SILValue storedClosure = findClosureStoredIntoBlock(V))
        worklistInsert(storedClosure);

      continue;
    }
    if (auto *PAI = dyn_cast<PartialApplyInst>(V)) {
      SILValue thunkArg = isPartialApplyOfReabstractionThunk(PAI);
      if (thunkArg) {
        // Handle reabstraction thunks recursively. This may reabstract over
        // @convention(block).
        worklistInsert(thunkArg);
        continue;
      }
      results.push_back(PAI);
      continue;
    }
    // Ignore other unrecognized values that feed this applied argument.
  }
}
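
The traversal above is a standard worklist walk where the visited set does double duty: it prevents duplicate results and caps the work on diamond-shaped use-def graphs (e.g. two phi paths reaching the same closure). A toy graph version of that discipline, with hypothetical Node/findClosures names and none of the SIL look-through cases:

#include <cstdio>
#include <set>
#include <vector>

// Toy use-def node: either a "closure" we want to collect, or a value
// that forwards to other defs (standing in for copies, phis, casts...).
struct Node {
  bool IsClosure;
  std::vector<const Node *> Operands;
};

static void findClosures(const Node *Root,
                         std::vector<const Node *> &Results) {
  std::vector<const Node *> Worklist;
  std::set<const Node *> Visited;
  auto WorklistInsert = [&](const Node *N) {
    if (Visited.insert(N).second) // already visited => already handled
      Worklist.push_back(N);
  };
  WorklistInsert(Root);
  while (!Worklist.empty()) {
    const Node *N = Worklist.back();
    Worklist.pop_back();
    if (N->IsClosure) {
      Results.push_back(N); // like pushing a partial_apply
      continue;
    }
    for (const Node *Op : N->Operands)
      WorklistInsert(Op);
  }
}

int main() {
  Node Closure{true, {}};
  Node Left{false, {&Closure}}, Right{false, {&Closure}};
  Node Phi{false, {&Left, &Right}}; // diamond: Closure reachable twice
  std::vector<const Node *> Results;
  findClosures(&Phi, Results);
  std::printf("found %zu closure(s)\n", Results.size()); // prints: 1
}
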
Example #16
void SILGenFunction::emitValueConstructor(ConstructorDecl *ctor) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ctor);

  if (ctor->isMemberwiseInitializer())
    return emitImplicitValueConstructor(*this, ctor);

  // True if this constructor delegates to a peer constructor with self.init().
  bool isDelegating = ctor->getDelegatingOrChainedInitKind(nullptr) ==
    ConstructorDecl::BodyInitKind::Delegating;

  // Get the 'self' decl and type.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  auto &lowering = getTypeLowering(selfDecl->getType()->getInOutObjectType());
  SILType selfTy = lowering.getLoweredType();
  (void)selfTy;
  assert(!selfTy.getClassOrBoundGenericClass()
         && "can't emit a class ctor here");

  // Allocate the local variable for 'self'.
  emitLocalVariableWithCleanup(selfDecl, false)->finishInitialization(*this);
  
  // Mark self as being uninitialized so that DI knows where it is and how to
  // check for it.
  SILValue selfLV;
  {
    auto &SelfVarLoc = VarLocs[selfDecl];
    auto MUIKind = isDelegating ? MarkUninitializedInst::DelegatingSelf
                                : MarkUninitializedInst::RootSelf;
    selfLV = B.createMarkUninitialized(selfDecl, SelfVarLoc.value, MUIKind);
    SelfVarLoc.value = selfLV;
  }
  
  // Emit the prolog.
  emitProlog(ctor->getParameterList(1), ctor->getResultType(), ctor,
             ctor->hasThrows());
  emitConstructorMetatypeArg(*this, ctor);

  // Create a basic block to jump to for the implicit 'self' return.
  // We won't emit this until after we've emitted the body.
  // The epilog takes a void return because the return of 'self' is implicit.
  prepareEpilog(Type(), ctor->hasThrows(), CleanupLocation(ctor));

  // If the constructor can fail, set up an alternative epilog for constructor
  // failure.
  SILBasicBlock *failureExitBB = nullptr;
  SILArgument *failureExitArg = nullptr;
  auto &resultLowering = getTypeLowering(ctor->getResultType());

  if (ctor->getFailability() != OTK_None) {
    SILBasicBlock *failureBB = createBasicBlock(FunctionSection::Postmatter);

    // On failure, we'll clean up everything (except self, which should have
    // been cleaned up before jumping here) and return nil instead.
    SavedInsertionPoint savedIP(*this, failureBB, FunctionSection::Postmatter);
    failureExitBB = createBasicBlock();
    Cleanups.emitCleanupsForReturn(ctor);
    // Return nil.
    if (lowering.isAddressOnly()) {
      // Inject 'nil' into the indirect return.
      assert(F.getIndirectResults().size() == 1);
      B.createInjectEnumAddr(ctor, F.getIndirectResults()[0],
                             getASTContext().getOptionalNoneDecl());
      B.createBranch(ctor, failureExitBB);

      B.setInsertionPoint(failureExitBB);
      B.createReturn(ctor, emitEmptyTuple(ctor));
    } else {
      // Pass 'nil' as the return value to the exit BB.
      failureExitArg = new (F.getModule())
        SILArgument(failureExitBB, resultLowering.getLoweredType());
      SILValue nilResult =
        B.createEnum(ctor, {}, getASTContext().getOptionalNoneDecl(),
                     resultLowering.getLoweredType());
      B.createBranch(ctor, failureExitBB, nilResult);

      B.setInsertionPoint(failureExitBB);
      B.createReturn(ctor, failureExitArg);
    }

    FailDest = JumpDest(failureBB, Cleanups.getCleanupsDepth(), ctor);
  }

  // If this is not a delegating constructor, emit member initializers.
  if (!isDelegating) {
    auto *dc = ctor->getDeclContext();
    auto *nominal = dc->getAsNominalTypeOrNominalTypeExtensionContext();
    emitMemberInitializers(dc, selfDecl, nominal);
  }

  emitProfilerIncrement(ctor->getBody());
  // Emit the constructor body.
  emitStmt(ctor->getBody());

  
  // Build a custom epilog block, since the AST representation of the
  // constructor decl (which has no self in the return type) doesn't match the
  // SIL representation.
  SILValue selfValue;
  {
    SavedInsertionPoint savedIP(*this, ReturnDest.getBlock());
    assert(B.getInsertionBB()->empty() && "Epilog already set up?");
    
    auto cleanupLoc = CleanupLocation::get(ctor);
    
    if (!lowering.isAddressOnly()) {
      // Load and return the final 'self' value.
      selfValue =
          B.createLoad(cleanupLoc, selfLV, LoadOwnershipQualifier::Unqualified);

      // Emit a retain of the loaded value, since we return it +1.
      lowering.emitCopyValue(B, cleanupLoc, selfValue);

      // Inject the self value into an optional if the constructor is failable.
      if (ctor->getFailability() != OTK_None) {
        selfValue = B.createEnum(ctor, selfValue,
                                 getASTContext().getOptionalSomeDecl(),
                                 getLoweredLoadableType(ctor->getResultType()));
      }
    } else {
      // If 'self' is address-only, copy 'self' into the indirect return slot.
      assert(F.getIndirectResults().size() == 1 &&
             "no indirect return for address-only ctor?!");

      // Get the address to which to store the result.
      SILValue completeReturnAddress = F.getIndirectResults()[0];
      SILValue returnAddress;
      switch (ctor->getFailability()) {
      // For non-failable initializers, store to the return address directly.
      case OTK_None:
        returnAddress = completeReturnAddress;
        break;
      // If this is a failable initializer, project out the payload.
      case OTK_Optional:
      case OTK_ImplicitlyUnwrappedOptional:
        returnAddress = B.createInitEnumDataAddr(ctor, completeReturnAddress,
                                       getASTContext().getOptionalSomeDecl(),
                                                 selfLV->getType());
        break;
      }
      
      // We have to do a non-take copy because someone else may be using the
      // box (e.g. someone could have closed over it).
      B.createCopyAddr(cleanupLoc, selfLV, returnAddress,
                       IsNotTake, IsInitialization);
      
      // Inject the enum tag if the result is optional because of failability.
      if (ctor->getFailability() != OTK_None) {
        // Inject the 'Some' tag.
        B.createInjectEnumAddr(ctor, completeReturnAddress,
                               getASTContext().getOptionalSomeDecl());
      }
    }
  }
  
  // Finally, emit the epilog and post-matter.
  auto returnLoc = emitEpilog(ctor, /*UsesCustomEpilog*/true);

  // Finish off the epilog by returning.  If this is a failable ctor, then we
  // actually jump to the failure epilog to keep the invariant that there is
  // only one SIL return instruction per SIL function.
  if (B.hasValidInsertionPoint()) {
    if (!failureExitBB) {
      // If we're not returning self, then return () since we're returning Void.
      if (!selfValue) {
        SILLocation loc(ctor);
        loc.markAutoGenerated();
        selfValue = emitEmptyTuple(loc);
      }
      
      B.createReturn(returnLoc, selfValue);
    } else {
      if (selfValue)
        B.createBranch(returnLoc, failureExitBB, selfValue);
      else
        B.createBranch(returnLoc, failureExitBB);
    }
  }
}
Example #17
void SILGenFunction::emitClassConstructorAllocator(ConstructorDecl *ctor) {
  assert(!ctor->isFactoryInit() && "factories should not be emitted here");

  // Emit the prolog. Since we're just going to forward our args directly
  // to the initializer, don't allocate local variables for them.
  RegularLocation Loc(ctor);
  Loc.markAutoGenerated();

  // Forward the constructor arguments.
  // FIXME: Handle 'self' along with the other body patterns.
  SmallVector<SILValue, 8> args;
  bindParametersForForwarding(ctor->getParameterList(1), args);

  SILValue selfMetaValue = emitConstructorMetatypeArg(*this, ctor);

  // Allocate the "self" value.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  SILType selfTy = getLoweredType(selfDecl->getType());
  assert(selfTy.hasReferenceSemantics() &&
         "can't emit a value type ctor here");

  // Use alloc_ref to allocate the object.
  // TODO: allow custom allocation?
  // FIXME: should have a cleanup in case of exception
  auto selfClassDecl = ctor->getDeclContext()->getAsClassOrClassExtensionContext();

  SILValue selfValue;

  // Allocate the 'self' value.
  bool useObjCAllocation = usesObjCAllocator(selfClassDecl);

  if (ctor->isConvenienceInit() || ctor->hasClangNode()) {
    // For a convenience initializer or an initializer synthesized
    // for an Objective-C class, allocate using the metatype.
    SILValue allocArg = selfMetaValue;

    // When using Objective-C allocation, convert the metatype
    // argument to an Objective-C metatype.
    if (useObjCAllocation) {
      auto metaTy = allocArg->getType().castTo<MetatypeType>();
      metaTy = CanMetatypeType::get(metaTy.getInstanceType(),
                                    MetatypeRepresentation::ObjC);
      allocArg = B.createThickToObjCMetatype(Loc, allocArg,
                                             getLoweredType(metaTy));
    }

    selfValue = B.createAllocRefDynamic(Loc, allocArg, selfTy,
                                        useObjCAllocation, {}, {});
  } else {
    // For a designated initializer, we know that the static type being
    // allocated is the type of the class that defines the designated
    // initializer.
    selfValue = B.createAllocRef(Loc, selfTy, useObjCAllocation, false, {}, {});
  }
  args.push_back(selfValue);

  // Call the initializer. Always use the Swift entry point, which will be a
  // bridging thunk if we're calling ObjC.
  SILDeclRef initConstant =
    SILDeclRef(ctor,
               SILDeclRef::Kind::Initializer,
               SILDeclRef::ConstructAtBestResilienceExpansion,
               SILDeclRef::ConstructAtNaturalUncurryLevel,
               /*isObjC=*/false);

  ManagedValue initVal;
  SILType initTy;

  ArrayRef<Substitution> subs;
  // Call the initializer.
  ArrayRef<Substitution> forwardingSubs;
  if (auto *genericEnv = ctor->getGenericEnvironmentOfContext()) {
    auto *genericSig = ctor->getGenericSignatureOfContext();
    forwardingSubs = genericEnv->getForwardingSubstitutions(
        SGM.SwiftModule, genericSig);
  }
  std::tie(initVal, initTy, subs)
    = emitSiblingMethodRef(Loc, selfValue, initConstant, forwardingSubs);

  SILValue initedSelfValue = emitApplyWithRethrow(Loc, initVal.forward(*this),
                                                  initTy, subs, args);

  // Return the initialized 'self'.
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                 initedSelfValue);
}
Example #18
void SILValue::replaceAllUsesWith(SILValue V) {
  assert(*this != V && "Cannot RAUW a value with itself");
  assert(getType() == V.getType() && "Invalid type");
  while (!use_empty())
    (**use_begin()).set(V);
}
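
The loop terminates because every Use::set unlinks that use from this value's use list before linking it into the new one, so the list drained by use_begin() shrinks on each iteration. A toy use-list model of just that invariant (hypothetical classes, not the SIL ones):

#include <cassert>
#include <list>

struct Value;

struct Use {
  Value *Val = nullptr;
  void set(Value *NewVal);
};

struct Value {
  std::list<Use *> Uses;
};

void Use::set(Value *NewVal) {
  Val->Uses.remove(this);       // unlink from the old value's list...
  Val = NewVal;
  NewVal->Uses.push_back(this); // ...and link into the new one
}

int main() {
  Value A, B;
  Use U1, U2;
  U1.Val = U2.Val = &A;
  A.Uses = {&U1, &U2};
  // replaceAllUsesWith(B): each set() shrinks A's use list by one.
  while (!A.Uses.empty())
    A.Uses.front()->set(&B);
  assert(A.Uses.empty() && B.Uses.size() == 2 && U1.Val == &B);
}
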
Example #19
/// Check if the value \p Value is known to be zero, non-zero or unknown.
IsZeroKind swift::isZeroValue(SILValue Value) {
  // Inspect integer literals.
  if (auto *L = dyn_cast<IntegerLiteralInst>(Value)) {
    if (!L->getValue())
      return IsZeroKind::Zero;
    return IsZeroKind::NotZero;
  }

  // Inspect Structs.
  switch (Value->getKind()) {
    // Bitcast of zero is zero.
    case ValueKind::UncheckedTrivialBitCastInst:
    // Extracting from a zero struct returns a zero.
    case ValueKind::StructExtractInst:
      return isZeroValue(cast<SILInstruction>(Value)->getOperand(0));
    default:
      break;
  }

  // Inspect casts.
  if (auto *BI = dyn_cast<BuiltinInst>(Value)) {
    switch (BI->getBuiltinInfo().ID) {
      case BuiltinValueKind::IntToPtr:
      case BuiltinValueKind::PtrToInt:
      case BuiltinValueKind::ZExt:
        return isZeroValue(BI->getArguments()[0]);
      case BuiltinValueKind::UDiv:
      case BuiltinValueKind::SDiv: {
        if (IsZeroKind::Zero == isZeroValue(BI->getArguments()[0]))
          return IsZeroKind::Zero;
        return IsZeroKind::Unknown;
      }
      case BuiltinValueKind::Mul:
      case BuiltinValueKind::SMulOver:
      case BuiltinValueKind::UMulOver: {
        IsZeroKind LHS = isZeroValue(BI->getArguments()[0]);
        IsZeroKind RHS = isZeroValue(BI->getArguments()[1]);
        if (LHS == IsZeroKind::Zero || RHS == IsZeroKind::Zero)
          return IsZeroKind::Zero;

        return IsZeroKind::Unknown;
      }
      default:
        return IsZeroKind::Unknown;
    }
  }

  // Handle results of XXX_with_overflow arithmetic.
  if (auto *T = dyn_cast<TupleExtractInst>(Value)) {
    // Make sure we are extracting the number value and not
    // the overflow flag.
    if (T->getFieldNo() != 0)
      return IsZeroKind::Unknown;

    BuiltinInst *BI = dyn_cast<BuiltinInst>(T->getOperand());
    if (!BI)
      return IsZeroKind::Unknown;

    return isZeroValue(BI);
  }

  // Inspect allocations and pointer literals.
  if (isa<StringLiteralInst>(Value) ||
      isa<AllocationInst>(Value) ||
      isa<GlobalAddrInst>(Value))
    return IsZeroKind::NotZero;

  return IsZeroKind::Unknown;
}
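
The analysis is a small three-valued lattice (Zero / NotZero / Unknown) pushed recursively through operations that preserve zeroness: a product is zero if either factor is zero, but a product of two non-zero integers may still wrap to zero, hence Unknown. A toy expression-tree version (hypothetical Expr/isZero, not the SIL walk):

#include <cstdio>

enum class IsZeroKind { Zero, NotZero, Unknown };
enum class ExprKind { Literal, Mul, Div };

struct Expr {
  ExprKind Kind;
  long Value;            // used by Literal
  const Expr *LHS, *RHS; // used by Mul/Div
};

static IsZeroKind isZero(const Expr &E) {
  switch (E.Kind) {
  case ExprKind::Literal:
    return E.Value == 0 ? IsZeroKind::Zero : IsZeroKind::NotZero;
  case ExprKind::Mul: // zero * anything == zero; otherwise may wrap
    if (isZero(*E.LHS) == IsZeroKind::Zero ||
        isZero(*E.RHS) == IsZeroKind::Zero)
      return IsZeroKind::Zero;
    return IsZeroKind::Unknown;
  case ExprKind::Div: // zero / anything == zero (when defined)
    if (isZero(*E.LHS) == IsZeroKind::Zero)
      return IsZeroKind::Zero;
    return IsZeroKind::Unknown;
  }
  return IsZeroKind::Unknown;
}

int main() {
  Expr Zero{ExprKind::Literal, 0, nullptr, nullptr};
  Expr Seven{ExprKind::Literal, 7, nullptr, nullptr};
  Expr Prod{ExprKind::Mul, 0, &Zero, &Seven};
  std::printf("0 * 7 known zero: %d\n",
              isZero(Prod) == IsZeroKind::Zero); // prints: 1
}
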
Example #20
/// Uses a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer. We know that V1 is a GEP, but we don't know anything about
/// V2. O1 and O2 are the underlying objects of V1 and V2, respectively.
AliasResult AliasAnalysis::aliasAddressProjection(SILValue V1, SILValue V2,
                                                  SILValue O1, SILValue O2) {

  // If V2 is also a gep instruction with a must-alias or not-aliasing base
  // pointer, figure out if the indices of the GEPs tell us anything about the
  // derived pointers.
  if (!NewProjection::isAddressProjection(V2)) {
    // Ok, V2 is not an address projection. See if V2 after stripping casts
    // aliases O1. If so, then we know that V2 must partially alias V1 via a
    // must alias relation on O1. This ensures that given an alloc_stack and a
    // gep from that alloc_stack, we say that they partially alias.
    if (isSameValueOrGlobal(O1, V2.stripCasts()))
      return AliasResult::PartialAlias;

    return AliasResult::MayAlias;
  }
  
  assert(!NewProjection::isAddressProjection(O1) &&
         "underlying object may not be a projection");
  assert(!NewProjection::isAddressProjection(O2) &&
         "underlying object may not be a projection");

  // Do the base pointers alias?
  AliasResult BaseAlias = aliasInner(O1, O2);

  // If the underlying objects are not aliased, the projected values are also
  // not aliased.
  if (BaseAlias == AliasResult::NoAlias)
    return AliasResult::NoAlias;

  // Let's do alias checking based on projections.
  auto V1Path = ProjectionPath::getAddrProjectionPath(O1, V1, true);
  auto V2Path = ProjectionPath::getAddrProjectionPath(O2, V2, true);

  // getUnderlyingObject and getAddrProjectionPath disagree on what the base
  // pointers of the two values are. Be conservative and return MayAlias.
  //
  // FIXME: The only way this should happen realistically is if there are
  // casts in between two projection instructions. getUnderlyingObject will
  // ignore that, while getAddrProjectionPath won't. The two solutions are to
  // make address projections variadic (something on the wee horizon) or
  // enable Projection to represent a cast as a special sort of projection.
  if (!V1Path || !V2Path)
    return AliasResult::MayAlias;

  auto R = V1Path->computeSubSeqRelation(*V2Path);

  // If all of the projections are equal (and they have the same base pointer),
  // the two GEPs must be the same.
  if (BaseAlias == AliasResult::MustAlias &&
      R == SubSeqRelation_t::Equal)
    return AliasResult::MustAlias;

  // The two GEPs do not alias if they are accessing different fields, even if
  // we don't know the base pointers. Different fields should not overlap.
  //
  // TODO: Replace this with a check on the computed subseq relation. See the
  // TODO in computeSubSeqRelation.
  if (V1Path->hasNonEmptySymmetricDifference(V2Path.getValue()))
    return AliasResult::NoAlias;

  // If one of the GEPs is a super path of the other then they partially
  // alias.
  if (BaseAlias == AliasResult::MustAlias &&
      isStrictSubSeqRelation(R))
    return AliasResult::PartialAlias;

  // We failed to prove anything. Be conservative and return MayAlias.
  return AliasResult::MayAlias;
}
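
Under a must-aliasing base, the path logic reduces to comparing two lists of field indices: identical paths must alias, a strict prefix partially aliases (one projects into the other), and a divergence at any position means disjoint fields, hence no alias. A toy version of that comparison (hypothetical Path/aliasPaths; the real code works over ProjectionPath and also handles may-aliasing bases):

#include <algorithm>
#include <cstdio>
#include <vector>

enum class AliasResult { NoAlias, MayAlias, PartialAlias, MustAlias };
using Path = std::vector<unsigned>;

// Compare two projection paths that hang off must-aliasing bases.
static AliasResult aliasPaths(const Path &P1, const Path &P2) {
  size_t N = std::min(P1.size(), P2.size());
  for (size_t i = 0; i != N; ++i)
    if (P1[i] != P2[i])
      return AliasResult::NoAlias;  // disjoint fields cannot overlap
  if (P1.size() == P2.size())
    return AliasResult::MustAlias;  // identical projections
  return AliasResult::PartialAlias; // one is a strict prefix of the other
}

int main() {
  std::printf("%d %d %d\n",
              (int)aliasPaths({0, 1}, {0, 1}),  // 3 = MustAlias
              (int)aliasPaths({0}, {0, 1}),     // 2 = PartialAlias
              (int)aliasPaths({0, 1}, {0, 2})); // 0 = NoAlias
}
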
Example #21
DevirtualizationResult swift::tryDevirtualizeClassMethod(FullApplySite AI,
                                                   SILValue ClassInstance) {
  if (!canDevirtualizeClassMethod(AI, ClassInstance.getType()))
    return std::make_pair(nullptr, FullApplySite());
  return devirtualizeClassMethod(AI, ClassInstance);
}
Example #22
/// The main AA entry point. Performs various analyses on V1, V2 in an attempt
/// to disambiguate the two values.
AliasResult AliasAnalysis::aliasInner(SILValue V1, SILValue V2,
                                      SILType TBAAType1,
                                      SILType TBAAType2) {
#ifndef NDEBUG
  // If alias analysis is disabled, always return may alias.
  if (!shouldRunAA())
    return AliasResult::MayAlias;
#endif

  // If the two values are equal, quickly return must alias.
  if (isSameValueOrGlobal(V1, V2))
    return AliasResult::MustAlias;

  DEBUG(llvm::dbgs() << "ALIAS ANALYSIS:\n    V1: " << *V1.getDef()
        << "    V2: " << *V2.getDef());

  // Pass in both the TBAA types so we can perform typed access TBAA and the
  // actual types of V1, V2 so we can perform class based TBAA.
  if (!typesMayAlias(TBAAType1, TBAAType2))
    return AliasResult::NoAlias;

#ifndef NDEBUG
  if (!shouldRunBasicAA())
    return AliasResult::MayAlias;
#endif

  // Strip off any casts on V1, V2.
  V1 = V1.stripCasts();
  V2 = V2.stripCasts();
  DEBUG(llvm::dbgs() << "        After Cast Stripping V1:" << *V1.getDef());
  DEBUG(llvm::dbgs() << "        After Cast Stripping V2:" << *V2.getDef());

  // Ok, we need to actually compute an Alias Analysis result for V1, V2. Begin
  // by finding the "base" of V1, V2 by stripping off all casts and GEPs.
  SILValue O1 = getUnderlyingObject(V1);
  SILValue O2 = getUnderlyingObject(V2);
  DEBUG(llvm::dbgs() << "        Underlying V1:" << *O1.getDef());
  DEBUG(llvm::dbgs() << "        Underlying V2:" << *O2.getDef());

  // If O1 and O2 are not equal, see if we can prove that they cannot be the
  // same object. If we can, return No Alias.
  if (O1 != O2 && aliasUnequalObjects(O1, O2))
    return AliasResult::NoAlias;

  // Ok, either O1, O2 are the same or we could not prove anything based off of
  // their inequality.
  // Next: ask escape analysis. This catches cases where we compare e.g. a
  // non-escaping pointer with another (maybe escaping) pointer. Escape analysis
  // uses the connection graph to check if the pointers may point to the same
  // content.
  // Note that escape analysis must work with the original pointers and not the
  // underlying objects because it treats projections differently.
  if (!EA->canPointToSameMemory(V1, V2)) {
    DEBUG(llvm::dbgs() << "            Found not-aliased objects based on "
                          "escape analysis\n");
    return AliasResult::NoAlias;
  }

  // Now we climb up use-def chains and attempt to do tricks based off of GEPs.

  // First, if one value is a GEP and the other is not, canonicalize our
  // inputs so that V1 is always the one containing the GEP.
  if (!NewProjection::isAddressProjection(V1) &&
       NewProjection::isAddressProjection(V2)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
  }

  // If V1 is an address projection, attempt to use information from the
  // aggregate type tree to disambiguate it from V2.
  if (NewProjection::isAddressProjection(V1)) {
    AliasResult Result = aliasAddressProjection(V1, V2, O1, O2);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // We could not prove anything. Be conservative and return that V1, V2 may
  // alias.
  return AliasResult::MayAlias;
}
Example #23
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  auto nsError = ctx.getNSErrorDecl();
  if (allowEmbeddedNSError && nsError &&
      existentialTL.getSemanticType().getSwiftRValueType()->getAnyNominal() ==
        ctx.getErrorDecl()) {
    // Check whether the concrete type conforms to the _BridgedStoredNSError
    // protocol. In that case, call the _nsError witness getter to extract the
    // NSError directly.
    auto conformance =
      SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType);

    CanType nsErrorType =
      nsError->getDeclaredInterfaceType()->getCanonicalType();

    ProtocolConformanceRef nsErrorConformances[1] = {
      ProtocolConformanceRef(SGM.getNSErrorConformanceToError())
    };

    if (conformance && nsError && SGM.getNSErrorConformanceToError()) {
      if (auto witness =
            conformance->getWitness(SGM.getNSErrorRequirement(loc), nullptr)) {
        // Create a reference to the getter witness.
        SILDeclRef getter =
          getGetterDeclRef(cast<VarDecl>(witness.getDecl()),
                           /*isDirectAccessorUse=*/true);
        
        // Compute the substitutions.
        ArrayRef<Substitution> substitutions =
          concreteFormalType->gatherAllSubstitutions(SGM.SwiftModule,
                                                     nullptr);

        // Emit the erasure, through the getter to _nsError.
        return emitExistentialErasure(
            loc, nsErrorType,
            getTypeLowering(nsErrorType),
            existentialTL,
            ctx.AllocateCopy(nsErrorConformances),
            C,
            [&](SGFContext innerC) -> ManagedValue {
              // Call the getter.
              return emitGetAccessor(loc, getter, substitutions,
                                     ArgumentSource(loc,
                                                    RValue(*this, loc,
                                                           concreteFormalType,
                                                           F(SGFContext()))),
                                     /*isSuper=*/false,
                                     /*isDirectAccessorUse=*/true,
                                     RValue(), innerC)
                .getAsSingleValue(*this, loc);
            });
      }
    }

    // Check whether the concrete type is an archetype. If so, call the
    // _getEmbeddedNSError() witness to try to dig out the embedded NSError.
    if (auto archetypeType = concreteFormalType->getAs<ArchetypeType>()) {
      if (std::find(archetypeType->getConformsTo().begin(),
                    archetypeType->getConformsTo().end(),
                    ctx.getErrorDecl()) !=
          archetypeType->getConformsTo().end()) {
        auto contBB = createBasicBlock();
        auto isNotPresentBB = createBasicBlock();
        auto isPresentBB = createBasicBlock();

        SILValue existentialResult =
          contBB->createBBArg(existentialTL.getLoweredType());

        ProtocolConformanceRef trivialErrorConformances[1] = {
          ProtocolConformanceRef(ctx.getErrorDecl())
        };

        Substitution substitutions[1] = {
          Substitution(concreteFormalType,
                       ctx.AllocateCopy(trivialErrorConformances))
        };

        // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
        // NSError from the value.
        ManagedValue concreteValue = F(SGFContext());
        ManagedValue potentialNSError =
          emitApplyOfLibraryIntrinsic(loc,
                                      SGM.getGetErrorEmbeddedNSError(loc),
                                      ctx.AllocateCopy(substitutions),
                                      { concreteValue },
                                      SGFContext())
            .getAsSingleValue(*this, loc);

        // Check whether we got an NSError back.
        SILValue hasNSError =
          emitDoesOptionalHaveValue(loc, potentialNSError.getValue());

        B.createCondBranch(loc, hasNSError, isPresentBB, isNotPresentBB);

        // If we did get an NSError, emit the existential erasure from that
        // NSError.
        B.emitBlock(isPresentBB);
        SILValue branchArg;
        {
          // Don't allow cleanups to escape the conditional block.
          FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
          
          // Emit the existential erasure from the NSError.
          branchArg = emitExistentialErasure(
              loc, nsErrorType,
              getTypeLowering(nsErrorType),
              existentialTL,
              ctx.AllocateCopy(nsErrorConformances),
              C,
              [&](SGFContext innerC) -> ManagedValue {
                // Pull the NSError object out of the optional result.
                auto &inputTL = getTypeLowering(potentialNSError.getType());
                auto nsErrorValue =
                  emitUncheckedGetOptionalValueFrom(loc, potentialNSError,
                                                    inputTL);

                // Perform an unchecked cast down to NSError, because it was typed
                // as 'AnyObject' for layering reasons.
                return ManagedValue(B.createUncheckedRefCast(
                                      loc,
                                      nsErrorValue.getValue(),
                                      getLoweredType(nsErrorType)),
                                    nsErrorValue.getCleanup());

              }).forward(*this);
        }
        B.createBranch(loc, contBB, branchArg);

        // If we did not get an NSError, just directly emit the existential
        // (recursively).
        B.emitBlock(isNotPresentBB);
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(), F,
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
        B.createBranch(loc, contBB, branchArg);

        // Continue.
        B.emitBlock(contBB);
        return emitManagedRValueWithCleanup(existentialResult, existentialTL);
      }
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                           concreteTL.getLoweredType(),
                                           existential);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }
    
    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTL.getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
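
For context, here is a minimal Swift sketch (hypothetical, not from the listing) of source that exercises the embedded-NSError fast path above, where erasing to 'Error' can reuse an existing NSError instead of allocating a fresh box:

import Foundation

// An NSError (or a value of a _BridgedStoredNSError-conforming type)
// erased to the 'Error' existential can hand over its NSError
// representation directly.
func erase(_ e: NSError) -> Error {
    return e
}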
Example #24
void SILGenFunction::emitCaptures(SILLocation loc,
                                  AnyFunctionRef closure,
                                  CaptureEmission purpose,
                                  SmallVectorImpl<ManagedValue> &capturedArgs) {
  auto captureInfo = SGM.Types.getLoweredLocalCaptures(closure);
  // For boxed captures, we need to mark the contained variables as having
  // escaped for DI diagnostics.
  SmallVector<SILValue, 2> escapesToMark;
  
  // Partial applications take ownership of the context parameters, so we'll
  // need to pass ownership rather than merely guaranteeing parameters.
  bool canGuarantee;
  switch (purpose) {
  case CaptureEmission::PartialApplication:
    canGuarantee = false;
    break;
  case CaptureEmission::ImmediateApplication:
    canGuarantee = true;
    break;
  }
  // TODO: We also always retain captures (rather than guarantee them) when
  // guaranteed closure contexts aren't enabled.
  if (!SGM.M.getOptions().EnableGuaranteedClosureContexts)
    canGuarantee = false;
  
  for (auto capture : captureInfo.getCaptures()) {
    auto *vd = capture.getDecl();

    switch (SGM.Types.getDeclCaptureKind(capture)) {
    case CaptureKind::None:
      break;

    case CaptureKind::Constant: {
      // let declarations.
      auto Entry = VarLocs[vd];

      auto &tl = getTypeLowering(vd->getType()->getReferenceStorageReferent());
      SILValue Val = Entry.value;

      if (!Val->getType().isAddress()) {
        // Our 'let' binding can guarantee the lifetime for the callee,
        // if we don't need to do anything more to it.
        if (canGuarantee && !vd->getType()->is<ReferenceStorageType>()) {
          auto guaranteed = ManagedValue::forUnmanaged(Val);
          capturedArgs.push_back(guaranteed);
          break;
        }
      
        // Just retain a by-val let.
        B.emitRetainValueOperation(loc, Val);
      } else {
        // If we have a mutable binding for a 'let', such as 'self' in an
        // 'init' method, load it.
        Val = emitLoad(loc, Val, tl, SGFContext(), IsNotTake).forward(*this);
      }

      // If we're capturing an unowned pointer by value, we will have just
      // loaded it into a normal retained class pointer, but we capture it as
      // an unowned pointer.  Convert back now.
      if (vd->getType()->is<ReferenceStorageType>()) {
        auto type = getTypeLowering(vd->getType()).getLoweredType();
        Val = emitConversionFromSemanticValue(loc, Val, type);
      }
      
      capturedArgs.push_back(emitManagedRValueWithCleanup(Val));
      break;
    }

    case CaptureKind::StorageAddress: {
      // Non-escaping stored declarations are captured as the
      // address of the value.
      assert(VarLocs.count(vd) && "no location for captured var!");
      VarLoc vl = VarLocs[vd];
      assert(vl.value->getType().isAddress() && "no address for captured var!");
      capturedArgs.push_back(ManagedValue::forLValue(vl.value));
      break;
    }

    case CaptureKind::Box: {
      // LValues are captured as both the box owning the value and the
      // address of the value.
      assert(VarLocs.count(vd) && "no location for captured var!");
      VarLoc vl = VarLocs[vd];
      assert(vl.value->getType().isAddress() && "no address for captured var!");

      // If this is a boxed variable, we can use it directly.
      if (vl.box) {
        // We can guarantee our own box to the callee.
        if (canGuarantee) {
          capturedArgs.push_back(ManagedValue::forUnmanaged(vl.box));
        } else {
          B.createStrongRetain(loc, vl.box, Atomicity::Atomic);
          capturedArgs.push_back(emitManagedRValueWithCleanup(vl.box));
        }
        escapesToMark.push_back(vl.value);
      } else {
        // Address-only 'let' values are passed by box.  This isn't great, in
        // that a variable captured by multiple closures will be boxed for each
        // one.  This could be improved by doing an "isCaptured" analysis when
        // emitting address-only let constants, and emit them into an alloc_box
        // like a variable instead of into an alloc_stack.
        //
        // TODO: This might not be profitable anymore with guaranteed captures,
        // since we could conceivably forward the copied value into the
        // closure context and pass it down to the partially applied function
        // in-place.
        AllocBoxInst *allocBox =
          B.createAllocBox(loc, vl.value->getType().getObjectType());
        ProjectBoxInst *boxAddress = B.createProjectBox(loc, allocBox);
        B.createCopyAddr(loc, vl.value, boxAddress, IsNotTake,
                         IsInitialization);
        capturedArgs.push_back(emitManagedRValueWithCleanup(allocBox));
      }

      break;
    }
    }
  }
  
  // Mark box addresses as captured for DI purposes. The values must have
  // been fully initialized before we close over them.
  if (!escapesToMark.empty()) {
    B.createMarkFunctionEscape(loc, escapesToMark);
  }
}
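
A minimal Swift sketch (hypothetical source) showing the two main capture kinds the loop above handles: a mutable 'var' is captured as a box, while an immutable 'let' can be captured by value and, for immediate applications, merely guaranteed:

func makeCounter(step: Int) -> () -> Int {
    var total = 0        // CaptureKind::Box: captured as box + address
    return {             // 'step' is CaptureKind::Constant: a by-value 'let'
        total += step
        return total
    }
}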
Example #25
/// \brief Removes instructions that create the callee value if they are no
/// longer necessary after inlining.
static void
cleanupCalleeValue(SILValue CalleeValue, ArrayRef<SILValue> CaptureArgs,
                   ArrayRef<SILValue> FullArgs) {
  SmallVector<SILInstruction*, 16> InstsToDelete;
  for (SILValue V : FullArgs) {
    if (SILInstruction *I = dyn_cast<SILInstruction>(V))
      if (I != CalleeValue.getDef() &&
          isInstructionTriviallyDead(I))
        InstsToDelete.push_back(I);
  }
  recursivelyDeleteTriviallyDeadInstructions(InstsToDelete, true);

  // Handle the case where the callee of the apply is a load instruction.
  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    assert(ABI && LI->getOperand().getResultNumber() == 1);

    // The load instruction must have no more uses left to erase it.
    if (!LI->use_empty())
      return;
    LI->eraseFromParent();

    // Look through uses of the alloc box the load is loading from to find up to
    // one store and up to one strong release.
    StoreInst *SI = nullptr;
    StrongReleaseInst *SRI = nullptr;
    for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE; ++UI) {
      if (SI == nullptr && isa<StoreInst>(UI.getUser())) {
        SI = cast<StoreInst>(UI.getUser());
        assert(SI->getDest() == SILValue(ABI, 1));
      } else if (SRI == nullptr && isa<StrongReleaseInst>(UI.getUser())) {
        SRI = cast<StrongReleaseInst>(UI.getUser());
        assert(SRI->getOperand() == SILValue(ABI, 0));
      } else
        return;
    }

    // If we found a store, record its source and erase it.
    if (SI) {
      CalleeValue = SI->getSrc();
      SI->eraseFromParent();
    } else {
      CalleeValue = SILValue();
    }

    // If we found a strong release, replace it with a strong release of the
    // source of the store and erase it.
    if (SRI) {
      if (CalleeValue.isValid())
        SILBuilderWithScope(SRI)
            .emitStrongReleaseAndFold(SRI->getLoc(), CalleeValue);
      SRI->eraseFromParent();
    }

    assert(ABI->use_empty());
    ABI->eraseFromParent();
    if (!CalleeValue.isValid())
      return;
  }

  if (auto *PAI = dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    SILValue Callee = PAI->getCallee();
    if (!tryDeleteDeadClosure(PAI))
      return;
    CalleeValue = Callee;
  }

  if (auto *TTTFI = dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    SILValue Callee = TTTFI->getCallee();
    if (!tryDeleteDeadClosure(TTTFI))
      return;
    CalleeValue = Callee;
  }

  if (FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    if (!FRI->use_empty())
      return;
    FRI->eraseFromParent();
  }
}
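
A hypothetical Swift-level illustration of when this cleanup fires: once 'addOne' is inlined into its call site, the closure-forming instructions (function_ref plus thin_to_thick_function or partial_apply) that produced the callee value become trivially dead, and the code above peels them off bottom-up:

func caller() -> Int {
    let addOne = { (x: Int) in x + 1 }
    return addOne(41)    // after inlining, the closure value is dead
}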
Example #26
ManagedValue
SILGenFunction::emitClosureValue(SILLocation loc, SILDeclRef constant,
                                 CanType expectedType,
                                 ArrayRef<Substitution> subs) {
  auto closure = *constant.getAnyFunctionRef();
  auto captureInfo = closure.getCaptureInfo();

  assert(((constant.uncurryLevel == 1 &&
           captureInfo.hasLocalCaptures()) ||
          (constant.uncurryLevel == 0 &&
           !captureInfo.hasLocalCaptures())) &&
         "curried local functions not yet supported");

  auto constantInfo = getConstantInfo(constant);
  SILValue functionRef = emitGlobalFunctionRef(loc, constant, constantInfo);
  SILType functionTy = functionRef->getType();

  // Apply substitutions.
  auto pft = constantInfo.SILFnType;

  bool wasSpecialized = false;
  if (pft->isPolymorphic()) {
    // If the lowered function type is generic but Sema did not hand us any
    // substitutions, the function is a local function that appears in a
    // generic context but does not have a generic parameter list of its own;
    // just use our forwarding substitutions.
    if (subs.empty()) {
      assert(closure.getAsDeclContext()->isLocalContext() &&
             "cannot reference generic global function without substitutions");
      subs = getForwardingSubstitutions();
    }
    auto specialized = pft->substGenericArgs(F.getModule(),
                                             F.getModule().getSwiftModule(),
                                             subs);
    functionTy = SILType::getPrimitiveObjectType(specialized);
    wasSpecialized = true;
  }

  // If we're in top-level code, we don't need to physically capture script
  // globals, but we still need to mark them as escaping so that DI can flag
  // uninitialized uses.
  if (this == SGM.TopLevelSGF) {
    SGM.emitMarkFunctionEscapeForTopLevelCodeGlobals(
        loc, captureInfo);
  }

  if (!captureInfo.hasLocalCaptures() && !wasSpecialized) {
    auto result = ManagedValue::forUnmanaged(functionRef);
    return emitOrigToSubstValue(loc, result,
                                AbstractionPattern(expectedType),
                                expectedType);
  }

  SmallVector<ManagedValue, 4> capturedArgs;
  emitCaptures(loc, closure, CaptureEmission::PartialApplication,
               capturedArgs);

  // The partial application takes ownership of the context parameters.
  SmallVector<SILValue, 4> forwardedArgs;
  for (auto capture : capturedArgs)
    forwardedArgs.push_back(capture.forward(*this));

  SILType closureTy =
    SILGenBuilder::getPartialApplyResultType(functionRef->getType(),
                                             capturedArgs.size(), SGM.M,
                                             subs);
  auto toClosure =
    B.createPartialApply(loc, functionRef, functionTy,
                         subs, forwardedArgs, closureTy);
  auto result = emitManagedRValueWithCleanup(toClosure);

  // Get the lowered AST types:
  //  - the original type
  auto origLoweredFormalType =
      AbstractionPattern(constantInfo.LoweredInterfaceType);
  if (captureInfo.hasLocalCaptures()) {
    // Get the unlowered formal type of the constant, stripping off
    // the first level of function application, which applies captures.
    origLoweredFormalType =
      AbstractionPattern(constantInfo.FormalInterfaceType)
          .getFunctionResultType();

    // Lower it, being careful to use the right generic signature.
    origLoweredFormalType =
      AbstractionPattern(
          origLoweredFormalType.getGenericSignature(),
          SGM.Types.getLoweredASTFunctionType(
              cast<FunctionType>(origLoweredFormalType.getType()),
              0, constant));
  }

  // - the substituted type
  auto substFormalType = cast<FunctionType>(expectedType);
  auto substLoweredFormalType =
    SGM.Types.getLoweredASTFunctionType(substFormalType, 0, constant);

  // Generalize if necessary.
  result = emitOrigToSubstValue(loc, result, origLoweredFormalType,
                                substLoweredFormalType);

  return result;
}
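
A minimal Swift sketch (an assumption, not part of the listing) of the forwarding-substitutions case above: 'inner' has no generic parameter list of its own, yet its lowered type is generic because it captures a value of the enclosing function's type parameter:

func outer<T>(_ value: T) -> () -> T {
    func inner() -> T { return value }  // generic only via the enclosing context
    return inner                        // emitted as a partial application
}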
Example #27
/// AggregateAvailableValues - Given a bunch of primitive subelement values,
/// build out the right aggregate type (LoadTy) by emitting tuple and struct
/// instructions as necessary.
static SILValue
AggregateAvailableValues(SILInstruction *Inst, SILType LoadTy,
                         SILValue Address,
                         ArrayRef<std::pair<SILValue, unsigned>> AvailableValues,
                         unsigned FirstElt) {
  assert(LoadTy.isObject());
  SILModule &M = Inst->getModule();
  
  // Check to see if the requested value is fully available, as an aggregate.
  // This is a super-common case for single-element structs, but is also a
  // general answer for arbitrary structs and tuples as well.
  if (FirstElt < AvailableValues.size()) {  // #Elements may be zero.
    SILValue FirstVal = AvailableValues[FirstElt].first;
    if (FirstVal.isValid() && AvailableValues[FirstElt].second == 0 &&
        FirstVal.getType() == LoadTy) {
      // If the first element of this value is available, check any extra ones
      // before declaring success.
      bool AllMatch = true;
      for (unsigned i = 0, e = getNumSubElements(LoadTy, M); i != e; ++i)
        if (AvailableValues[FirstElt+i].first != FirstVal ||
            AvailableValues[FirstElt+i].second != i) {
          AllMatch = false;
          break;
        }
      
      if (AllMatch)
        return FirstVal;
    }
  }

  SILBuilderWithScope B(Inst);
  
  if (TupleType *TT = LoadTy.getAs<TupleType>()) {
    SmallVector<SILValue, 4> ResultElts;
    
    for (unsigned EltNo : indices(TT->getElements())) {
      SILType EltTy = LoadTy.getTupleElementType(EltNo);
      unsigned NumSubElt = getNumSubElements(EltTy, M);
      
      // If we are missing any of the available values in this tuple element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createTupleElementAddr(Inst->getLoc(), Address, EltNo,
                                           EltTy.getAddressType());
      
      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues, FirstElt));
      FirstElt += NumSubElt;
    }
    
    return B.createTuple(Inst->getLoc(), LoadTy, ResultElts);
  }
  
  // Extract struct elements from fully referenceable structs.
  if (auto *SD = getFullyReferenceableStruct(LoadTy)) {
    SmallVector<SILValue, 4> ResultElts;
    
    for (auto *FD : SD->getStoredProperties()) {
      SILType EltTy = LoadTy.getFieldType(FD, M);
      unsigned NumSubElt = getNumSubElements(EltTy, M);
      
      // If we are missing any of the available values in this struct element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createStructElementAddr(Inst->getLoc(), Address, FD,
                                            EltTy.getAddressType());
      
      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues, FirstElt));
      FirstElt += NumSubElt;
    }
    return B.createStruct(Inst->getLoc(), LoadTy, ResultElts);
  }
  
  // Otherwise, we have a simple primitive.  If the value is available, use it,
  // otherwise emit a load of the value.
  auto Val = AvailableValues[FirstElt];
  if (!Val.first.isValid())
    return B.createLoad(Inst->getLoc(), Address);
  
  SILValue EltVal = ExtractSubElement(Val.first, Val.second, B, Inst->getLoc());
  // It must be the same type as LoadTy if available.
  assert(EltVal.getType() == LoadTy &&
         "Subelement types mismatch");
  return EltVal;
}
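
A hypothetical Swift aggregate illustrating the linearized numbering the recursion above walks: scalar leaves are numbered depth-first, so a full load of 'Packet' can be rebuilt with struct/tuple instructions from available leaves, with loads emitted only for the missing ones:

struct Inner {
    var tag: Int64       // subelement 0
    var length: Int64    // subelement 1
}

struct Packet {
    var inner: Inner                // subelements 0..1
    var checksum: (Int64, Int64)    // subelements 2..3
}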
Example #28
void SILGenFunction::emitArtificialTopLevel(ClassDecl *mainClass) {
  // Load argc and argv from the entry point arguments.
  SILValue argc = F.begin()->getBBArg(0);
  SILValue argv = F.begin()->getBBArg(1);

  switch (mainClass->getArtificialMainKind()) {
  case ArtificialMainKind::UIApplicationMain: {
    // Emit a UIKit main.
    // return UIApplicationMain(C_ARGC, C_ARGV, nil, ClassName);

    CanType NSStringTy = SGM.Types.getNSStringType();
    CanType OptNSStringTy
      = OptionalType::get(NSStringTy)->getCanonicalType();
    CanType IUOptNSStringTy
      = ImplicitlyUnwrappedOptionalType::get(NSStringTy)->getCanonicalType();

    // Look up UIApplicationMain.
    // FIXME: Doing an AST lookup here is gross and not entirely sound;
    // we're getting away with it because the types are guaranteed to already
    // be imported.
    ASTContext &ctx = getASTContext();
    Module *UIKit = ctx.getLoadedModule(ctx.getIdentifier("UIKit"));
    SmallVector<ValueDecl *, 1> results;
    UIKit->lookupQualified(UIKit->getDeclaredType(),
                           ctx.getIdentifier("UIApplicationMain"),
                           NL_QualifiedDefault,
                           /*resolver*/nullptr,
                           results);
    assert(!results.empty() && "couldn't find UIApplicationMain in UIKit");
    assert(results.size() == 1 && "more than one UIApplicationMain?");

    SILDeclRef mainRef{results.front(), ResilienceExpansion::Minimal,
                       SILDeclRef::ConstructAtNaturalUncurryLevel,
                       /*isForeign*/true};
    auto UIApplicationMainFn = SGM.M.getOrCreateFunction(mainClass, mainRef,
                                                         NotForDefinition);
    auto fnTy = UIApplicationMainFn->getLoweredFunctionType();

    // Get the class name as a string using NSStringFromClass.
    CanType mainClassTy = mainClass->getDeclaredTypeInContext()->getCanonicalType();
    CanType mainClassMetaty = CanMetatypeType::get(mainClassTy,
                                                   MetatypeRepresentation::ObjC);
    ProtocolDecl *anyObjectProtocol =
      ctx.getProtocol(KnownProtocolKind::AnyObject);
    auto mainClassAnyObjectConformance = ProtocolConformanceRef(
      *SGM.M.getSwiftModule()->lookupConformance(mainClassTy, anyObjectProtocol,
                                                nullptr));
    CanType anyObjectTy = anyObjectProtocol
      ->getDeclaredTypeInContext()
      ->getCanonicalType();
    CanType anyObjectMetaTy = CanExistentialMetatypeType::get(anyObjectTy,
                                                  MetatypeRepresentation::ObjC);

    auto NSStringFromClassType = SILFunctionType::get(nullptr,
                  SILFunctionType::ExtInfo()
                    .withRepresentation(SILFunctionType::Representation::
                                        CFunctionPointer),
                  ParameterConvention::Direct_Unowned,
                  SILParameterInfo(anyObjectMetaTy,
                                   ParameterConvention::Direct_Unowned),
                  SILResultInfo(OptNSStringTy,
                                ResultConvention::Autoreleased),
                  /*error result*/ None,
                  ctx);
    auto NSStringFromClassFn
      = SGM.M.getOrCreateFunction(mainClass, "NSStringFromClass",
                                  SILLinkage::PublicExternal,
                                  NSStringFromClassType,
                                  IsBare, IsTransparent, IsNotFragile);
    auto NSStringFromClass = B.createFunctionRef(mainClass, NSStringFromClassFn);
    SILValue metaTy = B.createMetatype(mainClass,
                             SILType::getPrimitiveObjectType(mainClassMetaty));
    metaTy = B.createInitExistentialMetatype(mainClass, metaTy,
                          SILType::getPrimitiveObjectType(anyObjectMetaTy),
                          ctx.AllocateCopy(
                            llvm::makeArrayRef(mainClassAnyObjectConformance)));
    SILValue optName = B.createApply(mainClass,
                               NSStringFromClass,
                               NSStringFromClass->getType(),
                               SILType::getPrimitiveObjectType(OptNSStringTy),
                               {}, metaTy);

    // Fix up the string parameters to have the right type.
    SILType nameArgTy = fnTy->getSILArgumentType(3);
    assert(nameArgTy == fnTy->getSILArgumentType(2));
    auto managedName = ManagedValue::forUnmanaged(optName);
    SILValue nilValue;
    if (optName->getType() == nameArgTy) {
      nilValue = getOptionalNoneValue(mainClass,
                                      getTypeLowering(OptNSStringTy));
    } else {
      assert(nameArgTy.getSwiftRValueType() == IUOptNSStringTy);
      nilValue = getOptionalNoneValue(mainClass,
                                      getTypeLowering(IUOptNSStringTy));
      managedName = emitOptionalToOptional(
          mainClass, managedName,
          SILType::getPrimitiveObjectType(IUOptNSStringTy),
          [](SILGenFunction &, SILLocation, ManagedValue input, SILType) {
        return input;
      });
    }

    // Fix up argv to have the right type.
    auto argvTy = fnTy->getSILArgumentType(1);

    SILType unwrappedTy = argvTy;
    if (Type innerTy = argvTy.getSwiftRValueType()->getAnyOptionalObjectType()){
      auto canInnerTy = innerTy->getCanonicalType();
      unwrappedTy = SILType::getPrimitiveObjectType(canInnerTy);
    }

    auto managedArgv = ManagedValue::forUnmanaged(argv);

    if (unwrappedTy != argv->getType()) {
      auto converted =
          emitPointerToPointer(mainClass, managedArgv,
                               argv->getType().getSwiftRValueType(),
                               unwrappedTy.getSwiftRValueType());
      managedArgv = std::move(converted).getAsSingleValue(*this, mainClass);
    }

    if (unwrappedTy != argvTy) {
      managedArgv = getOptionalSomeValue(mainClass, managedArgv,
                                         getTypeLowering(argvTy));
    }

    auto UIApplicationMain = B.createFunctionRef(mainClass, UIApplicationMainFn);

    SILValue args[] = {argc, managedArgv.getValue(), nilValue,
                       managedName.getValue()};

    B.createApply(mainClass, UIApplicationMain,
                  UIApplicationMain->getType(),
                  argc->getType(), {}, args);
    SILValue r = B.createIntegerLiteral(mainClass,
                        SILType::getBuiltinIntegerType(32, ctx), 0);
    auto rType = F.getLoweredFunctionType()->getSingleResult().getSILType();
    if (r->getType() != rType)
      r = B.createStruct(mainClass, rType, r);

    Cleanups.emitCleanupsForReturn(mainClass);
    B.createReturn(mainClass, r);
    return;
  }

  case ArtificialMainKind::NSApplicationMain: {
    // Emit an AppKit main.
    // return NSApplicationMain(C_ARGC, C_ARGV);

    SILParameterInfo argTypes[] = {
      SILParameterInfo(argc->getType().getSwiftRValueType(),
                       ParameterConvention::Direct_Unowned),
      SILParameterInfo(argv->getType().getSwiftRValueType(),
                       ParameterConvention::Direct_Unowned),
    };
    auto NSApplicationMainType = SILFunctionType::get(nullptr,
                  SILFunctionType::ExtInfo()
                    // Should be C calling convention, but NSApplicationMain
                    // has an overlay to fix the type of argv.
                    .withRepresentation(SILFunctionType::Representation::Thin),
                  ParameterConvention::Direct_Unowned,
                  argTypes,
                  SILResultInfo(argc->getType().getSwiftRValueType(),
                                ResultConvention::Unowned),
                  /*error result*/ None,
                  getASTContext());

    auto NSApplicationMainFn
      = SGM.M.getOrCreateFunction(mainClass, "NSApplicationMain",
                                  SILLinkage::PublicExternal,
                                  NSApplicationMainType,
                                  IsBare, IsTransparent, IsNotFragile);

    auto NSApplicationMain = B.createFunctionRef(mainClass, NSApplicationMainFn);
    SILValue args[] = { argc, argv };

    B.createApply(mainClass, NSApplicationMain,
                  NSApplicationMain->getType(),
                  argc->getType(), {}, args);
    SILValue r = B.createIntegerLiteral(mainClass,
                        SILType::getBuiltinIntegerType(32, getASTContext()), 0);
    auto rType = F.getLoweredFunctionType()->getSingleResult().getSILType();
    if (r->getType() != rType)
      r = B.createStruct(mainClass, rType, r);
    B.createReturn(mainClass, r);
    return;
  }
  }
}
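
For reference, a minimal Swift sketch (assuming UIKit is available) of the source-level trigger for the UIApplicationMain case above; the attribute causes SILGen to synthesize the artificial main that forwards argc and argv:

import UIKit

@UIApplicationMain
class AppDelegate: UIResponder, UIApplicationDelegate {
    var window: UIWindow?
}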
Example #29
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  if (ctx.LangOpts.EnableObjCInterop && conformances.size() == 1 &&
      conformances[0].getRequirement() == ctx.getErrorDecl() &&
      ctx.getNSErrorDecl()) {
    auto nsErrorDecl = ctx.getNSErrorDecl();

    // If the concrete type is NSError or a subclass thereof, just erase it
    // directly.
    auto nsErrorType = nsErrorDecl->getDeclaredType()->getCanonicalType();
    if (nsErrorType->isExactSuperclassOf(concreteFormalType, nullptr)) {
      ManagedValue nsError = F(SGFContext());
      if (nsErrorType != concreteFormalType) {
        nsError = ManagedValue(B.createUpcast(loc, nsError.getValue(),
                                              getLoweredType(nsErrorType)),
                               nsError.getCleanup());
      }
      return emitBridgedToNativeError(loc, nsError);
    }

    // If the concrete type is known to conform to _BridgedStoredNSError,
    // call the _nsError witness getter to extract the NSError directly,
    // then just erase the NSError.
    if (auto storedNSErrorConformance =
          SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType)) {
      auto nsErrorVar = SGM.getNSErrorRequirement(loc);
      if (!nsErrorVar) return emitUndef(loc, existentialTL.getLoweredType());

      SubstitutionList nsErrorVarSubstitutions;

      // Devirtualize.  Maybe this should be done implicitly by
      // emitPropertyLValue?
      if (storedNSErrorConformance->isConcrete()) {
        if (auto witnessVar = storedNSErrorConformance->getConcrete()
                                          ->getWitness(nsErrorVar, nullptr)) {
          nsErrorVar = cast<VarDecl>(witnessVar.getDecl());
          nsErrorVarSubstitutions = witnessVar.getSubstitutions();
        }
      }

      auto nativeError = F(SGFContext());

      WritebackScope writebackScope(*this);
      auto nsError =
        emitRValueForPropertyLoad(loc, nativeError, concreteFormalType,
                                  /*super*/ false, nsErrorVar,
                                  nsErrorVarSubstitutions,
                                  AccessSemantics::Ordinary, nsErrorType,
                                  SGFContext())
        .getAsSingleValue(*this, loc);

      return emitBridgedToNativeError(loc, nsError);
    }

    // Otherwise, if it's an archetype, try calling the _getEmbeddedNSError()
    // witness to try to dig out the embedded NSError.  But don't do this
    // when we're being called recursively.
    if (isa<ArchetypeType>(concreteFormalType) && allowEmbeddedNSError) {
      auto contBB = createBasicBlock();
      auto isNotPresentBB = createBasicBlock();
      auto isPresentBB = createBasicBlock();

      // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
      // NSError from the value.
      auto getEmbeddedNSErrorFn = SGM.getGetErrorEmbeddedNSError(loc);
      if (!getEmbeddedNSErrorFn)
        return emitUndef(loc, existentialTL.getLoweredType());

      Substitution getEmbeddedNSErrorSubstitutions[1] = {
        Substitution(concreteFormalType, conformances)
      };

      ManagedValue concreteValue = F(SGFContext());
      ManagedValue potentialNSError =
        emitApplyOfLibraryIntrinsic(loc,
                                    getEmbeddedNSErrorFn,
                                    getEmbeddedNSErrorSubstitutions,
                                    { concreteValue.copy(*this, loc) },
                                    SGFContext())
          .getAsSingleValue(*this, loc);

      // We're going to consume 'concreteValue' in exactly one branch,
      // so kill its cleanup now and recreate it on both branches.
      (void) concreteValue.forward(*this);

      // Check whether we got an NSError back.
      std::pair<EnumElementDecl*, SILBasicBlock*> cases[] = {
        { ctx.getOptionalSomeDecl(), isPresentBB },
        { ctx.getOptionalNoneDecl(), isNotPresentBB }
      };
      B.createSwitchEnum(loc, potentialNSError.forward(*this),
                         /*default*/ nullptr, cases);

      // If we did get an NSError, emit the existential erasure from that
      // NSError.
      B.emitBlock(isPresentBB);
      SILValue branchArg;
      {
        // Don't allow cleanups to escape the conditional block.
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        enterDestroyCleanup(concreteValue.getValue());

        // Receive the error value.  It's typed as an 'AnyObject' for
        // layering reasons, so perform an unchecked cast down to NSError.
        SILType anyObjectTy =
          potentialNSError.getType().getAnyOptionalObjectType();
        SILValue nsError = isPresentBB->createPHIArgument(
            anyObjectTy, ValueOwnershipKind::Owned);
        nsError = B.createUncheckedRefCast(loc, nsError, 
                                           getLoweredType(nsErrorType));

        branchArg = emitBridgedToNativeError(loc,
                                        emitManagedRValueWithCleanup(nsError))
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // If we did not get an NSError, just directly emit the existential.
      // Since this is a recursive call, make sure we don't end up in this
      // path again.
      B.emitBlock(isNotPresentBB);
      {
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        concreteValue = emitManagedRValueWithCleanup(concreteValue.getValue());
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(),
                                           [&](SGFContext C) {
                                             return concreteValue;
                                           },
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // Continue.
      B.emitBlock(contBB);

      SILValue existentialResult = contBB->createPHIArgument(
          existentialTL.getLoweredType(), ValueOwnershipKind::Owned);
      return emitManagedRValueWithCleanup(existentialResult, existentialTL);
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                           concreteTL.getLoweredType(),
                                           existential);
    // Initialize the concrete value in-place.
    ExistentialInitialization init(existential, valueAddr, concreteFormalType,
                                   ExistentialRepresentation::Boxed, *this);
    ManagedValue mv = F(SGFContext(&init));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init.getAddress());
      init.finishInitialization(*this);
    }
    
    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // If the concrete value is a pseudogeneric archetype, first erase it to
    // its upper bound.
    auto anyObjectProto = getASTContext()
      .getProtocol(KnownProtocolKind::AnyObject);
    auto anyObjectTy = anyObjectProto
      ? anyObjectProto->getDeclaredType()->getCanonicalType()
      : CanType();
    auto eraseToAnyObject =
    [&, concreteFormalType, F](SGFContext C) -> ManagedValue {
      auto concreteValue = F(SGFContext());
      auto anyObjectConformance = SGM.SwiftModule
        ->lookupConformance(concreteFormalType, anyObjectProto, nullptr);
      ProtocolConformanceRef buf[] = {
        *anyObjectConformance,
      };
      
      auto asAnyObject = B.createInitExistentialRef(loc,
                                  SILType::getPrimitiveObjectType(anyObjectTy),
                                  concreteFormalType,
                                  concreteValue.getValue(),
                                  getASTContext().AllocateCopy(buf));
      return ManagedValue(asAnyObject, concreteValue.getCleanup());
    };
    
    auto concreteTLPtr = &concreteTL;
    if (this->F.getLoweredFunctionType()->isPseudogeneric()) {
      if (anyObjectTy && concreteFormalType->is<ArchetypeType>()) {
        concreteFormalType = anyObjectTy;
        concreteTLPtr = &getTypeLowering(anyObjectTy);
        F = eraseToAnyObject;
      }
    }

    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTLPtr->getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
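
A minimal generic Swift sketch (hypothetical) of source that reaches the archetype branch above; since 'T' is opaque at compile time, the emitted SIL must probe at runtime for an embedded NSError before falling back to ordinary boxing:

func eraseGeneric<T: Error>(_ value: T) -> Error {
    return value  // existential erasure from an archetype
}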
Example #30
void SILGenFunction::emitCurryThunk(ValueDecl *vd,
                                    SILDeclRef from, SILDeclRef to) {
  SmallVector<SILValue, 8> curriedArgs;

  unsigned paramCount = from.uncurryLevel + 1;

  if (isa<ConstructorDecl>(vd) || isa<EnumElementDecl>(vd)) {
    // The first body parameter pattern for a constructor specifies the
    // "self" instance, but the constructor is invoked from outside on a
    // metatype.
    assert(from.uncurryLevel == 0 && to.uncurryLevel == 1
           && "currying constructor at level other than one?!");
    F.setBare(IsBare);
    auto selfMetaTy = vd->getType()->getAs<AnyFunctionType>()->getInput();
    auto metatypeVal = new (F.getModule()) SILArgument(F.begin(),
                                            getLoweredLoadableType(selfMetaTy));
    curriedArgs.push_back(metatypeVal);

  } else if (auto fd = dyn_cast<AbstractFunctionDecl>(vd)) {
    // Forward implicit closure context arguments.
    bool hasCaptures = fd->getCaptureInfo().hasLocalCaptures();
    if (hasCaptures)
      --paramCount;

    // Forward the curried formal arguments.
    auto forwardedPatterns = fd->getParameterLists().slice(0, paramCount);
    for (auto *paramPattern : reversed(forwardedPatterns))
      bindParametersForForwarding(paramPattern, curriedArgs);

    // Forward captures.
    if (hasCaptures) {
      auto captureInfo = SGM.Types.getLoweredLocalCaptures(fd);
      for (auto capture : captureInfo.getCaptures())
        forwardCaptureArgs(*this, curriedArgs, capture);
    }
  } else {
    llvm_unreachable("don't know how to curry this decl");
  }

  // Forward substitutions.
  ArrayRef<Substitution> subs;
  if (auto gp = getConstantInfo(to).ContextGenericParams) {
    subs = gp->getForwardingSubstitutions(getASTContext());
  }

  SILValue toFn = getNextUncurryLevelRef(*this, vd, to, from.isDirectReference,
                                         curriedArgs, subs);
  SILType resultTy
    = SGM.getConstantType(from).castTo<SILFunctionType>()
         ->getSingleResult().getSILType();
  resultTy = F.mapTypeIntoContext(resultTy);
  auto toTy = toFn->getType();

  // Forward archetypes and specialize if the function is generic.
  if (!subs.empty()) {
    auto toFnTy = toFn->getType().castTo<SILFunctionType>();
    toTy = getLoweredLoadableType(
              toFnTy->substGenericArgs(SGM.M, SGM.SwiftModule, subs));
  }

  // Partially apply the next uncurry level and return the result closure.
  auto closureTy =
    SILGenBuilder::getPartialApplyResultType(toFn->getType(), curriedArgs.size(),
                                             SGM.M, subs);
  SILInstruction *toClosure =
    B.createPartialApply(vd, toFn, toTy, subs, curriedArgs, closureTy);
  if (resultTy != closureTy)
    toClosure = B.createConvertFunction(vd, toClosure, resultTy);
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(vd), toClosure);
}