/// \brief Check if this instruction corresponds to user-written code.
static bool isUserCode(const SILInstruction *I) {
  SILLocation Loc = I->getLoc();
  if (Loc.isAutoGenerated())
    return false;
  
  // Branch instructions are not user code. These could belong to the control
  // flow statement we are folding (e.g., a while loop).
  // Unreachable instructions are not user code either; they are "expected"
  // in unreachable blocks.
  if ((isa<BranchInst>(I) || isa<UnreachableInst>(I)) &&
      Loc.is<RegularLocation>())
    return false;
  
  // If the instruction corresponds to a user-written return or some other
  // statement, we know it corresponds to user code.
  if (Loc.is<RegularLocation>() || Loc.is<ReturnLocation>()) {
    if (auto *E = Loc.getAsASTNode<Expr>())
      return !E->isImplicit();
    if (auto *D = Loc.getAsASTNode<Decl>())
      return !D->isImplicit();
    if (auto *S = Loc.getAsASTNode<Stmt>())
      return !S->isImplicit();
    if (auto *P = Loc.getAsASTNode<Pattern>())
      return !P->isImplicit();
    return true;
  }
  return false;
}
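// A minimal usage sketch (hypothetical helper, not part of the original
// source): walk a basic block with the predicate above to decide whether it
// contains any user-written instruction. Only the iteration idiom already
// used elsewhere in these examples is assumed.
static bool blockContainsUserCode(const SILBasicBlock &BB) {
  for (auto I = BB.begin(), E = BB.end(); I != E; ++I)
    if (isUserCode(&*I))
      return true;
  return false;
}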
static void diagnoseMissingReturn(const UnreachableInst *UI,
                                  ASTContext &Context) {
  const SILBasicBlock *BB = UI->getParent();
  const SILFunction *F = BB->getParent();
  SILLocation FLoc = F->getLocation();

  Type ResTy;

  if (auto *FD = FLoc.getAsASTNode<FuncDecl>()) {
    ResTy = FD->getResultType();
  } else if (auto *CE = FLoc.getAsASTNode<ClosureExpr>()) {
    ResTy = CE->getResultType();
  } else {
    llvm_unreachable("unhandled case in MissingReturn");
  }

  bool isNoReturn = F->getLoweredFunctionType()->isNoReturn();

  // No action is required if the function returns 'Void' or is marked
  // 'noreturn'.
  if (ResTy->isVoid() || isNoReturn)
    return;

  SILLocation L = UI->getLoc();
  assert(L && ResTy);
  diagnose(Context,
           L.getEndSourceLoc(),
           diag::missing_return, ResTy,
           FLoc.isASTNode<ClosureExpr>() ? 1 : 0);
}
SILFunction *MaterializeForSetEmitter::createCallback(SILFunction &F,
                                                      GeneratorFn generator) {
  auto callbackType =
      SGM.Types.getMaterializeForSetCallbackType(WitnessStorage,
                                                 GenericSig,
                                                 SelfInterfaceType,
                                                 CallbackRepresentation);

  auto *genericEnv = GenericEnv;
  if (GenericEnv && GenericEnv->getGenericSignature()->areAllParamsConcrete())
    genericEnv = nullptr;

  auto callback =
    SGM.M.createFunction(Linkage, CallbackName, callbackType,
                         genericEnv, SILLocation(Witness),
                         IsBare, F.isTransparent(), F.isFragile(),
                         IsNotThunk,
                         /*ClassVisibility=*/SILFunction::NotRelevant,
                         /*InlineStrategy=*/InlineDefault,
                         /*EffectsKind=*/EffectsKind::Unspecified,
                         /*InsertBefore=*/&F);

  callback->setDebugScope(new (SGM.M) SILDebugScope(Witness, callback));

  PrettyStackTraceSILFunction X("silgen materializeForSet callback", callback);
  {
    SILGenFunction gen(SGM, *callback);

    auto makeParam = [&](unsigned index) -> SILArgument * {
      SILType type = gen.F.mapTypeIntoContext(
          gen.getSILType(callbackType->getParameters()[index]));
      return gen.F.begin()->createFunctionArgument(type);
    };

    // Add arguments for all the parameters.
    auto valueBuffer = makeParam(0);
    auto storageBuffer = makeParam(1);
    auto self = makeParam(2);
    (void) makeParam(3);

    SILLocation loc = Witness;
    loc.markAutoGenerated();

    // Call the generator function we were provided.
    {
      LexicalScope scope(gen.Cleanups, gen, CleanupLocation::get(loc));
      generator(gen, loc, valueBuffer, storageBuffer, self);
    }

    // Return void.
    auto result = gen.emitEmptyTuple(loc);
    gen.B.createReturn(loc, result);
  }

  callback->verify();
  return callback;
}
SILFunction *MaterializeForSetEmitter::createCallback(SILFunction &F,
                                                      GeneratorFn generator) {
  auto callbackType =
      SGM.Types.getMaterializeForSetCallbackType(WitnessStorage,
                                                 GenericSig,
                                                 SelfInterfaceType);
  auto callback =
      SGM.M.getOrCreateFunction(Witness, CallbackName, Linkage,
                                callbackType, IsBare,
                                F.isTransparent(),
                                F.isFragile());

  callback->setGenericEnvironment(GenericEnv);
  callback->setDebugScope(new (SGM.M) SILDebugScope(Witness, callback));

  PrettyStackTraceSILFunction X("silgen materializeForSet callback", callback);
  {
    SILGenFunction gen(SGM, *callback);

    auto makeParam = [&](unsigned index) -> SILArgument* {
      SILType type = gen.F.mapTypeIntoContext(
          callbackType->getParameters()[index].getSILType());
      return new (SGM.M) SILArgument(gen.F.begin(), type);
    };

    // Add arguments for all the parameters.
    auto valueBuffer = makeParam(0);
    auto storageBuffer = makeParam(1);
    auto self = makeParam(2);
    (void) makeParam(3);

    SILLocation loc = Witness;
    loc.markAutoGenerated();

    // Call the generator function we were provided.
    {
      LexicalScope scope(gen.Cleanups, gen, CleanupLocation::get(loc));
      generator(gen, loc, valueBuffer, storageBuffer, self);
    }

    // Return void.
    auto result = gen.emitEmptyTuple(loc);
    gen.B.createReturn(loc, result);
  }

  callback->verify();
  return callback;
}
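// Usage sketch (hypothetical): GeneratorFn's declared type is not shown in
// this excerpt, but the call site above invokes the generator with the
// SILGenFunction, the auto-generated location, and the three lowered
// parameters, so any callable of the following shape would fit.
static void emitNoopWriteback(SILGenFunction &gen, SILLocation loc,
                              SILValue valueBuffer, SILValue storageBuffer,
                              SILValue self) {
  // A generator may emit nothing; the callback then simply returns void
  // through the emitEmptyTuple/createReturn epilogue in createCallback.
}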
Example #5
/// Given that this is an 'Unknown' value, emit diagnostic notes providing
/// context about what the problem is.
void SymbolicValue::emitUnknownDiagnosticNotes(SILLocation fallbackLoc) {
  auto badInst = dyn_cast<SILInstruction>(getUnknownNode());
  if (!badInst)
    return;

  bool emittedFirstNote = emitNoteDiagnostic(badInst, getUnknownReason(),
                                             fallbackLoc);

  auto sourceLoc = fallbackLoc.getSourceLoc();
  auto &module = badInst->getModule();
  if (sourceLoc.isInvalid()) {
    diagnose(module.getASTContext(), sourceLoc, diag::constexpr_not_evaluable);
    return;
  }
  for (auto &sourceLoc : llvm::reverse(getUnknownCallStack())) {
    // Skip unknown sources.
    if (!sourceLoc.isValid())
      continue;

    auto diag = emittedFirstNote ? diag::constexpr_called_from
                                 : diag::constexpr_not_evaluable;
    diagnose(module.getASTContext(), sourceLoc, diag);
    emittedFirstNote = true;
  }
}
Example #6
CleanupLocation CleanupLocation::get(SILLocation L) {
  if (Expr *E = L.getAsASTNode<Expr>())
    return CleanupLocation(E, L.getSpecialFlags());
  if (Stmt *S = L.getAsASTNode<Stmt>())
    return CleanupLocation(S, L.getSpecialFlags());
  if (Pattern *P = L.getAsASTNode<Pattern>())
    return CleanupLocation(P, L.getSpecialFlags());
  if (Decl *D = L.getAsASTNode<Decl>())
    return CleanupLocation(D, L.getSpecialFlags());
  if (L.isNull())
    return CleanupLocation();
  if (L.getAs<SILFileLocation>())
    return CleanupLocation();
  llvm_unreachable("Cannot construct Cleanup loc from the "
                   "given location.");
}
Example #7
SILValue SILGenFunction::emitGlobalFunctionRef(SILLocation loc,
                                               SILDeclRef constant,
                                               SILConstantInfo constantInfo) {
  assert(constantInfo == getConstantInfo(constant));

  // Builtins must be fully applied at the point of reference.
  if (constant.hasDecl() &&
      isa<BuiltinUnit>(constant.getDecl()->getDeclContext())) {
    SGM.diagnose(loc.getSourceLoc(), diag::not_implemented,
                 "delayed application of builtin");
    return SILUndef::get(constantInfo.getSILType(), SGM.M);
  }
  
  // If the constant is a thunk we haven't emitted yet, emit it.
  if (!SGM.hasFunction(constant)) {
    if (constant.isCurried) {
      SGM.emitCurryThunk(constant);
    } else if (constant.isForeignToNativeThunk()) {
      SGM.emitForeignToNativeThunk(constant);
    } else if (constant.isNativeToForeignThunk()) {
      SGM.emitNativeToForeignThunk(constant);
    } else if (constant.kind == SILDeclRef::Kind::EnumElement) {
      SGM.emitEnumConstructor(cast<EnumElementDecl>(constant.getDecl()));
    }
  }

  auto f = SGM.getFunction(constant, NotForDefinition);
  assert(f->getLoweredFunctionType() == constantInfo.SILFnType);
  return B.createFunctionRef(loc, f);
}
Example #8
void swift::printSILLocationDescription(llvm::raw_ostream &out,
                                        SILLocation loc,
                                        ASTContext &Context) {
  if (loc.isNull()) {
    out << "<<invalid location>>";
  } else if (loc.isSILFile()) {
    printSourceLocDescription(out, loc.getSourceLoc(), Context);
  } else if (auto decl = loc.getAsASTNode<Decl>()) {
    printDeclDescription(out, decl, Context);
  } else if (auto expr = loc.getAsASTNode<Expr>()) {
    printExprDescription(out, expr, Context);
  } else if (auto stmt = loc.getAsASTNode<Stmt>()) {
    printStmtDescription(out, stmt, Context);
  } else if (auto pattern = loc.getAsASTNode<Pattern>()) {
    printPatternDescription(out, pattern, Context);
  }
}
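// Convenience sketch (hypothetical helper): capture the description into a
// std::string via llvm::raw_string_ostream, the standard LLVM adapter for
// streaming into an in-memory buffer.
static std::string getLocationDescription(SILLocation loc,
                                          ASTContext &Context) {
  std::string buffer;
  llvm::raw_string_ostream os(buffer);
  printSILLocationDescription(os, loc, Context);
  return os.str();
}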
static void diagnoseReturn(const SILInstruction *I, ASTContext &Context) {
  auto *TI = dyn_cast<TermInst>(I);
  if (!TI || !(isa<BranchInst>(TI) || isa<ReturnInst>(TI)))
    return;

  const SILBasicBlock *BB = TI->getParent();
  const SILFunction *F = BB->getParent();

  // Warn if we reach a return inside a noreturn function.
  if (F->getLoweredFunctionType()->isNoReturn()) {
    SILLocation L = TI->getLoc();
    if (L.is<ReturnLocation>())
      diagnose(Context, L.getSourceLoc(), diag::return_from_noreturn);
    if (L.is<ImplicitReturnLocation>())
      diagnose(Context, L.getSourceLoc(), diag::return_from_noreturn);
  }
}
static void diagnoseMissingReturn(const UnreachableInst *UI,
                                  ASTContext &Context) {
  const SILBasicBlock *BB = UI->getParent();
  const SILFunction *F = BB->getParent();
  SILLocation FLoc = F->getLocation();

  Type ResTy;
  BraceStmt *BS;

  if (auto *FD = FLoc.getAsASTNode<FuncDecl>()) {
    ResTy = FD->getResultInterfaceType();
    BS = FD->getBody(/*canSynthesize=*/false);
  } else if (auto *CD = FLoc.getAsASTNode<ConstructorDecl>()) {
    ResTy = CD->getResultInterfaceType();
    BS = CD->getBody();
  } else if (auto *CE = FLoc.getAsASTNode<ClosureExpr>()) {
    ResTy = CE->getResultType();
    BS = CE->getBody();
  } else {
    llvm_unreachable("unhandled case in MissingReturn");
  }

  SILLocation L = UI->getLoc();
  assert(L && ResTy);
  auto numElements = BS->getNumElements();
  if (numElements > 0) {
    auto element = BS->getElement(numElements - 1);
    if (auto expr = element.dyn_cast<Expr *>()) {
      if (expr->getType()->getCanonicalType() == ResTy->getCanonicalType()) {
        Context.Diags.diagnose(
          expr->getStartLoc(),
          diag::missing_return_last_expr, ResTy,
          FLoc.isASTNode<ClosureExpr>() ? 1 : 0)
        .fixItInsert(expr->getStartLoc(), "return ");
        return;
      }
    }
  }
  auto diagID = F->isNoReturnFunction() ? diag::missing_never_call
                                        : diag::missing_return;
  diagnose(Context,
           L.getEndSourceLoc(),
           diagID, ResTy,
           FLoc.isASTNode<ClosureExpr>() ? 1 : 0);
}
Example #11
InlinedLocation InlinedLocation::getInlinedLocation(SILLocation L) {
  if (Expr *E = L.getAsASTNode<Expr>())
    return InlinedLocation(E, L.getSpecialFlags());
  if (Stmt *S = L.getAsASTNode<Stmt>())
    return InlinedLocation(S, L.getSpecialFlags());
  if (Pattern *P = L.getAsASTNode<Pattern>())
    return InlinedLocation(P, L.getSpecialFlags());
  if (Decl *D = L.getAsASTNode<Decl>())
    return InlinedLocation(D, L.getSpecialFlags());

  if (L.hasSILFileSourceLoc())
    return InlinedLocation(L.getSILFileSourceLoc(), L.getSpecialFlags());

  if (L.isInTopLevel())
    return InlinedLocation::getModuleLocation(L.getSpecialFlags());

  if (L.isAutoGenerated()) {
    InlinedLocation IL;
    IL.markAutoGenerated();
    return IL;
  }
  llvm_unreachable("Cannot construct Inlined loc from the given location.");
}
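// Pattern sketch (hypothetical): CleanupLocation::get above, this factory,
// and MandatoryInlinedLocation::getMandatoryInlinedLocation below all
// dispatch on the AST node kind the same way. A template like this captures
// the shared shape for any location type constructible from a node plus the
// special flags; the extra per-type fallback cases are omitted.
template <typename LocType>
static LocType convertASTNodeLocation(SILLocation L) {
  if (Expr *E = L.getAsASTNode<Expr>())
    return LocType(E, L.getSpecialFlags());
  if (Stmt *S = L.getAsASTNode<Stmt>())
    return LocType(S, L.getSpecialFlags());
  if (Pattern *P = L.getAsASTNode<Pattern>())
    return LocType(P, L.getSpecialFlags());
  if (Decl *D = L.getAsASTNode<Decl>())
    return LocType(D, L.getSpecialFlags());
  llvm_unreachable("unhandled location kind");
}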
static void diagnoseUnreachable(const SILInstruction *I,
                                ASTContext &Context) {
  if (auto *UI = dyn_cast<UnreachableInst>(I)) {
    SILLocation L = UI->getLoc();

    // Invalid location means that the instruction has been generated by SIL
    // passes, such as DCE. FIXME: we might want to just introduce a separate
    // instruction kind, instead of keeping this invariant.
    //
    // We also do not want to emit diagnostics for code that was
    // transparently inlined. We should have already emitted these
    // diagnostics when we process the callee function prior to
    // inlining it.
    if (!L.hasASTLocation() || L.is<MandatoryInlinedLocation>())
      return;

    // The most common case of getting an unreachable instruction is a
    // missing return statement. In this case, we know that the instruction
    // location will be the enclosing function.
    if (L.isASTNode<AbstractFunctionDecl>() || L.isASTNode<ClosureExpr>()) {
      diagnoseMissingReturn(UI, Context);
      return;
    }

    // A non-exhaustive switch would also produce an unreachable instruction.
    if (L.isASTNode<SwitchStmt>()) {
      diagnose(Context, L.getEndSourceLoc(), diag::non_exhaustive_switch);
      return;
    }

    if (auto *Guard = L.getAsASTNode<GuardStmt>()) {
      diagnose(Context, Guard->getBody()->getEndLoc(),
               diag::guard_body_must_not_fallthrough);
      return;
    }
  }
}
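// Driver sketch (hypothetical): apply the diagnostic above to every
// instruction in a function; range-based iteration over SILFunction and
// SILBasicBlock mirrors the explicit iterator loops used elsewhere in these
// examples.
static void diagnoseUnreachableInstructions(const SILFunction &F,
                                            ASTContext &Context) {
  for (auto &BB : F)
    for (auto &I : BB)
      diagnoseUnreachable(&I, Context);
}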
Example #13
SILValue SILGenFunction::emitGlobalFunctionRef(SILLocation loc,
                                               SILDeclRef constant,
                                               SILConstantInfo constantInfo) {
  assert(constantInfo == getConstantInfo(constant));

  // Builtins must be fully applied at the point of reference.
  if (constant.hasDecl() &&
      isa<BuiltinUnit>(constant.getDecl()->getDeclContext())) {
    SGM.diagnose(loc.getSourceLoc(), diag::not_implemented,
                 "delayed application of builtin");
    return SILUndef::get(constantInfo.getSILType(), SGM.M);
  }
  
  // If the constant is a thunk we haven't emitted yet, emit it.
  if (!SGM.hasFunction(constant)) {
    if (constant.isCurried) {
      auto vd = constant.getDecl();
      // Reference the next uncurrying level of the function.
      SILDeclRef next = SILDeclRef(vd, constant.kind,
                                 SILDeclRef::ConstructAtBestResilienceExpansion,
                                 constant.uncurryLevel + 1);
      // If the function is fully uncurried and natively foreign, reference its
      // foreign entry point.
      if (!next.isCurried) {
        if (requiresForeignToNativeThunk(vd))
          next = next.asForeign();
      }
      
      // Preserve whether the curry thunks lead to a direct reference to the
      // method implementation.
      next = next.asDirectReference(constant.isDirectReference);

      SGM.emitCurryThunk(vd, constant, next);
    }
    // Otherwise, if this is a calling convention thunk we haven't emitted yet,
    // emit it.
    else if (constant.isForeignToNativeThunk()) {
      SGM.emitForeignToNativeThunk(constant);
    } else if (constant.isNativeToForeignThunk()) {
      SGM.emitNativeToForeignThunk(constant);
    } else if (constant.kind == SILDeclRef::Kind::EnumElement) {
      SGM.emitEnumConstructor(cast<EnumElementDecl>(constant.getDecl()));
    }
  }

  auto f = SGM.getFunction(constant, NotForDefinition);
  assert(f->getLoweredFunctionType() == constantInfo.SILFnType);
  return B.createFunctionRef(loc, f);
}
Example #14
MandatoryInlinedLocation
MandatoryInlinedLocation::getMandatoryInlinedLocation(SILLocation L) {
  if (Expr *E = L.getAsASTNode<Expr>())
    return MandatoryInlinedLocation(E, L.getSpecialFlags());
  if (Stmt *S = L.getAsASTNode<Stmt>())
    return MandatoryInlinedLocation(S, L.getSpecialFlags());
  if (Pattern *P = L.getAsASTNode<Pattern>())
    return MandatoryInlinedLocation(P, L.getSpecialFlags());
  if (Decl *D = L.getAsASTNode<Decl>())
    return MandatoryInlinedLocation(D, L.getSpecialFlags());

  if (L.isSILFile())
    return MandatoryInlinedLocation(L.Loc.SILFileLoc, L.getSpecialFlags());

  if (L.isInTopLevel())
    return MandatoryInlinedLocation::getModuleLocation(L.getSpecialFlags());

  llvm_unreachable("Cannot construct Inlined loc from the given location.");
}
/// Emit a check for whether a non-native function call produced an
/// error.
///
/// \c results should be left with only values that match the formal
/// direct results of the function.
void
SILGenFunction::emitForeignErrorCheck(SILLocation loc,
                                      SmallVectorImpl<ManagedValue> &results,
                                      ManagedValue errorSlot,
                                      bool suppressErrorCheck,
                                const ForeignErrorConvention &foreignError) {
  // All of this is autogenerated.
  loc.markAutoGenerated();

  switch (foreignError.getKind()) {
  case ForeignErrorConvention::ZeroPreservedResult:
    assert(results.size() == 1);
    emitResultIsZeroErrorCheck(*this, loc, results[0], errorSlot,
                               suppressErrorCheck,
                               /*zeroIsError*/ true);
    return;
  case ForeignErrorConvention::ZeroResult:
    assert(results.size() == 1);
    emitResultIsZeroErrorCheck(*this, loc, results.pop_back_val(),
                               errorSlot, suppressErrorCheck,
                               /*zeroIsError*/ true);
    return;
  case ForeignErrorConvention::NonZeroResult:
    assert(results.size() == 1);
    emitResultIsZeroErrorCheck(*this, loc, results.pop_back_val(),
                               errorSlot, suppressErrorCheck,
                               /*zeroIsError*/ false);
    return;
  case ForeignErrorConvention::NilResult:
    assert(results.size() == 1);
    results[0] = emitResultIsNilErrorCheck(*this, loc, results[0], errorSlot,
                                           suppressErrorCheck);
    return;
  case ForeignErrorConvention::NonNilError:
    // Leave the direct results alone.
    emitErrorIsNonNilErrorCheck(*this, loc, errorSlot, suppressErrorCheck);
    return;
  }
  llvm_unreachable("bad foreign error convention kind");
}
Example #16
static void emitSourceLocationArgs(SILGenFunction &gen,
                                   SILLocation loc,
                                   ManagedValue (&args)[4]) {
  auto &ctx = gen.getASTContext();
  auto sourceLoc = loc.getSourceLoc();
  
  StringRef filename = "";
  unsigned line = 0;
  if (sourceLoc.isValid()) {
    unsigned bufferID = ctx.SourceMgr.findBufferContainingLoc(sourceLoc);
    filename = ctx.SourceMgr.getIdentifierForBuffer(bufferID);
    line = ctx.SourceMgr.getLineAndColumn(sourceLoc).first;
  }
  
  bool isASCII = true;
  for (unsigned char c : filename) {
    if (c > 127) {
      isASCII = false;
      break;
    }
  }
  
  auto wordTy = SILType::getBuiltinWordType(ctx);
  auto i1Ty = SILType::getBuiltinIntegerType(1, ctx);
  
  // File
  SILValue literal = gen.B.createStringLiteral(loc, filename,
                                             StringLiteralInst::Encoding::UTF8);
  args[0] = ManagedValue::forUnmanaged(literal);
  // File length
  literal = gen.B.createIntegerLiteral(loc, wordTy, filename.size());
  args[1] = ManagedValue::forUnmanaged(literal);
  // File is ascii
  literal = gen.B.createIntegerLiteral(loc, i1Ty, isASCII);
  args[2] = ManagedValue::forUnmanaged(literal);
  // Line
  literal = gen.B.createIntegerLiteral(loc, wordTy, line);
  args[3] = ManagedValue::forUnmanaged(literal);
}
Example #17
/// Emits an explanatory note if there is useful information to note or if there
/// is an interesting SourceLoc to point at.
/// Returns true if a diagnostic was emitted.
static bool emitNoteDiagnostic(SILInstruction *badInst, UnknownReason reason,
                               SILLocation fallbackLoc) {
  auto loc = skipInternalLocations(badInst->getDebugLocation()).getLocation();
  if (loc.isNull()) {
    // If we have important clarifying information, make sure to emit it.
    if (reason == UnknownReason::Default || fallbackLoc.isNull())
      return false;
    loc = fallbackLoc;
  }

  auto &ctx = badInst->getModule().getASTContext();
  auto sourceLoc = loc.getSourceLoc();
  switch (reason) {
  case UnknownReason::Default:
    diagnose(ctx, sourceLoc, diag::constexpr_unknown_reason_default)
        .highlight(loc.getSourceRange());
    break;
  case UnknownReason::TooManyInstructions:
    // TODO: Should pop up a level of the stack trace.
    diagnose(ctx, sourceLoc, diag::constexpr_too_many_instructions,
             ConstExprLimit)
        .highlight(loc.getSourceRange());
    break;
  case UnknownReason::Loop:
    diagnose(ctx, sourceLoc, diag::constexpr_loop)
        .highlight(loc.getSourceRange());
    break;
  case UnknownReason::Overflow:
    diagnose(ctx, sourceLoc, diag::constexpr_overflow)
        .highlight(loc.getSourceRange());
    break;
  case UnknownReason::Trap:
    diagnose(ctx, sourceLoc, diag::constexpr_trap)
        .highlight(loc.getSourceRange());
    break;
  }
  return true;
}
/// \brief Issue an "unreachable code" diagnostic if the block contains or
/// leads to another block that contains user code.
///
/// Note, we rely on SILLocation information to determine if SILInstructions
/// correspond to user code.
static bool diagnoseUnreachableBlock(const SILBasicBlock &B,
                                     SILModule &M,
                                     const SILBasicBlockSet &Reachable,
                                     UnreachableUserCodeReportingState *State,
                                     const SILBasicBlock *TopLevelB,
                         llvm::SmallPtrSetImpl<const SILBasicBlock*> &Visited) {
  if (Visited.count(&B))
    return false;
  Visited.insert(&B);
  
  assert(State);
  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    SILLocation Loc = I->getLoc();
    // If we've reached an implicit return, we have not found any user code and
    // can stop searching for it.
    if (Loc.is<ImplicitReturnLocation>() || Loc.isAutoGenerated())
      return false;

    // Check if the instruction corresponds to user-written code; also make
    // sure we don't report an error twice for the same instruction.
    if (isUserCode(&*I) && !State->BlocksWithErrors.count(&B)) {

      // Emit the diagnostic.
      auto BrInfoIter = State->MetaMap.find(TopLevelB);
      assert(BrInfoIter != State->MetaMap.end());
      auto BrInfo = BrInfoIter->second;

      switch (BrInfo.Kind) {
      case (UnreachableKind::FoldedBranch):
        // Emit the diagnostic on the unreachable block and emit the
        // note on the branch responsible for the unreachable code.
        diagnose(M.getASTContext(), Loc.getSourceLoc(), diag::unreachable_code);
        diagnose(M.getASTContext(), BrInfo.Loc.getSourceLoc(),
                 diag::unreachable_code_branch, BrInfo.CondIsAlwaysTrue);
        break;

      case (UnreachableKind::FoldedSwitchEnum): {
        // If we are warning about a switch condition being a constant, the main
        // emphasis should be on the condition (to ensure we have a single
        // message per switch).
        const SwitchStmt *SS = BrInfo.Loc.getAsASTNode<SwitchStmt>();
        if (!SS)
          break;
        const Expr *SE = SS->getSubjectExpr();
        diagnose(M.getASTContext(), SE->getLoc(), diag::switch_on_a_constant);
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::unreachable_code_note);
        break;
      }

      case (UnreachableKind::NoreturnCall): {
        // Special case when we are warning about unreachable code after a call
        // to a noreturn function.
        if (!BrInfo.Loc.isASTNode<ExplicitCastExpr>()) {
          assert(BrInfo.Loc.isASTNode<ApplyExpr>());
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::unreachable_code);
          diagnose(M.getASTContext(), BrInfo.Loc.getSourceLoc(),
                   diag::call_to_noreturn_note);
        }
        break;
      }
      }

      // Record that we've reported this unreachable block to avoid duplicates
      // in the future.
      State->BlocksWithErrors.insert(&B);
      return true;
    }
  }

  // This block could be empty if its terminator has been folded.
  if (B.empty())
    return false;

  // If we have not found user code in this block, inspect its successors.
  // Check if at least one of the successors contains user code.
  for (auto I = B.succ_begin(), E = B.succ_end(); I != E; ++I) {
    SILBasicBlock *SB = *I;
    bool HasReachablePred = false;
    for (auto PI = SB->pred_begin(), PE = SB->pred_end(); PI != PE; ++PI) {
      if (Reachable.count(*PI))
        HasReachablePred = true;
    }

    // If all of the predecessors of this successor are unreachable, check if
    // it contains user code.
    if (!HasReachablePred && diagnoseUnreachableBlock(*SB, M, Reachable,
                                                    State, TopLevelB, Visited))
      return true;
  }
  
  return false;
}
Example #19
/// \brief Inlines all mandatory inlined functions into the body of a function,
/// first recursively inlining all mandatory apply instructions in those
/// functions into their bodies if necessary.
///
/// \param F the function to be processed
/// \param AI nullptr if this is being called from the top level; the relevant
///   ApplyInst requiring the recursive call when non-null
/// \param FullyInlinedSet the set of all functions already known to be fully
///   processed, to avoid processing them over again
/// \param SetFactory an instance of ImmutableFunctionSet::Factory
/// \param CurrentInliningSet the set of functions currently being inlined in
///   the current call stack of recursive calls
///
/// \returns true if successful, false if failed due to circular inlining.
static bool
runOnFunctionRecursively(SILFunction *F, FullApplySite AI,
                         SILModule::LinkingMode Mode,
                         DenseFunctionSet &FullyInlinedSet,
                         ImmutableFunctionSet::Factory &SetFactory,
                         ImmutableFunctionSet CurrentInliningSet,
                         ClassHierarchyAnalysis *CHA) {
  // Avoid reprocessing functions needlessly.
  if (FullyInlinedSet.count(F))
    return true;

  // Prevent attempt to circularly inline.
  if (CurrentInliningSet.contains(F)) {
    // This cannot happen on a top-level call, so AI should be non-null.
    assert(AI && "Cannot have circular inline without apply");
    SILLocation L = AI.getLoc();
    assert(L && "Must have location for transparent inline apply");
    diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
             diag::circular_transparent);
    return false;
  }

  // Add to the current inlining set (immutably, so we only affect the set
  // during this call and recursive subcalls).
  CurrentInliningSet = SetFactory.add(CurrentInliningSet, F);

  SmallVector<SILValue, 16> CaptureArgs;
  SmallVector<SILValue, 32> FullArgs;

  for (auto FI = F->begin(), FE = F->end(); FI != FE; ++FI) {
    for (auto I = FI->begin(), E = FI->end(); I != E; ++I) {
      FullApplySite InnerAI = FullApplySite::isa(&*I);

      if (!InnerAI)
        continue;

      auto *ApplyBlock = InnerAI.getParent();

      auto NewInstPair = tryDevirtualizeApply(InnerAI, CHA);
      if (auto *NewInst = NewInstPair.first) {
        replaceDeadApply(InnerAI, NewInst);
        if (auto *II = dyn_cast<SILInstruction>(NewInst))
          I = II->getIterator();
        else
          I = NewInst->getParentBlock()->begin();
        auto NewAI = FullApplySite::isa(NewInstPair.second.getInstruction());
        if (!NewAI)
          continue;

        InnerAI = NewAI;
      }

      SILLocation Loc = InnerAI.getLoc();
      SILValue CalleeValue = InnerAI.getCallee();
      bool IsThick;
      PartialApplyInst *PAI;
      SILFunction *CalleeFunction = getCalleeFunction(InnerAI, IsThick,
                                                      CaptureArgs, FullArgs,
                                                      PAI,
                                                      Mode);
      if (!CalleeFunction ||
          CalleeFunction->isTransparent() == IsNotTransparent)
        continue;

      if (F->isFragile() &&
          !CalleeFunction->hasValidLinkageForFragileRef()) {
        if (!CalleeFunction->hasValidLinkageForFragileInline()) {
          llvm::errs() << "caller: " << F->getName() << "\n";
          llvm::errs() << "callee: " << CalleeFunction->getName() << "\n";
          llvm_unreachable("Should never be inlining a resilient function into "
                           "a fragile function");
        }
        continue;
      }

      // Then recursively process it first before trying to inline it.
      if (!runOnFunctionRecursively(CalleeFunction, InnerAI, Mode,
                                    FullyInlinedSet, SetFactory,
                                    CurrentInliningSet, CHA)) {
        // If we failed due to circular inlining, then emit some notes to
        // trace back the failure if we have more information.
        // FIXME: possibly it could be worth recovering and attempting other
        // inlines within this same recursive call rather than simply
        // propagating the failure.
        if (AI) {
          SILLocation L = AI.getLoc();
          assert(L && "Must have location for transparent inline apply");
          diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
                   diag::note_while_inlining);
        }
        return false;
      }

      // Inline function at I, which also changes I to refer to the first
      // instruction inlined in the case that it succeeds. We purposely
      // process the inlined body after inlining, because the inlining may
      // have exposed new inlining opportunities beyond those present in
      // the inlined function when processed independently.
      DEBUG(llvm::errs() << "Inlining @" << CalleeFunction->getName()
                         << " into @" << InnerAI.getFunction()->getName()
                         << "\n");

      // If we intend to inline a thick function, then we need to balance the
      // reference counts for correctness.
      if (IsThick && I != ApplyBlock->begin()) {
        // We need to find an appropriate location for our fix-up code.
        // We used to do this after inlining, without any modifications, which
        // caused us to add a release in the wrong place: it would release a
        // value *before* retaining it! Doing this after inlining is genuinely
        // problematic, because finding a valid insertion point is tricky:
        // inlining might add new basic blocks and/or remove the apply, and we
        // want to add the fix-up *just before* where the current apply is.
        // Unfortunately, we *can't* add the fix-up code here either, because
        // inlining might fail for any reason, and we would then need to undo
        // it. Instead, we split the current basic block, making sure we have
        // a basic block that starts with our apply.
        SILBuilderWithScope B(I);
        ApplyBlock = splitBasicBlockAndBranch(B, &*I, nullptr, nullptr);
        I = ApplyBlock->begin();
      }

      // Decrement our iterator (carefully, to avoid going off the front) so it
      // is valid after inlining is done.  Inlining deletes the apply, and can
      // introduce multiple new basic blocks.
      if (I != ApplyBlock->begin())
        --I;
      else
        I = ApplyBlock->end();

      std::vector<Substitution> ApplySubs(InnerAI.getSubstitutions());

      if (PAI) {
        auto PAISubs = PAI->getSubstitutions();
        ApplySubs.insert(ApplySubs.end(), PAISubs.begin(), PAISubs.end());
      }

      SILOpenedArchetypesTracker OpenedArchetypesTracker(*F);
      F->getModule().registerDeleteNotificationHandler(
          &OpenedArchetypesTracker);
      // The callee only needs to know about opened archetypes used in
      // the substitution list.
      OpenedArchetypesTracker.registerUsedOpenedArchetypes(InnerAI.getInstruction());
      if (PAI) {
        OpenedArchetypesTracker.registerUsedOpenedArchetypes(PAI);
      }

      SILInliner Inliner(*F, *CalleeFunction,
                         SILInliner::InlineKind::MandatoryInline,
                         ApplySubs, OpenedArchetypesTracker);
      if (!Inliner.inlineFunction(InnerAI, FullArgs)) {
        I = InnerAI.getInstruction()->getIterator();
        continue;
      }

      // Inlining was successful. Remove the apply.
      InnerAI.getInstruction()->eraseFromParent();

      // Reestablish our iterator if it wrapped.
      if (I == ApplyBlock->end())
        I = ApplyBlock->begin();

      // Update the iterator when instructions are removed.
      DeleteInstructionsHandler DeletionHandler(I);

      // If the inlined apply was a thick function, then we need to balance the
      // reference counts for correctness.
      if (IsThick)
        fixupReferenceCounts(I, Loc, CalleeValue, CaptureArgs);

      // Now that the IR is correct, see if we can remove dead callee
      // computations (e.g. dead partial_apply closures).
      cleanupCalleeValue(CalleeValue, CaptureArgs, FullArgs);

      // Reposition iterators possibly invalidated by mutation.
      FI = SILFunction::iterator(ApplyBlock);
      E = ApplyBlock->end();
      assert(FI == SILFunction::iterator(I->getParent()) &&
             "Mismatch between the instruction and basic block");
      ++NumMandatoryInlines;
    }
  }

  // Keep track of full inlined functions so we don't waste time recursively
  // reprocessing them.
  FullyInlinedSet.insert(F);
  return true;
}
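// Top-level driver sketch (hypothetical): how a pass entry point might seed
// the recursion above. FullApplySite() is the null apply site used for the
// top-level call, and Factory::getEmptySet() is the usual llvm::ImmutableSet
// starting point; the loop shape is an assumption for illustration.
static void inlineMandatoryFunctions(SILModule &M, SILModule::LinkingMode Mode,
                                     ClassHierarchyAnalysis *CHA) {
  DenseFunctionSet FullyInlinedSet;
  ImmutableFunctionSet::Factory SetFactory;
  for (auto &F : M)
    runOnFunctionRecursively(&F, FullApplySite(), Mode, FullyInlinedSet,
                             SetFactory, SetFactory.getEmptySet(), CHA);
}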
void SILGenFunction::emitClassConstructorInitializer(ConstructorDecl *ctor) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ctor);

  assert(ctor->getBody() && "Class constructor without a body?");

  // True if this constructor delegates to a peer constructor with self.init().
  bool isDelegating = false;
  if (!ctor->hasStubImplementation()) {
    isDelegating = ctor->getDelegatingOrChainedInitKind(nullptr) ==
      ConstructorDecl::BodyInitKind::Delegating;
  }

  // Set up the 'self' argument.  If this class has a superclass, we set up
  // self as a box.  This allows "self reassignment" to happen in super init
  // method chains, which is important for interoperating with Objective-C
  // classes.  We also use a box for delegating constructors, since the
  // delegated-to initializer may also replace self.
  //
  // TODO: If we could require Objective-C classes to have an attribute to get
  // this behavior, we could avoid runtime overhead here.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  auto *dc = ctor->getDeclContext();
  auto selfClassDecl = dc->getAsClassOrClassExtensionContext();
  bool NeedsBoxForSelf = isDelegating ||
    (selfClassDecl->hasSuperclass() && !ctor->hasStubImplementation());
  bool usesObjCAllocator = Lowering::usesObjCAllocator(selfClassDecl);

  // If needed, mark 'self' as uninitialized so that DI knows to
  // enforce its DI properties on stored properties.
  MarkUninitializedInst::Kind MUKind;

  if (isDelegating)
    MUKind = MarkUninitializedInst::DelegatingSelf;
  else if (selfClassDecl->requiresStoredPropertyInits() &&
           usesObjCAllocator) {
    // Stored properties will be initialized in a separate
    // .cxx_construct method called by the Objective-C runtime.
    assert(selfClassDecl->hasSuperclass() &&
           "Cannot use ObjC allocation without a superclass");
    MUKind = MarkUninitializedInst::DerivedSelfOnly;
  } else if (selfClassDecl->hasSuperclass())
    MUKind = MarkUninitializedInst::DerivedSelf;
  else
    MUKind = MarkUninitializedInst::RootSelf;

  if (NeedsBoxForSelf) {
    // Allocate the local variable for 'self'.
    emitLocalVariableWithCleanup(selfDecl, false)->finishInitialization(*this);

    auto &SelfVarLoc = VarLocs[selfDecl];
    SelfVarLoc.value = B.createMarkUninitialized(selfDecl,
                                                 SelfVarLoc.value, MUKind);
  }

  // Emit the prolog for the non-self arguments.
  // FIXME: Handle self along with the other body patterns.
  emitProlog(ctor->getParameterList(1),
             TupleType::getEmpty(F.getASTContext()), ctor, ctor->hasThrows());

  SILType selfTy = getLoweredLoadableType(selfDecl->getType());
  SILValue selfArg = new (SGM.M) SILArgument(F.begin(), selfTy, selfDecl);

  if (!NeedsBoxForSelf) {
    SILLocation PrologueLoc(selfDecl);
    PrologueLoc.markAsPrologue();
    B.createDebugValue(PrologueLoc, selfArg);
  }

  if (!ctor->hasStubImplementation()) {
    assert(selfTy.hasReferenceSemantics() &&
           "can't emit a value type ctor here");
    if (NeedsBoxForSelf) {
      SILLocation prologueLoc = RegularLocation(ctor);
      prologueLoc.markAsPrologue();
      B.createStore(prologueLoc, selfArg, VarLocs[selfDecl].value,
                    StoreOwnershipQualifier::Unqualified);
    } else {
      selfArg = B.createMarkUninitialized(selfDecl, selfArg, MUKind);
      VarLocs[selfDecl] = VarLoc::get(selfArg);
      enterDestroyCleanup(VarLocs[selfDecl].value);
    }
  }

  // Prepare the end of initializer location.
  SILLocation endOfInitLoc = RegularLocation(ctor);
  endOfInitLoc.pointToEnd();

  // Create a basic block to jump to for the implicit 'self' return.
  // We won't emit the block until after we've emitted the body.
  prepareEpilog(Type(), ctor->hasThrows(),
                CleanupLocation::get(endOfInitLoc));

  // If the constructor can fail, set up an alternative epilog for constructor
  // failure.
  SILBasicBlock *failureExitBB = nullptr;
  SILArgument *failureExitArg = nullptr;
  auto &resultLowering = getTypeLowering(ctor->getResultType());

  if (ctor->getFailability() != OTK_None) {
    SILBasicBlock *failureBB = createBasicBlock(FunctionSection::Postmatter);

    RegularLocation loc(ctor);
    loc.markAutoGenerated();

    // On failure, we'll clean up everything and return nil instead.
    SavedInsertionPoint savedIP(*this, failureBB, FunctionSection::Postmatter);

    failureExitBB = createBasicBlock();
    failureExitArg = new (F.getModule())
      SILArgument(failureExitBB, resultLowering.getLoweredType());

    Cleanups.emitCleanupsForReturn(ctor);
    SILValue nilResult = B.createEnum(loc, {},
                                      getASTContext().getOptionalNoneDecl(),
                                      resultLowering.getLoweredType());
    B.createBranch(loc, failureExitBB, nilResult);

    B.setInsertionPoint(failureExitBB);
    B.createReturn(loc, failureExitArg);

    FailDest = JumpDest(failureBB, Cleanups.getCleanupsDepth(), ctor);
  }

  // Handle member initializers.
  if (isDelegating) {
    // A delegating initializer does not initialize instance
    // variables.
  } else if (ctor->hasStubImplementation()) {
    // Nor does a stub implementation.
  } else if (selfClassDecl->requiresStoredPropertyInits() &&
             usesObjCAllocator) {
    // When the class requires all stored properties to have initial
    // values and we're using Objective-C's allocation, stored
    // properties are initialized via the .cxx_construct method, which
    // will be called by the runtime.

    // Note that 'self' has been fully initialized at this point.
  } else {
    // Emit the member initializers.
    emitMemberInitializers(dc, selfDecl, selfClassDecl);
  }

  emitProfilerIncrement(ctor->getBody());
  // Emit the constructor body.
  emitStmt(ctor->getBody());

  
  // Build a custom epilog block, since the AST representation of the
  // constructor decl (which has no self in the return type) doesn't match the
  // SIL representation.
  {
    SavedInsertionPoint savedIP(*this, ReturnDest.getBlock());
    assert(B.getInsertionBB()->empty() && "Epilog already set up?");
    auto cleanupLoc = CleanupLocation(ctor);

    // If we're using a box for self, reload the value at the end of the init
    // method.
    if (NeedsBoxForSelf) {
      // Emit the call to super.init() right before exiting from the initializer.
      if (Expr *SI = ctor->getSuperInitCall())
        emitRValue(SI);

      selfArg = B.createLoad(cleanupLoc, VarLocs[selfDecl].value,
                             LoadOwnershipQualifier::Unqualified);
    }
    
    // We have to do a retain because we are returning the pointer +1.
    B.emitCopyValueOperation(cleanupLoc, selfArg);

    // Inject the self value into an optional if the constructor is failable.
    if (ctor->getFailability() != OTK_None) {
      RegularLocation loc(ctor);
      loc.markAutoGenerated();
      selfArg = B.createEnum(loc, selfArg,
                             getASTContext().getOptionalSomeDecl(),
                             getLoweredLoadableType(ctor->getResultType()));
    }
  }
  
  // Emit the epilog and post-matter.
  auto returnLoc = emitEpilog(ctor, /*UsesCustomEpilog*/true);

  // Finish off the epilog by returning.  If this is a failable ctor, then we
  // actually jump to the failure epilog to keep the invariant that there is
  // only one SIL return instruction per SIL function.
  if (B.hasValidInsertionPoint()) {
    if (failureExitBB)
      B.createBranch(returnLoc, failureExitBB, selfArg);
    else
      B.createReturn(returnLoc, selfArg);
  }
}
/// \brief Inlines all mandatory inlined functions into the body of a function,
/// first recursively inlining all mandatory apply instructions in those
/// functions into their bodies if necessary.
///
/// \param F the function to be processed
/// \param AI nullptr if this is being called from the top level; the relevant
///   ApplyInst requiring the recursive call when non-null
/// \param FullyInlinedSet the set of all functions already known to be fully
///   processed, to avoid processing them over again
/// \param SetFactory an instance of ImmutableFunctionSet::Factory
/// \param CurrentInliningSet the set of functions currently being inlined in
///   the current call stack of recursive calls
///
/// \returns true if successful, false if failed due to circular inlining.
static bool
runOnFunctionRecursively(SILFunction *F, FullApplySite AI,
                         DenseFunctionSet &FullyInlinedSet,
                         ImmutableFunctionSet::Factory &SetFactory,
                         ImmutableFunctionSet CurrentInliningSet,
                         ClassHierarchyAnalysis *CHA) {
  // Avoid reprocessing functions needlessly.
  if (FullyInlinedSet.count(F))
    return true;

  // Prevent attempt to circularly inline.
  if (CurrentInliningSet.contains(F)) {
    // This cannot happen on a top-level call, so AI should be non-null.
    assert(AI && "Cannot have circular inline without apply");
    SILLocation L = AI.getLoc();
    assert(L && "Must have location for transparent inline apply");
    diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
             diag::circular_transparent);
    return false;
  }

  // Add to the current inlining set (immutably, so we only affect the set
  // during this call and recursive subcalls).
  CurrentInliningSet = SetFactory.add(CurrentInliningSet, F);

  SmallVector<std::pair<SILValue, ParameterConvention>, 16> CaptureArgs;
  SmallVector<SILValue, 32> FullArgs;

  for (auto BI = F->begin(), BE = F->end(); BI != BE; ++BI) {
    for (auto II = BI->begin(), IE = BI->end(); II != IE; ++II) {
      FullApplySite InnerAI = FullApplySite::isa(&*II);

      if (!InnerAI)
        continue;

      auto *ApplyBlock = InnerAI.getParent();

      // *NOTE* If devirtualization succeeds, sometimes II will not be InnerAI,
      // but a casted result of InnerAI or even a block argument due to
      // abstraction changes when calling the witness or class method. We still
      // know that InnerAI dominates II though.
      std::tie(InnerAI, II) = tryDevirtualizeApplyHelper(InnerAI, II, CHA);
      if (!InnerAI)
        continue;

      SILValue CalleeValue = InnerAI.getCallee();
      bool IsThick;
      PartialApplyInst *PAI;
      SILFunction *CalleeFunction = getCalleeFunction(
          F, InnerAI, IsThick, CaptureArgs, FullArgs, PAI);

      if (!CalleeFunction)
        continue;

      // Then recursively process it first before trying to inline it.
      if (!runOnFunctionRecursively(CalleeFunction, InnerAI,
                                    FullyInlinedSet, SetFactory,
                                    CurrentInliningSet, CHA)) {
        // If we failed due to circular inlining, then emit some notes to
        // trace back the failure if we have more information.
        // FIXME: possibly it could be worth recovering and attempting other
        // inlines within this same recursive call rather than simply
        // propagating the failure.
        if (AI) {
          SILLocation L = AI.getLoc();
          assert(L && "Must have location for transparent inline apply");
          diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
                   diag::note_while_inlining);
        }
        return false;
      }

      // Get our list of substitutions.
      auto Subs = (PAI
                   ? PAI->getSubstitutionMap()
                   : InnerAI.getSubstitutionMap());

      SILOpenedArchetypesTracker OpenedArchetypesTracker(F);
      F->getModule().registerDeleteNotificationHandler(
          &OpenedArchetypesTracker);
      // The callee only needs to know about opened archetypes used in
      // the substitution list.
      OpenedArchetypesTracker.registerUsedOpenedArchetypes(
          InnerAI.getInstruction());
      if (PAI) {
        OpenedArchetypesTracker.registerUsedOpenedArchetypes(PAI);
      }

      SILInliner Inliner(*F, *CalleeFunction,
                         SILInliner::InlineKind::MandatoryInline, Subs,
                         OpenedArchetypesTracker);
      if (!Inliner.canInlineFunction(InnerAI)) {
        // See comment above about casting when devirtualizing and how this
        // sometimes causes II and InnerAI to be different and even in different
        // blocks.
        II = InnerAI.getInstruction()->getIterator();
        continue;
      }

      // Inline function at I, which also changes I to refer to the first
      // instruction inlined in the case that it succeeds. We purposely
      // process the inlined body after inlining, because the inlining may
      // have exposed new inlining opportunities beyond those present in
      // the inlined function when processed independently.
      LLVM_DEBUG(llvm::errs() << "Inlining @" << CalleeFunction->getName()
                              << " into @" << InnerAI.getFunction()->getName()
                              << "\n");

      // If we intend to inline a thick function, then we need to balance the
      // reference counts for correctness.
      if (IsThick) {
        bool IsCalleeGuaranteed =
            PAI &&
            PAI->getType().castTo<SILFunctionType>()->isCalleeGuaranteed();
        fixupReferenceCounts(II, CalleeValue, CaptureArgs, IsCalleeGuaranteed);
      }

      // Decrement our iterator (carefully, to avoid going off the front) so it
      // is valid after inlining is done.  Inlining deletes the apply, and can
      // introduce multiple new basic blocks.
      II = prev_or_default(II, ApplyBlock->begin(), ApplyBlock->end());

      Inliner.inlineFunction(InnerAI, FullArgs);

      // We were able to inline successfully. Remove the apply.
      InnerAI.getInstruction()->eraseFromParent();

      // Reestablish our iterator if it wrapped.
      if (II == ApplyBlock->end())
        II = ApplyBlock->begin();

      // Update the iterator when instructions are removed.
      DeleteInstructionsHandler DeletionHandler(II);

      // Now that the IR is correct, see if we can remove dead callee
      // computations (e.g. dead partial_apply closures).
      cleanupCalleeValue(CalleeValue, FullArgs);

      // Reposition iterators possibly invalidated by mutation.
      BI = SILFunction::iterator(ApplyBlock);
      IE = ApplyBlock->end();
      assert(BI == SILFunction::iterator(II->getParent()) &&
             "Mismatch between the instruction and basic block");
      ++NumMandatoryInlines;
    }
  }

  // Keep track of full inlined functions so we don't waste time recursively
  // reprocessing them.
  FullyInlinedSet.insert(F);
  return true;
}
void MaterializeForSetEmitter::emit(SILGenFunction &gen, ManagedValue self,
                                    SILValue resultBuffer,
                                    SILValue callbackBuffer,
                                    ArrayRef<ManagedValue> indices) {
  SILLocation loc = Witness;
  loc.markAutoGenerated();

  // If there's an abstraction difference, we always need to use the
  // get/set pattern.
  AccessStrategy strategy;
  if (WitnessStorage->getType()->is<ReferenceStorageType>() ||
      (Conformance && RequirementStorageType != WitnessStorageType)) {
    strategy = AccessStrategy::DispatchToAccessor;
  } else {
    strategy = WitnessStorage->getAccessStrategy(TheAccessSemantics,
                                                 AccessKind::ReadWrite);
  }

  // Handle the indices.
  RValue indicesRV;
  if (isa<SubscriptDecl>(WitnessStorage)) {
    indicesRV = collectIndicesFromParameters(gen, loc, indices);
  } else {
    assert(indices.empty() && "indices for a non-subscript?");
  }

  // As above, assume that we don't need to reabstract 'self'.

  // Choose the right implementation.
  SILValue address;
  SILFunction *callbackFn = nullptr;
  switch (strategy) {
  case AccessStrategy::Storage:
    address = emitUsingStorage(gen, loc, self, std::move(indicesRV));
    break;

  case AccessStrategy::Addressor:
    address = emitUsingAddressor(gen, loc, self, std::move(indicesRV),
                                 callbackBuffer, callbackFn);
    break;

  case AccessStrategy::DirectToAccessor:
  case AccessStrategy::DispatchToAccessor:
    address = emitUsingGetterSetter(gen, loc, self, std::move(indicesRV),
                                    resultBuffer, callbackBuffer, callbackFn);
    break;
  }

  // Return the address as a Builtin.RawPointer.
  SILType rawPointerTy = SILType::getRawPointerType(gen.getASTContext());
  address = gen.B.createAddressToPointer(loc, address, rawPointerTy);

  SILType resultTupleTy = gen.F.mapTypeIntoContext(
                 gen.F.getLoweredFunctionType()->getResult().getSILType());
  SILType optCallbackTy = resultTupleTy.getTupleElementType(1);

  // Form the callback.
  SILValue callback;
  if (callbackFn) {
    // Make a reference to the function.
    callback = gen.B.createFunctionRef(loc, callbackFn);

    // If it's polymorphic, cast to RawPointer and then back to the
    // right monomorphic type.  The safety of this cast relies on some
    // assumptions about what exactly IRGen can reconstruct from the
    // callback's thick type argument.
    if (callbackFn->getLoweredFunctionType()->isPolymorphic()) {
      callback = gen.B.createThinFunctionToPointer(loc, callback, rawPointerTy);

      OptionalTypeKind optKind;
      auto callbackTy = optCallbackTy.getAnyOptionalObjectType(SGM.M, optKind);
      callback = gen.B.createPointerToThinFunction(loc, callback, callbackTy);
    }

    callback = gen.B.createOptionalSome(loc, callback, optCallbackTy);
  } else {
    callback = gen.B.createOptionalNone(loc, optCallbackTy);
  }

  // Form the result and return.
  auto result = gen.B.createTuple(loc, resultTupleTy, { address, callback });
  gen.Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc));
  gen.B.createReturn(loc, result);
}
/// Do a syntactic pattern match to determine whether the call is a call
/// to swap(&base[index1], &base[index2]), which can
/// be replaced with a call to MutableCollection.swapAt(_:_:) on base.
///
/// Returns true if the call can be replaced. Returns the call expression,
/// the base expression, and the two indices as out expressions.
///
/// This method takes an array of all the ApplyInsts for calls to swap()
/// in the function to avoid needing to construct a parent map over the AST
/// to find the CallExpr for the inout accesses.
static bool
canReplaceWithCallToCollectionSwapAt(const BeginAccessInst *Access1,
                                     const BeginAccessInst *Access2,
                                     ArrayRef<ApplyInst *> CallsToSwap,
                                     ASTContext &Ctx,
                                     CallExpr *&FoundCall,
                                     Expr *&Base,
                                     Expr *&Index1,
                                     Expr *&Index2) {
  if (CallsToSwap.empty())
    return false;

  // Inout arguments must be modifications.
  if (Access1->getAccessKind() != SILAccessKind::Modify ||
      Access2->getAccessKind() != SILAccessKind::Modify) {
    return false;
  }

  SILLocation Loc1 = Access1->getLoc();
  SILLocation Loc2 = Access2->getLoc();
  if (Loc1.isNull() || Loc2.isNull())
    return false;

  auto *InOut1 = Loc1.getAsASTNode<InOutExpr>();
  auto *InOut2 = Loc2.getAsASTNode<InOutExpr>();
  if (!InOut1 || !InOut2)
    return false;

  FoundCall = nullptr;
  // Look through all the calls to swap() recorded in the function to find
  // which one we're diagnosing.
  for (ApplyInst *AI : CallsToSwap) {
    SILLocation CallLoc = AI->getLoc();
    if (CallLoc.isNull())
      continue;

    auto *CE = CallLoc.getAsASTNode<CallExpr>();
    if (!CE)
      continue;

    assert(isCallToStandardLibrarySwap(CE, Ctx));
    // swap() takes two arguments.
    auto *ArgTuple = cast<TupleExpr>(CE->getArg());
    const Expr *Arg1 = ArgTuple->getElement(0);
    const Expr *Arg2 = ArgTuple->getElement(1);
    if (Arg1 == InOut1 && Arg2 == InOut2) {
      FoundCall = CE;
      break;
    }
  }
  if (!FoundCall)
    return false;

  // We found a call to swap(&e1, &e2). Now check to see whether it
  // matches the form swap(&someCollection[index1], &someCollection[index2]).
  auto *SE1 = dyn_cast<SubscriptExpr>(InOut1->getSubExpr());
  if (!SE1)
    return false;
  auto *SE2 = dyn_cast<SubscriptExpr>(InOut2->getSubExpr());
  if (!SE2)
    return false;

  // Do the two subscripts refer to the same subscript declaration?
  auto *Decl1 = cast<SubscriptDecl>(SE1->getDecl().getDecl());
  auto *Decl2 = cast<SubscriptDecl>(SE2->getDecl().getDecl());
  if (Decl1 != Decl2)
    return false;

  ProtocolDecl *MutableCollectionDecl = Ctx.getMutableCollectionDecl();

  // Is the subscript either (1) on MutableCollection itself or (2) a
  // witness for a subscript on MutableCollection?
  bool IsSubscriptOnMutableCollection = false;
  ProtocolDecl *ProtocolForDecl =
      Decl1->getDeclContext()->getSelfProtocolDecl();
  if (ProtocolForDecl) {
    IsSubscriptOnMutableCollection = (ProtocolForDecl == MutableCollectionDecl);
  } else {
    for (ValueDecl *Req : Decl1->getSatisfiedProtocolRequirements()) {
      DeclContext *ReqDC = Req->getDeclContext();
      ProtocolDecl *ReqProto = ReqDC->getSelfProtocolDecl();
      assert(ReqProto && "Protocol requirement not in a protocol?");

      if (ReqProto == MutableCollectionDecl) {
        IsSubscriptOnMutableCollection = true;
        break;
      }
    }
  }

  if (!IsSubscriptOnMutableCollection)
    return false;

  // We're swapping two subscripts on mutable collections -- but are they
  // the same collection? Approximate this by checking for textual
  // equality on the base expressions. This is just an approximation,
  // but is fine for a best-effort Fix-It.
  SourceManager &SM = Ctx.SourceMgr;
  StringRef Base1Text = extractExprText(SE1->getBase(), SM);
  StringRef Base2Text = extractExprText(SE2->getBase(), SM);

  if (Base1Text != Base2Text)
    return false;

  auto *Index1Paren = dyn_cast<ParenExpr>(SE1->getIndex());
  if (!Index1Paren)
    return false;

  auto *Index2Paren = dyn_cast<ParenExpr>(SE2->getIndex());
  if (!Index2Paren)
    return false;

  Base = SE1->getBase();
  Index1 = Index1Paren->getSubExpr();
  Index2 = Index2Paren->getSubExpr();
  return true;
}
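// Follow-up sketch (hypothetical helper): once the pattern matches, the
// replacement text for a best-effort fix-it could be assembled from the
// out-parameters, reusing extractExprText from the function above. The
// ".swapAt" spelling comes from the doc comment; the formatting here is
// illustrative.
static std::string buildSwapAtFixItText(Expr *Base, Expr *Index1, Expr *Index2,
                                        SourceManager &SM) {
  std::string Text;
  Text += extractExprText(Base, SM).str();
  Text += ".swapAt(";
  Text += extractExprText(Index1, SM).str();
  Text += ", ";
  Text += extractExprText(Index2, SM).str();
  Text += ")";
  return Text;
}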
Example #24
void SILGenFunction::emitClassConstructorInitializer(ConstructorDecl *ctor) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ctor);

  assert(ctor->getBody() && "Class constructor without a body?");

  // True if this constructor delegates to a peer constructor with self.init().
  bool isDelegating = false;
  if (!ctor->hasStubImplementation()) {
    isDelegating = ctor->getDelegatingOrChainedInitKind(nullptr) ==
      ConstructorDecl::BodyInitKind::Delegating;
  }

  // Set up the 'self' argument.  If this class has a superclass, we set up
  // self as a box.  This allows "self reassignment" to happen in super init
  // method chains, which is important for interoperating with Objective-C
  // classes.  We also use a box for delegating constructors, since the
  // delegated-to initializer may also replace self.
  //
  // TODO: If we could require Objective-C classes to have an attribute to get
  // this behavior, we could avoid runtime overhead here.
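  // For illustration (hypothetical Swift source):
  //
  //   class C: Base {
  //     init(x: Int) { super.init() }           // chained: boxed, has superclass
  //     convenience init() { self.init(x: 0) }  // delegating: always boxed
  //   }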
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  auto *dc = ctor->getDeclContext();
  auto selfClassDecl = dc->getAsClassOrClassExtensionContext();
  bool NeedsBoxForSelf = isDelegating ||
    (selfClassDecl->hasSuperclass() && !ctor->hasStubImplementation());
  bool usesObjCAllocator = Lowering::usesObjCAllocator(selfClassDecl);

  // If needed, mark 'self' as uninitialized so that DI knows to
  // enforce its DI properties on stored properties.
  MarkUninitializedInst::Kind MUKind;

  if (isDelegating)
    MUKind = MarkUninitializedInst::DelegatingSelf;
  else if (selfClassDecl->requiresStoredPropertyInits() &&
           usesObjCAllocator) {
    // Stored properties will be initialized in a separate
    // .cxx_construct method called by the Objective-C runtime.
    assert(selfClassDecl->hasSuperclass() &&
           "Cannot use ObjC allocation without a superclass");
    MUKind = MarkUninitializedInst::DerivedSelfOnly;
  } else if (selfClassDecl->hasSuperclass())
    MUKind = MarkUninitializedInst::DerivedSelf;
  else
    MUKind = MarkUninitializedInst::RootSelf;

  if (NeedsBoxForSelf) {
    // Allocate the local variable for 'self'.
    emitLocalVariableWithCleanup(selfDecl, MUKind)->finishInitialization(*this);
  }

  // Emit the prolog for the non-self arguments.
  // FIXME: Handle self along with the other body patterns.
  uint16_t ArgNo = emitProlog(ctor->getParameterList(1),
                              TupleType::getEmpty(F.getASTContext()), ctor,
                              ctor->hasThrows());

  SILType selfTy = getLoweredLoadableType(selfDecl->getType());
  ManagedValue selfArg = B.createInputFunctionArgument(selfTy, selfDecl);

  if (!NeedsBoxForSelf) {
    SILLocation PrologueLoc(selfDecl);
    PrologueLoc.markAsPrologue();
    SILDebugVariable DbgVar(selfDecl->isLet(), ++ArgNo);
    B.createDebugValue(PrologueLoc, selfArg.getValue(), DbgVar);
  }

  if (!ctor->hasStubImplementation()) {
    assert(selfTy.hasReferenceSemantics() &&
           "can't emit a value type ctor here");
    if (NeedsBoxForSelf) {
      SILLocation prologueLoc = RegularLocation(ctor);
      prologueLoc.markAsPrologue();
      // SEMANTIC ARC TODO: When the verifier is complete, review this.
      B.emitStoreValueOperation(prologueLoc, selfArg.forward(*this),
                                VarLocs[selfDecl].value,
                                StoreOwnershipQualifier::Init);
    } else {
      selfArg = B.createMarkUninitialized(selfDecl, selfArg, MUKind);
      VarLocs[selfDecl] = VarLoc::get(selfArg.getValue());
    }
  }

  // Prepare the end of initializer location.
  SILLocation endOfInitLoc = RegularLocation(ctor);
  endOfInitLoc.pointToEnd();

  // Create a basic block to jump to for the implicit 'self' return.
  // We won't emit the block until after we've emitted the body.
  prepareEpilog(Type(), ctor->hasThrows(),
                CleanupLocation::get(endOfInitLoc));

  auto resultType = ctor->mapTypeIntoContext(ctor->getResultInterfaceType());

  // If the constructor can fail, set up an alternative epilog for constructor
  // failure.
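  // For illustration (hypothetical Swift source): in
  //
  //   init?(x: Int) {
  //     if x < 0 { return nil }
  //     ...
  //   }
  //
  // the 'return nil' path jumps to failureBB (via FailDest below), which
  // runs cleanups and branches to failureExitBB with a '.none' value of the
  // optional result type.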
  SILBasicBlock *failureExitBB = nullptr;
  SILArgument *failureExitArg = nullptr;
  auto &resultLowering = getTypeLowering(resultType);

  if (ctor->getFailability() != OTK_None) {
    SILBasicBlock *failureBB = createBasicBlock(FunctionSection::Postmatter);

    RegularLocation loc(ctor);
    loc.markAutoGenerated();

    // On failure, we'll clean up everything and return nil instead.
    SILGenSavedInsertionPoint savedIP(*this, failureBB,
                                      FunctionSection::Postmatter);

    failureExitBB = createBasicBlock();
    failureExitArg = failureExitBB->createPHIArgument(
        resultLowering.getLoweredType(), ValueOwnershipKind::Owned);

    Cleanups.emitCleanupsForReturn(ctor);
    SILValue nilResult =
        B.createEnum(loc, SILValue(), getASTContext().getOptionalNoneDecl(),
                     resultLowering.getLoweredType());
    B.createBranch(loc, failureExitBB, nilResult);

    B.setInsertionPoint(failureExitBB);
    B.createReturn(loc, failureExitArg);

    FailDest = JumpDest(failureBB, Cleanups.getCleanupsDepth(), ctor);
  }

  // Handle member initializers.
  if (isDelegating) {
    // A delegating initializer does not initialize instance
    // variables.
  } else if (ctor->hasStubImplementation()) {
    // Nor does a stub implementation.
  } else if (selfClassDecl->requiresStoredPropertyInits() &&
             usesObjCAllocator) {
    // When the class requires all stored properties to have initial
    // values and we're using Objective-C's allocation, stored
    // properties are initialized via the .cxx_construct method, which
    // will be called by the runtime.

    // Note that 'self' has been fully initialized at this point.
  } else {
    // Emit the member initializers.
    emitMemberInitializers(ctor, selfDecl, selfClassDecl);
  }

  emitProfilerIncrement(ctor->getBody());
  // Emit the constructor body.
  emitStmt(ctor->getBody());

  // Emit the call to super.init() right before exiting from the initializer.
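  // For illustration (hypothetical source, assuming the implicit
  // super.init() call recorded by the type checker via getSuperInitCall()):
  //
  //   init(x: Int) {
  //     self.x = x   // body, emitted above
  //   }              // implicit super.init() emitted here, at the exit block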
  if (NeedsBoxForSelf) {
    if (auto *SI = ctor->getSuperInitCall()) {
      B.setInsertionPoint(ReturnDest.getBlock());

      emitRValue(SI);

      B.emitBlock(B.splitBlockForFallthrough(), ctor);

      ReturnDest = JumpDest(B.getInsertionBB(),
                            ReturnDest.getDepth(),
                            ReturnDest.getCleanupLocation());
      B.clearInsertionPoint();
    }
  }

  CleanupStateRestorationScope SelfCleanupSave(Cleanups);

  // Build a custom epilog block, since the AST representation of the
  // constructor decl (which has no self in the return type) doesn't match the
  // SIL representation.
  {
    // Ensure that before we add additional cleanups, that we have emitted all
    // cleanups at this point.
    assert(!Cleanups.hasAnyActiveCleanups(getCleanupsDepth(),
                                          ReturnDest.getDepth()) &&
           "emitting epilog in wrong scope");

    SILGenSavedInsertionPoint savedIP(*this, ReturnDest.getBlock());
    auto cleanupLoc = CleanupLocation(ctor);

    // If we're using a box for self, reload the value at the end of the init
    // method.
    if (NeedsBoxForSelf) {
      ManagedValue storedSelf =
          ManagedValue::forUnmanaged(VarLocs[selfDecl].value);
      selfArg = B.createLoadCopy(cleanupLoc, storedSelf);
    } else {
      // We have to do a retain because we are returning the pointer +1.
      //
      // SEMANTIC ARC TODO: When the verifier is complete, we will need to
      // change this to selfArg = B.emitCopyValueOperation(...). Currently due
      // to the way that SILGen performs folding of copy_value, destroy_value,
      // the returned selfArg may be deleted causing us to have a
      // dead-pointer. Instead just use the old self value since we have a
      // class.
      selfArg = B.createCopyValue(cleanupLoc, selfArg);
    }

    // Inject the self value into an optional if the constructor is failable.
    if (ctor->getFailability() != OTK_None) {
      RegularLocation loc(ctor);
      loc.markAutoGenerated();
      selfArg = B.createEnum(loc, selfArg,
                             getASTContext().getOptionalSomeDecl(),
                             getLoweredLoadableType(resultType));
    }

    // Save our cleanup state. We want all other potential cleanups to fire, but
    // not this one.
    if (selfArg.hasCleanup())
      SelfCleanupSave.pushCleanupState(selfArg.getCleanup(),
                                       CleanupState::Dormant);

    // Translate our cleanup to the new top cleanup.
    //
    // This is needed to preserve the invariant in getEpilogBB that when
    // cleanups are emitted, everything above ReturnDest.getDepth() has been
    // emitted. This is not true if we use ManagedValue and friends in the
    // epilogBB, thus the translation. We perform the same check above that
    // getEpilogBB performs to ensure that we still do not have the same
    // problem.
    ReturnDest = std::move(ReturnDest).translate(getTopCleanup());
  }
  
  // Emit the epilog and post-matter.
  auto returnLoc = emitEpilog(ctor, /*UsesCustomEpilog*/true);

  // Unpop our selfArg cleanup, so we can forward.
  std::move(SelfCleanupSave).pop();

  // Finish off the epilog by returning.  If this is a failable ctor, then we
  // actually jump to the failure epilog to keep the invariant that there is
  // only one SIL return instruction per SIL function.
  if (B.hasValidInsertionPoint()) {
    if (failureExitBB)
      B.createBranch(returnLoc, failureExitBB, selfArg.forward(*this));
    else
      B.createReturn(returnLoc, selfArg.forward(*this));
  }
}
/// \brief Inlines all mandatory inlined functions into the body of a function,
/// first recursively inlining all mandatory apply instructions in those
/// functions into their bodies if necessary.
///
/// \param F the function to be processed
/// \param AI nullptr if this is being called from the top level; the relevant
///   ApplyInst requiring the recursive call when non-null
/// \param FullyInlinedSet the set of all functions already known to be fully
///   processed, to avoid processing them over again
/// \param SetFactory an instance of ImmutableFunctionSet::Factory
/// \param CurrentInliningSet the set of functions currently being inlined in
///   the current call stack of recursive calls
///
/// \returns true if successful, false if failed due to circular inlining.
static bool
runOnFunctionRecursively(SILFunction *F, FullApplySite AI,
                         SILModule::LinkingMode Mode,
                         DenseFunctionSet &FullyInlinedSet,
                         ImmutableFunctionSet::Factory &SetFactory,
                         ImmutableFunctionSet CurrentInliningSet,
                         ClassHierarchyAnalysis *CHA) {
  // Avoid reprocessing functions needlessly.
  if (FullyInlinedSet.count(F))
    return true;

  // Prevent attempt to circularly inline.
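  // For illustration (hypothetical): two mutually-recursive @_transparent
  // functions cannot be mandatory-inlined into one another:
  //
  //   @_transparent func f() { g() }
  //   @_transparent func g() { f() }
  //
  // Inlining g() into f() re-encounters f on the inlining stack, which is
  // diagnosed below as circular_transparent.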
  if (CurrentInliningSet.contains(F)) {
    // This cannot happen on a top-level call, so AI should be non-null.
    assert(AI && "Cannot have circular inline without apply");
    SILLocation L = AI.getLoc();
    assert(L && "Must have location for transparent inline apply");
    diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
             diag::circular_transparent);
    return false;
  }

  // Add to the current inlining set (immutably, so we only affect the set
  // during this call and recursive subcalls).
  CurrentInliningSet = SetFactory.add(CurrentInliningSet, F);

  SmallVector<SILValue, 16> CaptureArgs;
  SmallVector<SILValue, 32> FullArgs;
  for (auto FI = F->begin(), FE = F->end(); FI != FE; ++FI) {
    for (auto I = FI->begin(), E = FI->end(); I != E; ++I) {
      FullApplySite InnerAI = FullApplySite::isa(&*I);

      if (!InnerAI)
        continue;

      auto *ApplyBlock = InnerAI.getParent();

      auto NewInstPair = tryDevirtualizeApply(InnerAI, CHA);
      if (auto *NewInst = NewInstPair.first) {
        replaceDeadApply(InnerAI, NewInst);
        if (auto *II = dyn_cast<SILInstruction>(NewInst))
          I = II->getIterator();
        else
          I = NewInst->getParentBB()->begin();
        auto NewAI = FullApplySite::isa(NewInstPair.second.getInstruction());
        if (!NewAI)
          continue;

        InnerAI = NewAI;
      }

      SILLocation Loc = InnerAI.getLoc();
      SILValue CalleeValue = InnerAI.getCallee();
      bool IsThick;
      PartialApplyInst *PAI;
      SILFunction *CalleeFunction = getCalleeFunction(InnerAI, IsThick,
                                                      CaptureArgs, FullArgs,
                                                      PAI,
                                                      Mode);
      if (!CalleeFunction ||
          CalleeFunction->isTransparent() == IsNotTransparent)
        continue;

      // Then recursively process it first before trying to inline it.
      if (!runOnFunctionRecursively(CalleeFunction, InnerAI, Mode,
                                    FullyInlinedSet, SetFactory,
                                    CurrentInliningSet, CHA)) {
        // If we failed due to circular inlining, then emit some notes to
        // trace back the failure if we have more information.
        // FIXME: possibly it could be worth recovering and attempting other
        // inlines within this same recursive call rather than simply
        // propagating the failure.
        if (AI) {
          SILLocation L = AI.getLoc();
          assert(L && "Must have location for transparent inline apply");
          diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
                   diag::note_while_inlining);
        }
        return false;
      }

      // Inline function at I, which also changes I to refer to the first
      // instruction inlined in the case that it succeeds. We purposely
      // process the inlined body after inlining, because the inlining may
      // have exposed new inlining opportunities beyond those present in
      // the inlined function when processed independently.
      DEBUG(llvm::errs() << "Inlining @" << CalleeFunction->getName()
                         << " into @" << InnerAI.getFunction()->getName()
                         << "\n");

      // Decrement our iterator (carefully, to avoid going off the front) so it
      // is valid after inlining is done.  Inlining deletes the apply, and can
      // introduce multiple new basic blocks.
      if (I != ApplyBlock->begin())
        --I;
      else
        I = ApplyBlock->end();

      TypeSubstitutionMap ContextSubs;
      std::vector<Substitution> ApplySubs(InnerAI.getSubstitutions());

      if (PAI) {
        auto PAISubs = PAI->getSubstitutions();
        ApplySubs.insert(ApplySubs.end(), PAISubs.begin(), PAISubs.end());
      }

      ContextSubs.copyFrom(CalleeFunction->getContextGenericParams()
                                         ->getSubstitutionMap(ApplySubs));

      SILInliner Inliner(*F, *CalleeFunction,
                         SILInliner::InlineKind::MandatoryInline,
                         ContextSubs, ApplySubs);
      if (!Inliner.inlineFunction(InnerAI, FullArgs)) {
        I = InnerAI.getInstruction()->getIterator();
        continue;
      }

      // Inlining was successful. Remove the apply.
      InnerAI.getInstruction()->eraseFromParent();

      // Reestablish our iterator if it wrapped.
      if (I == ApplyBlock->end())
        I = ApplyBlock->begin();
      else
        ++I;

      // If the inlined apply was a thick function, then we need to balance the
      // reference counts for correctness.
      if (IsThick)
        fixupReferenceCounts(I, Loc, CalleeValue, CaptureArgs);

      // Now that the IR is correct, see if we can remove dead callee
      // computations (e.g. dead partial_apply closures).
      cleanupCalleeValue(CalleeValue, CaptureArgs, FullArgs);

      // Reposition iterators possibly invalidated by mutation.
      FI = SILFunction::iterator(ApplyBlock);
      I = ApplyBlock->begin();
      E = ApplyBlock->end();
      ++NumMandatoryInlines;
    }
  }

  // Keep track of full inlined functions so we don't waste time recursively
  // reprocessing them.
  FullyInlinedSet.insert(F);
  return true;
}
SILFunction *MaterializeForSetEmitter::createCallback(SILFunction &F, GeneratorFn generator) {
  auto &ctx = SGM.getASTContext();
 
  // Mangle this as if it were a conformance thunk for a closure
  // within the witness.
  std::string name;
  {
    ClosureExpr closure(/*patterns*/ nullptr,
                        /*throws*/ SourceLoc(),
                        /*arrow*/ SourceLoc(),
                        /*in*/ SourceLoc(),
                        /*result*/ TypeLoc(),
                        /*discriminator*/ 0,
                        /*context*/ Witness);
    closure.setType(getMaterializeForSetCallbackType(ctx,
                                 getSelfTypeForCallbackDeclaration(Witness)));
    closure.getCaptureInfo().setGenericParamCaptures(true);

    Mangle::Mangler mangler;
    if (Conformance) {
      mangler.append("_TTW");
      mangler.mangleProtocolConformance(Conformance);
    } else {
      mangler.append("_T");
    }
    mangler.mangleClosureEntity(&closure, /*uncurryLevel=*/1);
    name = mangler.finalize();
  }

  // Get lowered formal types for callback parameters.
  Type selfType = SelfInterfaceType;
  Type selfMetatypeType = MetatypeType::get(SelfInterfaceType,
                                            MetatypeRepresentation::Thick);

  {
    GenericContextScope scope(SGM.Types, GenericSig);

    // If 'self' is a metatype, make it @thin or @thick as needed, but not inside
    // selfMetatypeType.
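    // For example (illustrative): for a witness to a static property,
    // 'self' here is a metatype such as 'T.Type', and lowering picks an
    // explicit @thin or @thick representation for it.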
    if (auto metatype = selfType->getAs<MetatypeType>()) {
      if (!metatype->hasRepresentation())
        selfType = SGM.getLoweredType(metatype).getSwiftRValueType();
    }
  }

  // Create the SILFunctionType for the callback.
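  // In SIL-style notation, the callback type assembled below is roughly
  // (illustrative, ignoring the generic signature):
  //
  //   @convention(thin) (Builtin.RawPointer,
  //                      @inout Builtin.UnsafeValueBuffer,
  //                      @inout Self,
  //                      @thick Self.Type) -> ()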
  SILParameterInfo params[] = {
    { ctx.TheRawPointerType, ParameterConvention::Direct_Unowned },
    { ctx.TheUnsafeValueBufferType, ParameterConvention::Indirect_Inout },
    { selfType->getCanonicalType(), ParameterConvention::Indirect_Inout },
    { selfMetatypeType->getCanonicalType(), ParameterConvention::Direct_Unowned },
  };
  SILResultInfo result = {
    TupleType::getEmpty(ctx), ResultConvention::Unowned
  };
  auto extInfo = 
    SILFunctionType::ExtInfo()
      .withRepresentation(SILFunctionTypeRepresentation::Thin);

  auto callbackType = SILFunctionType::get(GenericSig, extInfo,
                                /*callee*/ ParameterConvention::Direct_Unowned,
                                           params, result, None, ctx);
  auto callback =
    SGM.M.getOrCreateFunction(Witness, name, Linkage, callbackType,
                              IsBare,
                              F.isTransparent(),
                              F.isFragile());

  callback->setContextGenericParams(GenericParams);
  callback->setDebugScope(new (SGM.M) SILDebugScope(Witness, *callback));

  PrettyStackTraceSILFunction X("silgen materializeForSet callback", callback);
  {
    SILGenFunction gen(SGM, *callback);

    auto makeParam = [&](unsigned index) -> SILArgument* {
      SILType type = gen.F.mapTypeIntoContext(params[index].getSILType());
      return new (SGM.M) SILArgument(gen.F.begin(), type);
    };

    // Add arguments for all the parameters.
    auto valueBuffer = makeParam(0);
    auto storageBuffer = makeParam(1);
    auto self = makeParam(2);
    (void) makeParam(3);

    SILLocation loc = Witness;
    loc.markAutoGenerated();

    // Call the generator function we were provided.
    {
      LexicalScope scope(gen.Cleanups, gen, CleanupLocation::get(loc));
      generator(gen, loc, valueBuffer, storageBuffer, self);
    }

    // Return void.
    auto result = gen.emitEmptyTuple(loc);
    gen.B.createReturn(loc, result);
  }

  callback->verify();
  return callback;
}
/// tryToRemoveDeadAllocation - If the allocation is an autogenerated allocation
/// that is only stored to (after load promotion) then remove it completely.
bool AllocOptimize::tryToRemoveDeadAllocation() {
  assert((isa<AllocBoxInst>(TheMemory) || isa<AllocStackInst>(TheMemory)) &&
         "Unhandled allocation case");

  // We don't want to remove allocations that are required for useful debug
  // information at -O0.  As such, we only remove allocations if:
  //
  // 1. They are in a transparent function.
  // 2. They are in a normal function, but didn't come from a VarDecl, or came
  //    from one that was autogenerated or inlined from a transparent function.
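  // For example (illustrative): a temporary box synthesized while inlining a
  // transparent function can be removed, whereas an allocation for a
  // user-written 'var x' in a normal function is kept so that 'x' stays
  // visible in the debugger at -O0.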
  SILLocation Loc = TheMemory->getLoc();
  if (!TheMemory->getFunction()->isTransparent() &&
      Loc.getAsASTNode<VarDecl>() && !Loc.isAutoGenerated() &&
      !Loc.is<MandatoryInlinedLocation>())
    return false;

  // Check the uses list to see if there are any non-store uses left over after
  // load promotion and other things DI does.
  for (auto &U : Uses) {
    // Ignore removed instructions.
    if (U.Inst == nullptr) continue;

    switch (U.Kind) {
    case DIUseKind::SelfInit:
    case DIUseKind::SuperInit:
      llvm_unreachable("Can't happen on allocations");
    case DIUseKind::Assign:
    case DIUseKind::PartialStore:
    case DIUseKind::InitOrAssign:
      break;    // These don't prevent removal.
    case DIUseKind::Initialization:
      if (!isa<ApplyInst>(U.Inst) &&
          // A copy_addr that is not a take affects the retain count
          // of the source.
          (!isa<CopyAddrInst>(U.Inst) ||
           cast<CopyAddrInst>(U.Inst)->isTakeOfSrc()))
        break;
      // FALL THROUGH.
      SWIFT_FALLTHROUGH;
    case DIUseKind::Load:
    case DIUseKind::IndirectIn:
    case DIUseKind::InOutUse:
    case DIUseKind::Escape:
      DEBUG(llvm::dbgs() << "*** Failed to remove autogenerated alloc: "
            "kept alive by: " << *U.Inst);
      return false;   // These do prevent removal.
    }
  }

  // If the memory object has non-trivial type, then removing the deallocation
  // will drop any releases.  Check that there is nothing preventing removal.
  if (!MemoryType.isTrivial(Module)) {
    for (auto *R : Releases) {
      if (R == nullptr || isa<DeallocStackInst>(R) || isa<DeallocBoxInst>(R))
        continue;

      DEBUG(llvm::dbgs() << "*** Failed to remove autogenerated alloc: "
            "kept alive by release: " << *R);
      return false;
    }
  }

  DEBUG(llvm::dbgs() << "*** Removing autogenerated alloc_stack: "<<*TheMemory);

  // If it is safe to remove, do it.  Recursively remove all instructions
  // hanging off the allocation instruction, then return success.  Let the
  // caller remove the allocation itself to avoid iterator invalidation.
  eraseUsesOfInstruction(TheMemory);

  return true;
}
SILFunction *MaterializeForSetEmitter::createCallback(SILFunction &F,
                                                      GeneratorFn generator) {
  auto callbackType =
      SGM.Types.getMaterializeForSetCallbackType(WitnessStorage,
                                                 GenericSig,
                                                 SelfInterfaceType,
                                                 CallbackRepresentation);

  auto *genericEnv = GenericEnv;
  if (GenericEnv && GenericEnv->getGenericSignature()->areAllParamsConcrete())
    genericEnv = nullptr;

  // The callback's symbol is irrelevant (it is just returned as a value from
  // the actual materializeForSet function), and so we only need to make sure we
  // don't break things in cases when there may be multiple definitions.
  auto callbackLinkage =
      F.isSerialized() ? SILLinkage::Shared : SILLinkage::Private;

  auto callback = SGM.M.createFunction(
      callbackLinkage, CallbackName, callbackType, genericEnv,
      SILLocation(Witness), IsBare, F.isTransparent(), F.isSerialized(), IsNotThunk,
      SubclassScope::NotApplicable,
      /*inlineStrategy=*/InlineDefault,
      /*EK=*/EffectsKind::Unspecified,
      /*InsertBefore=*/&F);

  callback->setDebugScope(new (SGM.M) SILDebugScope(Witness, callback));

  PrettyStackTraceSILFunction X("silgen materializeForSet callback", callback);
  {
    SILGenFunction SGF(SGM, *callback);

    auto makeParam = [&](unsigned index) -> SILArgument * {
      SILType type = SGF.F.mapTypeIntoContext(
          SGF.getSILType(callbackType->getParameters()[index]));
      return SGF.F.begin()->createFunctionArgument(type);
    };

    // Add arguments for all the parameters.
    auto valueBuffer = makeParam(0);
    auto storageBuffer = makeParam(1);
    auto self = makeParam(2);
    (void) makeParam(3);

    SILLocation loc = Witness;
    loc.markAutoGenerated();

    // Call the generator function we were provided.
    {
      LexicalScope scope(SGF, CleanupLocation::get(loc));
      generator(SGF, loc, valueBuffer, storageBuffer, self);
    }

    // Return void.
    auto result = SGF.emitEmptyTuple(loc);
    SGF.B.createReturn(loc, result);
  }

  callback->verify();
  return callback;
}
/// Inlines all mandatory inlined functions into the body of a function,
/// first recursively inlining all mandatory apply instructions in those
/// functions into their bodies if necessary.
///
/// \param F the function to be processed
/// \param AI nullptr if this is being called from the top level; the relevant
///   ApplyInst requiring the recursive call when non-null
/// \param FullyInlinedSet the set of all functions already known to be fully
///   processed, to avoid processing them over again
/// \param SetFactory an instance of ImmutableFunctionSet::Factory
/// \param CurrentInliningSet the set of functions currently being inlined in
///   the current call stack of recursive calls
///
/// \returns true if successful, false if failed due to circular inlining.
static bool
runOnFunctionRecursively(SILOptFunctionBuilder &FuncBuilder,
                         SILFunction *F, FullApplySite AI,
                         DenseFunctionSet &FullyInlinedSet,
                         ImmutableFunctionSet::Factory &SetFactory,
                         ImmutableFunctionSet CurrentInliningSet,
                         ClassHierarchyAnalysis *CHA) {
  // Avoid reprocessing functions needlessly.
  if (FullyInlinedSet.count(F))
    return true;

  // Prevent attempt to circularly inline.
  if (CurrentInliningSet.contains(F)) {
    // This cannot happen on a top-level call, so AI should be non-null.
    assert(AI && "Cannot have circular inline without apply");
    SILLocation L = AI.getLoc();
    assert(L && "Must have location for transparent inline apply");
    diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
             diag::circular_transparent);
    return false;
  }

  // Add to the current inlining set (immutably, so we only affect the set
  // during this call and recursive subcalls).
  CurrentInliningSet = SetFactory.add(CurrentInliningSet, F);

  SmallVector<std::pair<SILValue, ParameterConvention>, 16> CaptureArgs;
  SmallVector<SILValue, 32> FullArgs;

  // Visiting blocks in reverse order avoids revisiting instructions after block
  // splitting, which would be quadratic.
  for (auto BI = F->rbegin(), BE = F->rend(), nextBB = BI; BI != BE;
       BI = nextBB) {
    // After inlining, the block iterator will be adjusted to point to the last
    // block containing inlined instructions. This way, the inlined function
    // body will be reprocessed within the caller's context without revisiting
    // any original instructions.
    nextBB = std::next(BI);

    // While iterating over this block, instructions are inserted and deleted.
    // To avoid quadratic block splitting, instructions must be processed in
    // reverse order (block splitting reassigns the parent pointer of all
    // instructions below the split point).
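    // Illustration: inlining an apply in the middle of a block BB splits it
    // into BB (the prefix) and a new BB' (the suffix). Because this walk is
    // in reverse, everything now in BB' has already been visited before the
    // split, so no instruction is processed twice.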
    for (auto II = BI->rbegin(); II != BI->rend(); ++II) {
      FullApplySite InnerAI = FullApplySite::isa(&*II);
      if (!InnerAI)
        continue;

      // *NOTE* If devirtualization succeeds, devirtInst may not be InnerAI,
      // but a casted result of InnerAI or even a block argument due to
      // abstraction changes when calling the witness or class method.
      auto *devirtInst = tryDevirtualizeApplyHelper(InnerAI, CHA);
      // Restore II to the current apply site.
      II = devirtInst->getReverseIterator();
      // If the devirtualized call result is no longer a valid FullApplySite,
      // then devirtualization has succeeded, but the result is not
      // immediately inlinable, so move on to the next instruction.
      InnerAI = FullApplySite::isa(devirtInst);
      if (!InnerAI)
        continue;

      SILValue CalleeValue = InnerAI.getCallee();
      bool IsThick;
      PartialApplyInst *PAI;
      SILFunction *CalleeFunction = getCalleeFunction(
          F, InnerAI, IsThick, CaptureArgs, FullArgs, PAI);

      if (!CalleeFunction)
        continue;

      // Then recursively process it first before trying to inline it.
      if (!runOnFunctionRecursively(FuncBuilder, CalleeFunction, InnerAI,
                                    FullyInlinedSet, SetFactory,
                                    CurrentInliningSet, CHA)) {
        // If we failed due to circular inlining, then emit some notes to
        // trace back the failure if we have more information.
        // FIXME: possibly it could be worth recovering and attempting other
        // inlines within this same recursive call rather than simply
        // propagating the failure.
        if (AI) {
          SILLocation L = AI.getLoc();
          assert(L && "Must have location for transparent inline apply");
          diagnose(F->getModule().getASTContext(), L.getStartSourceLoc(),
                   diag::note_while_inlining);
        }
        return false;
      }

      // Get our list of substitutions.
      auto Subs = (PAI
                   ? PAI->getSubstitutionMap()
                   : InnerAI.getSubstitutionMap());

      SILOpenedArchetypesTracker OpenedArchetypesTracker(F);
      F->getModule().registerDeleteNotificationHandler(
          &OpenedArchetypesTracker);
      // The callee only needs to know about opened archetypes used in
      // the substitution list.
      OpenedArchetypesTracker.registerUsedOpenedArchetypes(
          InnerAI.getInstruction());
      if (PAI) {
        OpenedArchetypesTracker.registerUsedOpenedArchetypes(PAI);
      }

      SILInliner Inliner(FuncBuilder, SILInliner::InlineKind::MandatoryInline,
                         Subs, OpenedArchetypesTracker);
      if (!Inliner.canInlineApplySite(InnerAI))
        continue;

      // Inline function at I, which also changes I to refer to the first
      // instruction inlined in the case that it succeeds. We purposely
      // process the inlined body after inlining, because the inlining may
      // have exposed new inlining opportunities beyond those present in
      // the inlined function when processed independently.
      LLVM_DEBUG(llvm::errs() << "Inlining @" << CalleeFunction->getName()
                              << " into @" << InnerAI.getFunction()->getName()
                              << "\n");

      // If we intend to inline a thick function, then we need to balance the
      // reference counts for correctness.
      if (IsThick) {
        bool IsCalleeGuaranteed =
            PAI &&
            PAI->getType().castTo<SILFunctionType>()->isCalleeGuaranteed();
        fixupReferenceCounts(InnerAI.getInstruction(), CalleeValue, CaptureArgs,
                             IsCalleeGuaranteed);
      }

      // Register a callback to record potentially unused function values after
      // inlining.
      ClosureCleanup closureCleanup;
      Inliner.setDeletionCallback([&closureCleanup](SILInstruction *I) {
        closureCleanup.recordDeadFunction(I);
      });

      // Inlining deletes the apply, and can introduce multiple new basic
      // blocks. After this, CalleeValue and other instructions may be invalid.
      // nextBB will point to the last inlined block
      auto firstInlinedInstAndLastBB =
          Inliner.inlineFunction(CalleeFunction, InnerAI, FullArgs);
      nextBB = firstInlinedInstAndLastBB.second->getReverseIterator();
      ++NumMandatoryInlines;

      // The IR is now valid, and trivial dead arguments are removed. However,
      // we may be able to remove dead callee computations (e.g. dead
      // partial_apply closures).
      closureCleanup.cleanupDeadClosures(F);

      // Resume inlining within nextBB, which contains only the inlined
      // instructions and possibly instructions in the original call block that
      // have not yet been visited.
      break;
    }
  }
  // Keep track of full inlined functions so we don't waste time recursively
  // reprocessing them.
  FullyInlinedSet.insert(F);
  return true;
}
void MaterializeForSetEmitter::emit(SILGenFunction &gen) {
  SILLocation loc = Witness;
  loc.markAutoGenerated();

  gen.F.setBare(IsBare);

  SmallVector<ManagedValue, 4> params;
  gen.collectThunkParams(loc, params, /*allowPlusZero*/ true);

  ManagedValue self = params.back();
  SILValue resultBuffer = params[0].getUnmanagedValue();
  SILValue callbackBuffer = params[1].getUnmanagedValue();
  auto indices = ArrayRef<ManagedValue>(params).slice(2).drop_back();

  // If there's an abstraction difference, we always need to use the
  // get/set pattern.
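  // For example (illustrative): storage declared 'weak var' has a
  // ReferenceStorageType, and a witness whose concrete storage type differs
  // from the requirement's (more abstract) storage type cannot hand out its
  // address directly; both cases must go through the getter/setter.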
  AccessStrategy strategy;
  if (WitnessStorage->getType()->is<ReferenceStorageType>() ||
      (RequirementStorageType != WitnessStorageType)) {
    strategy = AccessStrategy::DispatchToAccessor;
  } else {
    strategy = WitnessStorage->getAccessStrategy(TheAccessSemantics,
                                                 AccessKind::ReadWrite);
  }

  // Handle the indices.
  RValue indicesRV;
  if (isa<SubscriptDecl>(WitnessStorage)) {
    indicesRV = collectIndicesFromParameters(gen, loc, indices);
  } else {
    assert(indices.empty() && "indices for a non-subscript?");
  }

  // As above, assume that we don't need to reabstract 'self'.

  // Choose the right implementation.
  SILValue address;
  SILFunction *callbackFn = nullptr;
  switch (strategy) {
  case AccessStrategy::BehaviorStorage:
    llvm_unreachable("materializeForSet should never engage in behavior init");
  
  case AccessStrategy::Storage:
    address = emitUsingStorage(gen, loc, self, std::move(indicesRV));
    break;

  case AccessStrategy::Addressor:
    address = emitUsingAddressor(gen, loc, self, std::move(indicesRV),
                                 callbackBuffer, callbackFn);
    break;

  case AccessStrategy::DirectToAccessor:
  case AccessStrategy::DispatchToAccessor:
    address = emitUsingGetterSetter(gen, loc, self, std::move(indicesRV),
                                    resultBuffer, callbackBuffer, callbackFn);
    break;
  }

  // Return the address as a Builtin.RawPointer.
  SILType rawPointerTy = SILType::getRawPointerType(gen.getASTContext());
  address = gen.B.createAddressToPointer(loc, address, rawPointerTy);

  SILType resultTupleTy = gen.F.mapTypeIntoContext(
                 gen.F.getLoweredFunctionType()->getSILResult());
  SILType optCallbackTy = resultTupleTy.getTupleElementType(1);
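
  // The materializeForSet result is a tuple of the buffer address and an
  // optional callback, roughly (illustrative):
  //
  //   (Builtin.RawPointer, Optional<Builtin.RawPointer>)
  //
  // where the callback, when present, is the thin function emitted by
  // createCallback, punned to a RawPointer.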

  // Form the callback.
  SILValue callback;
  if (callbackFn) {
    // Make a reference to the callback.
    callback = gen.B.createFunctionRef(loc, callbackFn);
    callback = gen.B.createThinFunctionToPointer(loc, callback, rawPointerTy);
    callback = gen.B.createOptionalSome(loc, callback, optCallbackTy);
  } else {
    // There is no callback.
    callback = gen.B.createOptionalNone(loc, optCallbackTy);
  }

  // Form the result and return.
  auto result = gen.B.createTuple(loc, resultTupleTy, { address, callback });
  gen.Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc));
  gen.B.createReturn(loc, result);
}