Example no. 1
/// \brief Issue an "unreachable code" diagnostic if the block contains or
/// leads to another block that contains user code.
///
/// Note, we rely on SILLocation information to determine if SILInstructions
/// correspond to user code.
static bool diagnoseUnreachableBlock(const SILBasicBlock &B,
                                     SILModule &M,
                                     const SILBasicBlockSet &Reachable,
                                     UnreachableUserCodeReportingState *State,
                                     const SILBasicBlock *TopLevelB,
                         llvm::SmallPtrSetImpl<const SILBasicBlock*> &Visited){
  if (Visited.count(&B))
    return false;
  Visited.insert(&B);
  
  assert(State);
  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    SILLocation Loc = I->getLoc();
    // If we've reached an implicit return, we have not found any user code and
    // can stop searching for it.
    if (Loc.is<ImplicitReturnLocation>() || Loc.isAutoGenerated())
      return false;

    // Check if the instruction corresponds to user-written code; also make
    // sure we don't report an error twice for the same block.
    if (isUserCode(&*I) && !State->BlocksWithErrors.count(&B)) {

      // Emit the diagnostic.
      auto BrInfoIter = State->MetaMap.find(TopLevelB);
      assert(BrInfoIter != State->MetaMap.end());
      auto BrInfo = BrInfoIter->second;

      switch (BrInfo.Kind) {
      case (UnreachableKind::FoldedBranch):
        // Emit the diagnostic on the unreachable block and emit the
        // note on the branch responsible for the unreachable code.
        diagnose(M.getASTContext(), Loc.getSourceLoc(), diag::unreachable_code);
        diagnose(M.getASTContext(), BrInfo.Loc.getSourceLoc(),
                 diag::unreachable_code_branch, BrInfo.CondIsAlwaysTrue);
        break;

      case (UnreachableKind::FoldedSwitchEnum): {
        // If we are warning about a switch condition being a constant, the main
        // emphasis should be on the condition (to ensure we have a single
        // message per switch).
        const SwitchStmt *SS = BrInfo.Loc.getAsASTNode<SwitchStmt>();
        if (!SS)
          break;
        const Expr *SE = SS->getSubjectExpr();
        diagnose(M.getASTContext(), SE->getLoc(), diag::switch_on_a_constant);
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::unreachable_code_note);
        break;
      }

      case (UnreachableKind::NoreturnCall): {
        // Special case: we are warning about unreachable code after a call
        // to a noreturn function.
        if (!BrInfo.Loc.isASTNode<ExplicitCastExpr>()) {
          assert(BrInfo.Loc.isASTNode<ApplyExpr>());
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::unreachable_code);
          diagnose(M.getASTContext(), BrInfo.Loc.getSourceLoc(),
                   diag::call_to_noreturn_note);
        }
        break;
      }
      }

      // Record that we've reported this unreachable block to avoid duplicates
      // in the future.
      State->BlocksWithErrors.insert(&B);
      return true;
    }
  }

  // This block could be empty if its terminator has been folded.
  if (B.empty())
    return false;

  // If we have not found user code in this block, inspect its successors.
  // Check if at least one of the successors contains user code.
  for (auto I = B.succ_begin(), E = B.succ_end(); I != E; ++I) {
    SILBasicBlock *SB = *I;
    bool HasReachablePred = false;
    for (auto PI = SB->pred_begin(), PE = SB->pred_end(); PI != PE; ++PI) {
      if (Reachable.count(*PI))
        HasReachablePred = true;
    }

    // If all of the predecessors of this successor are unreachable, check if
    // it contains user code.
    if (!HasReachablePred && diagnoseUnreachableBlock(*SB, M, Reachable,
                                                    State, TopLevelB, Visited))
      return true;
  }
  
  return false;
}
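
The walk above is a depth-first search that descends into a successor only when every one of that successor's predecessors is itself unreachable, and that stops as soon as it either finds user code or hits auto-generated locations. The standalone sketch below illustrates just that traversal pattern; Block, hasUserCode, and findUserCode are hypothetical stand-ins used only for illustration, not the compiler's real SIL types.

#include <iostream>
#include <set>
#include <vector>

// Hypothetical stand-in for a basic block in a tiny CFG.
struct Block {
  bool hasUserCode = false;          // would be isUserCode(...) on real SIL
  std::vector<Block *> succs;
  std::vector<Block *> preds;
};

// Returns true if `b`, or an unreachable block it leads to, contains user
// code, mirroring the propagation logic of diagnoseUnreachableBlock.
static bool findUserCode(Block &b, const std::set<Block *> &reachable,
                         std::set<Block *> &visited) {
  if (!visited.insert(&b).second)
    return false;                    // already visited: avoid cycles
  if (b.hasUserCode)
    return true;                     // the real pass emits a diagnostic here
  for (Block *succ : b.succs) {
    bool hasReachablePred = false;
    for (Block *pred : succ->preds)
      if (reachable.count(pred))
        hasReachablePred = true;
    // Only descend if every predecessor of the successor is unreachable too.
    if (!hasReachablePred && findUserCode(*succ, reachable, visited))
      return true;
  }
  return false;
}

int main() {
  Block dead, tail;                  // dead -> tail, both unreachable
  dead.succs.push_back(&tail);
  tail.preds.push_back(&dead);
  tail.hasUserCode = true;
  std::set<Block *> reachable;       // empty: nothing reaches these blocks
  std::set<Block *> visited;
  std::cout << std::boolalpha << findUserCode(dead, reachable, visited)
            << '\n';                 // prints "true"
}
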
/// Insert monomorphic inline caches for a specific class or metatype
/// type \p SubType.
static FullApplySite speculateMonomorphicTarget(FullApplySite AI,
                                                SILType SubType,
                                                CheckedCastBranchInst *&CCBI) {
  CCBI = nullptr;
  // Bail if this class_method cannot be devirtualized.
  if (!canDevirtualizeClassMethod(AI, SubType))
    return FullApplySite();

  if (SubType.getSwiftRValueType()->hasDynamicSelfType())
    return FullApplySite();

  // Create a diamond-shaped control flow and a checked_cast_branch
  // instruction that checks the exact type of the object.
  // This cast selects between two paths: one that calls the slow dynamic
  // dispatch and one that calls the specific method.
  auto It = AI.getInstruction()->getIterator();
  SILFunction *F = AI.getFunction();
  SILBasicBlock *Entry = AI.getParent();

  // Iden is the basic block containing the direct call.
  SILBasicBlock *Iden = F->createBasicBlock();
  // Virt is the block containing the slow virtual call.
  SILBasicBlock *Virt = F->createBasicBlock();
  Iden->createPHIArgument(SubType, ValueOwnershipKind::Owned);

  SILBasicBlock *Continue = Entry->split(It);

  SILBuilderWithScope Builder(Entry, AI.getInstruction());
  // Create the checked_cast_branch instruction that checks at runtime if the
  // class instance is identical to the SILType.

  ClassMethodInst *CMI = cast<ClassMethodInst>(AI.getCallee());

  CCBI = Builder.createCheckedCastBranch(AI.getLoc(), /*exact*/ true,
                                         CMI->getOperand(), SubType, Iden,
                                         Virt);
  It = CCBI->getIterator();

  SILBuilderWithScope VirtBuilder(Virt, AI.getInstruction());
  SILBuilderWithScope IdenBuilder(Iden, AI.getInstruction());
  // This is the class reference downcasted into subclass SubType.
  SILValue DownCastedClassInstance = Iden->getArgument(0);

  // Copy the two apply instructions into the two blocks.
  FullApplySite IdenAI = CloneApply(AI, IdenBuilder);
  FullApplySite VirtAI = CloneApply(AI, VirtBuilder);

  // See if Continue has a release on self as the instruction right after the
  // apply. If it exists, move it into position in the diamond.
  SILBasicBlock::iterator next =
      next_or_end(Continue->begin(), Continue->end());
  auto *Release =
      (next == Continue->end()) ? nullptr : dyn_cast<StrongReleaseInst>(next);
  if (Release && Release->getOperand() == CMI->getOperand()) {
    VirtBuilder.createStrongRelease(Release->getLoc(), CMI->getOperand(),
                                    Release->getAtomicity());
    IdenBuilder.createStrongRelease(Release->getLoc(), DownCastedClassInstance,
                                    Release->getAtomicity());
    Release->eraseFromParent();
  }

  // Create a PHI node for returning the return value from both apply
  // instructions.
  SILArgument *Arg =
      Continue->createPHIArgument(AI.getType(), ValueOwnershipKind::Owned);
  if (!isa<TryApplyInst>(AI)) {
    if (AI.getSubstCalleeType()->isNoReturnFunction()) {
      IdenBuilder.createUnreachable(AI.getLoc());
      VirtBuilder.createUnreachable(AI.getLoc());
    } else {
      IdenBuilder.createBranch(AI.getLoc(), Continue,
                               { cast<ApplyInst>(IdenAI) });
      VirtBuilder.createBranch(AI.getLoc(), Continue,
                               { cast<ApplyInst>(VirtAI) });
    }
  }

  // Remove the old Apply instruction.
  assert(AI.getInstruction() == &Continue->front() &&
         "AI should be the first instruction in the split Continue block");
  if (isa<TryApplyInst>(AI)) {
    AI.getInstruction()->eraseFromParent();
    assert(Continue->empty() &&
           "There should not be an instruction after try_apply");
    Continue->eraseFromParent();
  } else {
    auto apply = cast<ApplyInst>(AI);
    apply->replaceAllUsesWith(Arg);
    apply->eraseFromParent();
    assert(!Continue->empty() &&
           "There should be at least a terminator after AI");
  }

  // Update the stats.
  NumTargetsPredicted++;

  // Devirtualize the apply instruction on the identical path.
  auto NewInstPair = devirtualizeClassMethod(IdenAI, DownCastedClassInstance);
  assert(NewInstPair.first && "Expected to be able to devirtualize apply!");
  replaceDeadApply(IdenAI, NewInstPair.first);

  // Split critical edges resulting from VirtAI.
  if (auto *TAI = dyn_cast<TryApplyInst>(VirtAI)) {
    auto *ErrorBB = TAI->getFunction()->createBasicBlock();
    ErrorBB->createPHIArgument(TAI->getErrorBB()->getArgument(0)->getType(),
                               ValueOwnershipKind::Owned);
    Builder.setInsertionPoint(ErrorBB);
    Builder.createBranch(TAI->getLoc(), TAI->getErrorBB(),
                         {ErrorBB->getArgument(0)});

    auto *NormalBB = TAI->getFunction()->createBasicBlock();
    NormalBB->createPHIArgument(TAI->getNormalBB()->getArgument(0)->getType(),
                                ValueOwnershipKind::Owned);
    Builder.setInsertionPoint(NormalBB);
    Builder.createBranch(TAI->getLoc(), TAI->getNormalBB(),
                         {NormalBB->getArgument(0)});

    Builder.setInsertionPoint(VirtAI.getInstruction());
    SmallVector<SILValue, 4> Args;
    for (auto Arg : VirtAI.getArguments()) {
      Args.push_back(Arg);
    }
    FullApplySite NewVirtAI = Builder.createTryApply(
        VirtAI.getLoc(), VirtAI.getCallee(), VirtAI.getSubstitutions(), Args,
        NormalBB, ErrorBB);
    VirtAI.getInstruction()->eraseFromParent();
    VirtAI = NewVirtAI;
  }

  return VirtAI;
}
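
At runtime the diamond built above amounts to guarded devirtualization: an exact dynamic-type check selects between a direct, inlinable call and the ordinary virtual dispatch. The following standalone C++ analogue sketches that effect; Base, Derived, and speculativeValue are made-up names used only for illustration, with typeid playing the role of the exact checked_cast_branch.

#include <iostream>
#include <typeinfo>

struct Base {
  virtual ~Base() = default;
  virtual int value() const { return 1; }
};

struct Derived final : Base {
  int value() const override { return 2; }
};

// Guarded devirtualization: if the dynamic type is exactly Derived, take the
// fast path with a direct call (the "Iden" block above); otherwise fall back
// to virtual dispatch (the "Virt" block).
int speculativeValue(const Base &obj) {
  if (typeid(obj) == typeid(Derived))
    return static_cast<const Derived &>(obj).value();  // direct call
  return obj.value();                                  // slow virtual call
}

int main() {
  Derived d;
  Base b;
  std::cout << speculativeValue(d) << ' ' << speculativeValue(b) << '\n';  // 2 1
}
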
Example no. 3
std::pair<Optional<SILValue>, SILLocation>
SILGenFunction::emitEpilogBB(SILLocation TopLevel) {
  assert(ReturnDest.getBlock() && "no epilog bb prepared?!");
  SILBasicBlock *epilogBB = ReturnDest.getBlock();
  SILLocation ImplicitReturnFromTopLevel =
    ImplicitReturnLocation::getImplicitReturnLoc(TopLevel);
  SmallVector<SILValue, 4> directResults;
  Optional<SILLocation> returnLoc = None;

  // If the current BB isn't terminated and we require a return, then we are
  // not allowed to fall off the end of the function, so this point must be
  // unreachable.
  if (NeedsReturn && B.hasValidInsertionPoint())
    B.createUnreachable(ImplicitReturnFromTopLevel);

  if (epilogBB->pred_empty()) {
    // If the epilog was not branched to at all, kill the BB and
    // just emit the epilog into the current BB.
    while (!epilogBB->empty())
      epilogBB->back().eraseFromParent();
    eraseBasicBlock(epilogBB);

    // If the current bb is terminated then the epilog is just unreachable.
    if (!B.hasValidInsertionPoint())
      return { None, TopLevel };

    // We emit the epilog at the current insertion point.
    returnLoc = ImplicitReturnFromTopLevel;

  } else if (std::next(epilogBB->pred_begin()) == epilogBB->pred_end()
             && !B.hasValidInsertionPoint()) {
    // If the epilog has a single predecessor and there's no current insertion
    // point to fall through from, then we can weld the epilog to that
    // predecessor BB.

    // Steal the branch argument as the return value if present.
    SILBasicBlock *pred = *epilogBB->pred_begin();
    BranchInst *predBranch = cast<BranchInst>(pred->getTerminator());
    assert(predBranch->getArgs().size() == epilogBB->bbarg_size() &&
           "epilog predecessor arguments do not match block params");

    for (auto index : indices(predBranch->getArgs())) {
      SILValue result = predBranch->getArgs()[index];
      directResults.push_back(result);
      epilogBB->getBBArg(index)->replaceAllUsesWith(result);
    }

    // If we are optimizing, we should use the return location from the
    // single, previously processed return statement, if any.
    if (predBranch->getLoc().is<ReturnLocation>()) {
      returnLoc = predBranch->getLoc();
    } else {
      returnLoc = ImplicitReturnFromTopLevel;
    }
    
    // Kill the branch to the now-dead epilog BB.
    pred->erase(predBranch);

    // Move any instructions from the EpilogBB to the end of the 'pred' block.
    pred->spliceAtEnd(epilogBB);

    // Finally we can erase the epilog BB.
    eraseBasicBlock(epilogBB);

    // Emit the epilog into its former predecessor.
    B.setInsertionPoint(pred);
  } else {
    // Move the epilog block to the end of the ordinary section.
    auto endOfOrdinarySection = StartOfPostmatter;
    B.moveBlockTo(epilogBB, endOfOrdinarySection);

    // Emit the epilog into the epilog bb. Its arguments are the
    // direct results.
    directResults.append(epilogBB->bbarg_begin(), epilogBB->bbarg_end());

    // If we are falling through from the current block, the return is implicit.
    B.emitBlock(epilogBB, ImplicitReturnFromTopLevel);
  }
  
  // Emit top-level cleanups into the epilog block.
  assert(!Cleanups.hasAnyActiveCleanups(getCleanupsDepth(),
                                        ReturnDest.getDepth()) &&
         "emitting epilog in wrong scope");

  auto cleanupLoc = CleanupLocation::get(TopLevel);
  Cleanups.emitCleanupsForReturn(cleanupLoc);

  // If the return location is known to be that of an already
  // processed return, use it. (This will get triggered when the
  // epilog logic is simplified.)
  //
  // Otherwise make the ret instruction part of the cleanups.
  if (!returnLoc) returnLoc = cleanupLoc;

  // Build the return value.  We don't do this if there are no direct
  // results; this can happen for void functions, but also happens when
  // prepareEpilog was asked to not add result arguments to the epilog
  // block.
  SILValue returnValue;
  if (!directResults.empty()) {
    assert(directResults.size()
             == F.getLoweredFunctionType()->getNumDirectResults());
    returnValue = buildReturnValue(*this, TopLevel, directResults);
  }

  return { returnValue, *returnLoc };
}
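
The interesting case above is the middle one: when the epilog block has exactly one predecessor and nothing falls through into it, the block is welded into that predecessor and the branch operands become the direct results. The standalone sketch below shows that simplification on a hypothetical mini-IR; Block, weldEpilog, and the string-valued instructions are illustrative stand-ins, not SILGen's actual data structures.

#include <cassert>
#include <string>
#include <vector>

// Hypothetical mini-IR: a block has formal arguments, a body, and an optional
// branch terminator that passes actual arguments to a single successor.
struct Block {
  std::vector<std::string> args;       // formal block arguments
  std::vector<std::string> body;       // "instructions"
  Block *succ = nullptr;               // branch target, if any
  std::vector<std::string> branchArgs; // operands passed to the successor
};

// Weld `epilog` into its unique predecessor `pred`, the way the single-
// predecessor case above does: take the branch operands as the direct
// results, drop the branch, and splice the epilog body onto `pred`.
std::vector<std::string> weldEpilog(Block &pred, Block &epilog) {
  assert(pred.succ == &epilog && "pred must branch to the epilog");
  assert(pred.branchArgs.size() == epilog.args.size() &&
         "branch operands must match the epilog's block arguments");
  std::vector<std::string> directResults = pred.branchArgs;  // steal the args
  pred.succ = nullptr;                                       // kill the branch
  pred.branchArgs.clear();
  pred.body.insert(pred.body.end(), epilog.body.begin(), epilog.body.end());
  epilog.body.clear();                                       // epilog is dead
  return directResults;
}

int main() {
  Block pred, epilog;
  epilog.args = {"%result"};
  epilog.body = {"cleanup", "return %result"};
  pred.body = {"compute %x"};
  pred.succ = &epilog;
  pred.branchArgs = {"%x"};
  auto results = weldEpilog(pred, epilog);
  // pred.body is now {"compute %x", "cleanup", "return %result"} and the
  // single direct result is "%x".
  return results.size() == 1 ? 0 : 1;
}
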
Example no. 4
std::pair<Optional<SILValue>, SILLocation>
SILGenFunction::emitEpilogBB(SILLocation TopLevel) {
  assert(ReturnDest.getBlock() && "no epilog bb prepared?!");
  SILBasicBlock *epilogBB = ReturnDest.getBlock();
  SILLocation ImplicitReturnFromTopLevel =
    ImplicitReturnLocation::getImplicitReturnLoc(TopLevel);
  SILValue returnValue;
  Optional<SILLocation> returnLoc = None;

  // If the current BB isn't terminated and we require a return, then we are
  // not allowed to fall off the end of the function, so this point must be
  // unreachable.
  if (NeedsReturn && B.hasValidInsertionPoint())
    B.createUnreachable(ImplicitReturnFromTopLevel);

  if (epilogBB->pred_empty()) {
    bool hadArg = !epilogBB->bbarg_empty();

    // If the epilog was not branched to at all, kill the BB and
    // just emit the epilog into the current BB.
    while (!epilogBB->empty())
      epilogBB->back().eraseFromParent();
    eraseBasicBlock(epilogBB);

    // If the current bb is terminated then the epilog is just unreachable.
    if (!B.hasValidInsertionPoint())
      return { None, TopLevel };
    // We emit the epilog at the current insertion point.
    assert(!hadArg && "NeedsReturn is false but epilog had argument?!");
    (void)hadArg;
    returnLoc = ImplicitReturnFromTopLevel;

  } else if (std::next(epilogBB->pred_begin()) == epilogBB->pred_end()
             && !B.hasValidInsertionPoint()) {
    // If the epilog has a single predecessor and there's no current insertion
    // point to fall through from, then we can weld the epilog to that
    // predecessor BB.
    bool needsArg = false;
    if (!epilogBB->bbarg_empty()) {
      assert(epilogBB->bbarg_size() == 1 && "epilog should take 0 or 1 args");
      needsArg = true;
    }

    // Steal the branch argument as the return value if present.
    SILBasicBlock *pred = *epilogBB->pred_begin();
    BranchInst *predBranch = cast<BranchInst>(pred->getTerminator());
    assert(predBranch->getArgs().size() == (needsArg ? 1 : 0) &&
           "epilog predecessor arguments do not match block params");

    if (needsArg) {
      returnValue = predBranch->getArgs()[0];
      // RAUW the old BB argument (if any) with the new value.
      SILValue(*epilogBB->bbarg_begin(),0).replaceAllUsesWith(returnValue);
    }

    // If we are optimizing, we should use the return location from the
    // single, previously processed return statement, if any.
    if (predBranch->getLoc().is<ReturnLocation>()) {
      returnLoc = predBranch->getLoc();
    } else {
      returnLoc = ImplicitReturnFromTopLevel;
    }
    
    // Kill the branch to the now-dead epilog BB.
    pred->erase(predBranch);

    // Move any instructions from the EpilogBB to the end of the 'pred' block.
    pred->spliceAtEnd(epilogBB);

    // Finally we can erase the epilog BB.
    eraseBasicBlock(epilogBB);

    // Emit the epilog into its former predecessor.
    B.setInsertionPoint(pred);
  } else {
    // Move the epilog block to the end of the ordinary section.
    auto endOfOrdinarySection =
      (StartOfPostmatter ? SILFunction::iterator(StartOfPostmatter) : F.end());
    B.moveBlockTo(epilogBB, endOfOrdinarySection);

    // Emit the epilog into the epilog bb. Its argument is the return value.
    if (!epilogBB->bbarg_empty()) {
      assert(epilogBB->bbarg_size() == 1 && "epilog should take 0 or 1 args");
      returnValue = epilogBB->bbarg_begin()[0];
    }

    // If we are falling through from the current block, the return is implicit.
    B.emitBlock(epilogBB, ImplicitReturnFromTopLevel);
  }
  
  // Emit top-level cleanups into the epilog block.
  assert(!Cleanups.hasAnyActiveCleanups(getCleanupsDepth(),
                                        ReturnDest.getDepth()) &&
         "emitting epilog in wrong scope");

  auto cleanupLoc = CleanupLocation::get(TopLevel);
  Cleanups.emitCleanupsForReturn(cleanupLoc);

  // If the return location is known to be that of an already
  // processed return, use it. (This will get triggered when the
  // epilog logic is simplified.)
  //
  // Otherwise make the ret instruction part of the cleanups.
  if (!returnLoc) returnLoc = cleanupLoc;

  return { returnValue, *returnLoc };
}
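
This variant of emitEpilogBB accepts at most one epilog block argument, so it tracks a single returnValue instead of the vector of direct results used in the previous example; the overall shape is otherwise the same three-way choice driven by the epilog block's predecessors and the current insertion point. The standalone sketch below mirrors that choice; EpilogShape, EpilogStrategy, and chooseStrategy are hypothetical names used only for illustration.

#include <iostream>

// Hypothetical summary of the shape of a function's prepared epilog block.
struct EpilogShape {
  int numPreds = 0;                // predecessors of the epilog block
  bool haveInsertionPoint = false; // can we fall through from the current BB?
};

enum class EpilogStrategy {
  EmitInline,      // no predecessors: drop the block, emit at current point
  WeldIntoPred,    // one predecessor, no fallthrough: merge into it
  EmitEpilogBlock, // general case: emit into the epilog block itself
};

// Mirrors the three-way branch at the top of emitEpilogBB.
EpilogStrategy chooseStrategy(const EpilogShape &s) {
  if (s.numPreds == 0)
    return EpilogStrategy::EmitInline;
  if (s.numPreds == 1 && !s.haveInsertionPoint)
    return EpilogStrategy::WeldIntoPred;
  return EpilogStrategy::EmitEpilogBlock;
}

int main() {
  std::cout << (chooseStrategy({1, false}) == EpilogStrategy::WeldIntoPred)
            << '\n';  // prints 1
}
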