Example No. 1
/**********************************************************************
 * 
 * Method:      SerialPort()
 *
 * Description: Constructor for the serial port class.
 *
 * Notes:    
 *
 * Returns:     None defined.
 *
 **********************************************************************/
SerialPort::SerialPort(int            port,
                       unsigned long  baudRate,
                       unsigned int   txQueueSize,
                       unsigned int   rxQueueSize)
{
    //
    // Initialize the logical device.
    //
    switch (port)
    {
      case UART0:
        channel = 0;
        break;

      default:
        channel = -1;
        break;
    }

    //
    // Create input and output FIFOs.
    //
    pTxQueue = new CircBuf(txQueueSize);
    pRxQueue = new CircBuf(rxQueueSize);

    // 
    // Initialize the hardware device.
    // 
    scc.reset(channel);
    scc.init(channel, baudRate, pTxQueue, pRxQueue);

}   /* SerialPort() */
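A minimal usage sketch for the constructor above. Only UART0 comes from the original code; the baud rate and queue sizes are illustrative values chosen for this example.

//
// Hypothetical instantiation of the class defined above. Apart from UART0,
// the argument values are assumptions for illustration only.
//
SerialPort console(UART0,    // logical port, mapped to channel 0 above
                   19200,    // baud rate (assumed)
                   64,       // transmit FIFO depth (assumed)
                   64);      // receive FIFO depth (assumed)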
Example No. 2
/// Use Tarjan's strongly connected components (SCC) algorithm to find
/// the SCCs in the call graph.
void BottomUpFunctionOrder::DFS(SILFunction *Start) {
  // Set the DFSNum for this node if we haven't already; if it already
  // has one, the node has been visited, so return.
  if (!DFSNum.insert(std::make_pair(Start, NextDFSNum)).second)
    return;

  assert(MinDFSNum.find(Start) == MinDFSNum.end() &&
         "Function should not already have a minimum DFS number!");

  MinDFSNum[Start] = NextDFSNum;
  ++NextDFSNum;

  DFSStack.insert(Start);

  // Visit all the instructions, looking for apply sites.
  for (auto &B : *Start) {
    for (auto &I : B) {
      auto FAS = FullApplySite::isa(&I);
      if (!FAS)
        continue;

      auto Callees = BCA->getCalleeList(FAS);
      for (auto *CalleeFn : Callees) {
        // If not yet visited, visit the callee.
        if (DFSNum.find(CalleeFn) == DFSNum.end()) {
          DFS(CalleeFn);
          MinDFSNum[Start] = std::min(MinDFSNum[Start], MinDFSNum[CalleeFn]);
        } else if (DFSStack.count(CalleeFn)) {
          // If the callee is on the stack, update our minimum DFS
          // number based on its DFS number.
          MinDFSNum[Start] = std::min(MinDFSNum[Start], DFSNum[CalleeFn]);
        }
      }
    }
  }

  // If our DFS number is the minimum found, we've found a
  // (potentially singleton) SCC, so pop the nodes off the stack and
  // push the new SCC on our stack of SCCs.
  if (DFSNum[Start] == MinDFSNum[Start]) {
    SCC CurrentSCC;

    SILFunction *Popped;
    do {
      Popped = DFSStack.pop_back_val();
      CurrentSCC.push_back(Popped);
    } while (Popped != Start);

    TheSCCs.push_back(CurrentSCC);
  }
}
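The method above is the Swift compiler's call-graph-specific form of Tarjan's algorithm. As a point of comparison, here is a hedged, self-contained sketch of the same scheme over a plain adjacency-list graph; every name in it is illustrative and none of it comes from the original source.

#include <algorithm>
#include <utility>
#include <vector>

// Illustrative standalone Tarjan SCC: DFS numbers, minimum ("low-link")
// numbers, and an explicit stack of open nodes, mirroring the code above.
struct TarjanSCC {
    const std::vector<std::vector<int>> &adj;   // adjacency list
    std::vector<int> dfsNum, minDfsNum;
    std::vector<bool> onStack;
    std::vector<int> stack;
    std::vector<std::vector<int>> sccs;
    int nextDfsNum = 0;

    explicit TarjanSCC(const std::vector<std::vector<int>> &g)
        : adj(g), dfsNum(g.size(), -1), minDfsNum(g.size(), -1),
          onStack(g.size(), false) {
        for (int v = 0; v < (int)g.size(); ++v)
            if (dfsNum[v] == -1)
                dfs(v);
    }

    void dfs(int v) {
        dfsNum[v] = minDfsNum[v] = nextDfsNum++;
        stack.push_back(v);
        onStack[v] = true;

        for (int w : adj[v]) {
            if (dfsNum[w] == -1) {          // not yet visited: recurse
                dfs(w);
                minDfsNum[v] = std::min(minDfsNum[v], minDfsNum[w]);
            } else if (onStack[w]) {        // w is on the open stack
                minDfsNum[v] = std::min(minDfsNum[v], dfsNum[w]);
            }
        }

        // v is the root of an SCC: pop the component off the stack.
        if (dfsNum[v] == minDfsNum[v]) {
            std::vector<int> scc;
            int w;
            do {
                w = stack.back();
                stack.pop_back();
                onStack[w] = false;
                scc.push_back(w);
            } while (w != v);
            sccs.push_back(std::move(scc));
        }
    }
};

As in the pass above, components land in sccs callee-first, i.e. in reverse topological order of the condensation, which is exactly the bottom-up order being computed.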
Example No. 3
void solve() {
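    //
    // Context assumed from the surrounding (not shown) program: G[i] holds
    // the right-side neighbours of left vertex i in a bipartite graph with
    // sides 1..n and 1..m; find()/match[]/mk[] implement augmenting-path
    // (Kuhn's) maximum matching; townboy is a Tarjan SCC helper.  The code
    // first computes a maximum matching, then builds a directed graph over
    // the right-side vertices from that matching and decomposes it into SCCs.
    //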
    int ans = 0;
    memset(match,-1,sizeof(match));
    for(int i = 1; i <= n; i++) {
        memset(mk,false,sizeof(mk));
        if(true == find(i))
            ans ++;
    }

    townboy.init(m);
    memset(to,-1,sizeof(to));
    for(int i = 1; i <= m; i++) {
        if(-1 == match[i])
            continue;
        to[match[i]] = i;
    }

    for(int i = 1; i <= n; i++) {
        int size = G[i].size();
        if(-1 == to[i]) {
            for(int f = 0; f < size; f++) {
                int v = G[i][f];
                for(int g = 1; g <= m; g++)
                    if(v != g)
                        townboy.add(g,v);
            }
        }
        else {
            for(int f = 0 ; f < size; f++) {
                int v = G[i][f];
                if(v == to[i])
                    continue;
                townboy.add(to[i],v);
            }
        }
    }
    for(int i = 1; i <= m; i++) {
        if(-1 != match[i])
            continue;
        for(int f = 1; f <= m; f++) {
            if(i == f)
                continue;
            townboy.add(i,f);
        }
    }
    townboy.find_scc();
}
Example No. 4
/**********************************************************************
 * 
 * Method:      getchar()
 *
 * Description: Read one character from the serial port.
 *
 * Notes:
 *
 * Returns:     The next character found on this input stream.
 *              -1 is returned in the case of an error.
 *
 **********************************************************************/
int
SerialPort::getchar(void)
{
    int  c;

    //
    // If the receive engine is stalled, restart it.
    //
    if (! pRxQueue->isFull())
    {
        scc.rxStart(channel);
    }

    if (pRxQueue->isEmpty())
    {
        return (-1);               // There is no input data available.
    }

    //
    // Read the next byte out of the receive FIFO.
    //
    c = pRxQueue->remove();

    return (c);

}   /* getchar() */
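A hypothetical polling wrapper around the method above; the wrapper name and the busy-wait policy are assumptions, while the -1 "no data" convention comes from the description.

/**********************************************************************
 *
 * Function:    readBlocking()     (illustrative, not part of the original)
 *
 * Description: Spin until getchar() returns a character.
 *
 **********************************************************************/
int
readBlocking(SerialPort & port)
{
    int  c;

    do
    {
        c = port.getchar();

    } while (c == -1);

    return (c);

}   /* readBlocking() */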
Example No. 5
int
SerialPort::getSizeBuffer()
{
    int     c;
    c = scc.get_Count(channel);

    return (c);

}   /* getSizeBuffer() */
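A sketch combining the two methods above: get_Count(channel) appears to report how many received bytes are buffered, so a caller might drain exactly that many. The helper below and its name are assumptions.

void
drainPending(SerialPort & port)
{
    int  count = port.getSizeBuffer();

    while (count-- > 0)
    {
        int c = port.getchar();

        if (c == -1)
        {
            break;      // Count and FIFO disagree; stop early.
        }

        // ... process the character c ...
    }

}   /* drainPending() */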
Example No. 6
/**********************************************************************
 * 
 * Method:      putchar()
 *
 * Description: Write one character to the serial port.
 *
 * Notes:
 *
 * Returns:     The transmitted character is returned on success.  
 *              -1 is returned in the case of an error.
 *
 **********************************************************************/
int
SerialPort::putchar(char c)
{
    if (pTxQueue->isFull())
    {
        return (-1);
    }

    //
    // Add the character to the transmit FIFO.
    //
    pTxQueue->add((item)c);
    if (c == '\n')
        pTxQueue->add('\r');

    //
    // Start the transmit engine (if it's stalled).
    //
    scc.txStart(channel);

    return (c);

}   /* putchar() */
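A hypothetical helper that pushes a NUL-terminated string through putchar(), retrying a character whenever the transmit FIFO reports full (-1). Only putchar() itself comes from the code above.

void
putString(SerialPort & port, const char * s)
{
    while (*s != '\0')
    {
        if (port.putchar(*s) != -1)
        {
            s++;        // Character accepted; advance to the next one.
        }

        // Otherwise the transmit FIFO was full; retry the same character.
    }

}   /* putString() */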
Example No. 7
LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, bool DebugLogging) {
  typedef LazyCallGraph::Node Node;
  typedef LazyCallGraph::Edge Edge;
  typedef LazyCallGraph::SCC SCC;
  typedef LazyCallGraph::RefSCC RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Function *, 16> RetainedEdges;
  SmallSetVector<Function *, 4> PromotedRefTargets;
  SmallSetVector<Function *, 4> DemotedCallTargets;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F))
    if (auto CS = CallSite(&I))
      if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          const Edge *E = N.lookup(*Callee);
          // FIXME: We should really handle adding new calls. While it will
          // make downstream usage more complex, there is no fundamental
          // limitation and it will allow passes within the CGSCC to be a bit
          // more flexible in what transforms they can do. Until then, we
          // verify that new calls haven't been introduced.
          assert(E && "No function transformations should introduce *new* "
                      "call edges! Any new calls should be modeled as "
                      "promoted existing ref edges!");
          RetainedEdges.insert(Callee);
          if (!E->isCall())
            PromotedRefTargets.insert(Callee);
        }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (Constant *C = dyn_cast<Constant>(Op))
        if (Visited.insert(C).second)
          Worklist.push_back(C);

  LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
    const Edge *E = N.lookup(Referee);
    // FIXME: Similarly to new calls, we also currently preclude
    // introducing new references. See above for details.
    assert(E && "No function transformations should introduce *new* ref "
                "edges! Any new ref edges would require IPO which "
                "function passes aren't allowed to do!");
    RetainedEdges.insert(&Referee);
    if (E->isCall())
      DemotedCallTargets.insert(&Referee);
  });

  // First remove all of the edges that are no longer present in this function.
  // We have to build a list of dead targets first and then remove them as the
  // data structures will all be invalidated by removing them.
  SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
  for (Edge &E : N)
    if (!RetainedEdges.count(&E.getFunction()))
      DeadTargets.push_back({E.getNode(), E.getKind()});
  for (auto DeadTarget : DeadTargets) {
    Node &TargetN = *DeadTarget.getPointer();
    bool IsCall = DeadTarget.getInt() == Edge::Call;
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    if (&TargetRC != RC) {
      RC->removeOutgoingEdge(N, TargetN);
      if (DebugLogging)
        dbgs() << "Deleting outgoing edge from '" << N << "' to '" << TargetN
               << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Deleting internal " << (IsCall ? "call" : "ref")
             << " edge from '" << N << "' to '" << TargetN << "'\n";

    if (IsCall)
      C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N,
                                 C, AM, UR, DebugLogging);

    auto NewRefSCCs = RC->removeInternalRefEdge(N, TargetN);
    if (!NewRefSCCs.empty()) {
      // Note that we don't bother to invalidate analyses as ref-edge
      // connectivity is not really observable in any way and is intended
      // exclusively to be used for ordering of transforms rather than for
      // analysis conclusions.

      // The RC worklist is in reverse postorder, so we first enqueue the
      // current RefSCC as it will remain the parent of all split RefSCCs, then
      // we enqueue the new ones in RPO except for the one which contains the
      // source node as that is the "bottom" we will continue processing in the
      // bottom-up walk.
      UR.RCWorklist.insert(RC);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing RefSCC in the update worklist: "
               << *RC << "\n";
      // Update the RC to the "bottom".
      assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
      RC = &C->getOuterRefSCC();
      assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");
      assert(NewRefSCCs.front() == RC &&
             "New current RefSCC not first in the returned list!");
      for (RefSCC *NewRC : reverse(
               make_range(std::next(NewRefSCCs.begin()), NewRefSCCs.end()))) {
        assert(NewRC != RC && "Should not encounter the current RefSCC further "
                              "in the postorder list of new RefSCCs.");
        UR.RCWorklist.insert(NewRC);
        if (DebugLogging)
          dbgs() << "Enqueuing a new RefSCC in the update worklist: " << *NewRC
                 << "\n";
      }
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small which should minimize the work below as we don't want to
  // form cycles that this would break.
  for (Function *RefTarget : DemotedCallTargets) {
    Node &TargetN = *G.lookup(*RefTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing call edge to a ref edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }

    // Otherwise we are switching an internal call edge to a ref edge. This
    // may split up some SCCs.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N, C,
                               AM, UR, DebugLogging);
  }

  // Now promote ref edges into call edges.
  for (Function *CallTarget : PromotedRefTargets) {
    Node &TargetN = *G.lookup(*CallTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing ref edge to a call edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Switch an internal ref edge to a call edge from '" << N
             << "' to '" << TargetN << "'\n";

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, TargetN);
    if (!InvalidatedSCCs.empty()) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle.
      AM.invalidate(*C, PreservedAnalyses::none());

      for (SCC *InvalidatedC : InvalidatedSCCs) {
        assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
        UR.InvalidatedSCCs.insert(InvalidatedC);

        // Also clear any cached analyses for the SCCs that are dead. This
        // isn't really necessary for correctness but can release memory.
        AM.clear(*InvalidatedC);
      }
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing SCC in the worklist: " << *C << "\n";
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : reverse(make_range(RC->begin() + InitialSCCIndex,
                                            RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        if (DebugLogging)
          dbgs() << "Enqueuing a newly earlier in post-order SCC: " << MovedC
                 << "\n";
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}
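For context, a heavily simplified sketch of how a caller might drive the routine above: a CGSCC-to-function adaptor runs a function pass over each node of the current SCC and then re-synchronizes the graph and analysis caches. Only updateCGAndAnalysisManagerForFunctionPass and its signature are taken from the code above; the adaptor itself is an illustration, not LLVM's actual CGSCCToFunctionPassAdaptor.

// Illustrative adaptor sketch (assumptions throughout; not LLVM's adaptor).
// FuncPassT is any callable that runs a function pass over a Function.
template <typename FuncPassT>
LazyCallGraph::SCC *runOverSCC(LazyCallGraph::SCC &InitialC, LazyCallGraph &G,
                               CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
                               FuncPassT RunFunctionPass) {
  LazyCallGraph::SCC *C = &InitialC;

  // Copy the nodes out first: the pass and the update below can reshape the
  // SCC, which would invalidate iteration over the original component.
  SmallVector<LazyCallGraph::Node *, 4> Nodes;
  for (LazyCallGraph::Node &N : *C)
    Nodes.push_back(&N);

  for (LazyCallGraph::Node *N : Nodes) {
    RunFunctionPass(N->getFunction());

    // Re-synchronize the call graph and the analysis manager with whatever
    // call or ref edges the function pass added or removed.
    C = &updateCGAndAnalysisManagerForFunctionPass(G, *C, *N, AM, UR,
                                                   /*DebugLogging=*/false);
  }
  return C;
}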
Example No. 8
LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
  using Node = LazyCallGraph::Node;
  using Edge = LazyCallGraph::Edge;
  using SCC = LazyCallGraph::SCC;
  using RefSCC = LazyCallGraph::RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Node *, 16> RetainedEdges;
  SmallSetVector<Node *, 4> PromotedRefTargets;
  SmallSetVector<Node *, 4> DemotedCallTargets;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F))
    if (auto CS = CallSite(&I))
      if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          Node &CalleeN = *G.lookup(*Callee);
          Edge *E = N->lookup(CalleeN);
          // FIXME: We should really handle adding new calls. While it will
          // make downstream usage more complex, there is no fundamental
          // limitation and it will allow passes within the CGSCC to be a bit
          // more flexible in what transforms they can do. Until then, we
          // verify that new calls haven't been introduced.
          assert(E && "No function transformations should introduce *new* "
                      "call edges! Any new calls should be modeled as "
                      "promoted existing ref edges!");
          bool Inserted = RetainedEdges.insert(&CalleeN).second;
          (void)Inserted;
          assert(Inserted && "We should never visit a function twice.");
          if (!E->isCall())
            PromotedRefTargets.insert(&CalleeN);
        }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (auto *C = dyn_cast<Constant>(Op))
        if (Visited.insert(C).second)
          Worklist.push_back(C);

  auto VisitRef = [&](Function &Referee) {
    Node &RefereeN = *G.lookup(Referee);
    Edge *E = N->lookup(RefereeN);
    // FIXME: Similarly to new calls, we also currently preclude
    // introducing new references. See above for details.
    assert(E && "No function transformations should introduce *new* ref "
                "edges! Any new ref edges would require IPO which "
                "function passes aren't allowed to do!");
    bool Inserted = RetainedEdges.insert(&RefereeN).second;
    (void)Inserted;
    assert(Inserted && "We should never visit a function twice.");
    if (E->isCall())
      DemotedCallTargets.insert(&RefereeN);
  };
  LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);

  // Include synthetic reference edges to known, defined lib functions.
  for (auto *F : G.getLibFunctions())
    // While the list of lib functions doesn't have repeats, don't re-visit
    // anything handled above.
    if (!Visited.count(F))
      VisitRef(*F);

  // First remove all of the edges that are no longer present in this function.
  // The first step makes these edges uniformly ref edges and accumulates them
  // into a separate data structure so removal doesn't invalidate anything.
  SmallVector<Node *, 4> DeadTargets;
  for (Edge &E : *N) {
    if (RetainedEdges.count(&E.getNode()))
      continue;

    SCC &TargetC = *G.lookupSCC(E.getNode());
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    if (&TargetRC == RC && E.isCall()) {
      if (C != &TargetC) {
        // For separate SCCs this is trivial.
        RC->switchTrivialInternalEdgeToRef(N, E.getNode());
      } else {
        // Now update the call graph.
        C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
                                   G, N, C, AM, UR);
      }
    }

    // Now that this is ready for actual removal, put it into our list.
    DeadTargets.push_back(&E.getNode());
  }
  // Remove the easy cases quickly and actually pull them out of our list.
  DeadTargets.erase(
      llvm::remove_if(DeadTargets,
                      [&](Node *TargetN) {
                        SCC &TargetC = *G.lookupSCC(*TargetN);
                        RefSCC &TargetRC = TargetC.getOuterRefSCC();

                        // We can't trivially remove internal targets, so skip
                        // those.
                        if (&TargetRC == RC)
                          return false;

                        RC->removeOutgoingEdge(N, *TargetN);
                        LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '"
                                          << N << "' to '" << *TargetN
                                          << "'\n");
                        return true;
                      }),
      DeadTargets.end());

  // Now do a batch removal of the internal ref edges left.
  auto NewRefSCCs = RC->removeInternalRefEdge(N, DeadTargets);
  if (!NewRefSCCs.empty()) {
    // The old RefSCC is dead, mark it as such.
    UR.InvalidatedRefSCCs.insert(RC);

    // Note that we don't bother to invalidate analyses as ref-edge
    // connectivity is not really observable in any way and is intended
    // exclusively to be used for ordering of transforms rather than for
    // analysis conclusions.

    // Update RC to the "bottom".
    assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
    RC = &C->getOuterRefSCC();
    assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");

    // The RC worklist is in reverse postorder, so we enqueue the new ones in
    // RPO except for the one which contains the source node as that is the
    // "bottom" we will continue processing in the bottom-up walk.
    assert(NewRefSCCs.front() == RC &&
           "New current RefSCC not first in the returned list!");
    for (RefSCC *NewRC : llvm::reverse(make_range(std::next(NewRefSCCs.begin()),
                                                  NewRefSCCs.end()))) {
      assert(NewRC != RC && "Should not encounter the current RefSCC further "
                            "in the postorder list of new RefSCCs.");
      UR.RCWorklist.insert(NewRC);
      LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                        << *NewRC << "\n");
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small which should minimize the work below as we don't want to
  // form cycles that this would break.
  for (Node *RefTarget : DemotedCallTargets) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, *RefTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
                        << "' to '" << *RefTarget << "'\n");
      continue;
    }

    // We are switching an internal call edge to a ref edge. This may split up
    // some SCCs.
    if (C != &TargetC) {
      // For separate SCCs this is trivial.
      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
      continue;
    }

    // Now update the call graph.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
                               C, AM, UR);
  }

  // Now promote ref edges into call edges.
  for (Node *CallTarget : PromotedRefTargets) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
                        << "' to '" << *CallTarget << "'\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
                      << N << "' to '" << *CallTarget << "'\n");

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    bool HasFunctionAnalysisProxy = false;
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    bool FormedCycle = RC->switchInternalEdgeToCall(
        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
          for (SCC *MergedC : MergedSCCs) {
            assert(MergedC != &TargetC && "Cannot merge away the target SCC!");

            HasFunctionAnalysisProxy |=
                AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
                    *MergedC) != nullptr;

            // Mark that this SCC will no longer be valid.
            UR.InvalidatedSCCs.insert(MergedC);

            // FIXME: We should really do a 'clear' here to forcibly release
            // memory, but we don't have a good way of doing that and
            // preserving the function analyses.
            auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
            PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
            AM.invalidate(*MergedC, PA);
          }
        });

    // If we formed a cycle by creating this call, we need to update more data
    // structures.
    if (FormedCycle) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // If one of the invalidated SCCs had a cached proxy to a function
      // analysis manager, we need to create a proxy in the new current SCC as
      // the invalidated SCCs had their functions moved.
      if (HasFunctionAnalysisProxy)
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle. However, we have taken care to
      // update the proxies so it remains valid.
      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
      AM.invalidate(*C, PA);
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    // If we have actually moved an SCC to be topologically "below" the current
    // one due to merging, we will need to revisit the current SCC after
    // visiting those moved SCCs.
    //
    // It is critical that we *do not* revisit the current SCC unless we
    // actually move SCCs in the process of merging because otherwise we may
    // form a cycle where an SCC is split apart, merged, split, merged and so
    // on infinitely.
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
                        << "\n");
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
                                                  RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
                          << MovedC << "\n");
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}