Example 1
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  const TargetData *TD = getAnalysisIfAvailable<TargetData>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;
  
  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;
    
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;
        
        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;
        
        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;
  
  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  
  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, TD);
  
  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // We iterate in the outer do/while loop because inlining can turn
    // indirect calls into direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;
      
      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction())) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
        // Update the cached cost info with the missing call
        growCachedCostInfo(Caller, NULL);
      } else {
        // We can only inline direct calls to non-declarations.
        if (Callee == 0 || Callee->isDeclaration()) continue;
      
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would expose the same call sites again and cause us to
        // inline infinitely.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
        
        
        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS))
          continue;

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime))
          continue;
        ++NumInlined;
        
        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSite NewCS = Ptr;
            simplifyCallSiteArguments(TD, NewCS);
            CallSites.push_back(std::make_pair(NewCS, NewHistoryID));
          }
        }
        
        // Update the cached cost info with the inlined call.
        growCachedCostInfo(Caller, Callee);
      }
      
      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];
        
        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();
        
        resetCachedCostInfo(Callee);
        
        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use 
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
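The InlineHistoryIncludes test used above is a simple parent-chain walk over the InlineHistory vector. A minimal sketch in plain C++, under the assumption that each history entry pairs an inlined callee with the index of the entry it was inlined from (Fn is a stand-in for llvm::Function):

// Hedged sketch, not the LLVM helper itself: return true if Callee already
// appears anywhere on the inline path ending at entry ID.
#include <utility>
#include <vector>

struct Fn; // stand-in for llvm::Function

static bool historyIncludes(const Fn *Callee, int ID,
                            const std::vector<std::pair<const Fn *, int>> &History) {
  while (ID != -1) {
    if (History[ID].first == Callee)
      return true;             // Callee is already on this inline path.
    ID = History[ID].second;   // Step to the entry we were inlined from.
  }
  return false;
}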
Example 2
void UseOverrideCheck::check(const MatchFinder::MatchResult &Result) {
  const FunctionDecl *Method = Result.Nodes.getStmtAs<FunctionDecl>("method");
  const SourceManager &Sources = *Result.SourceManager;

  assert(Method != nullptr);
  if (Method->getInstantiatedFromMemberFunction() != nullptr)
    Method = Method->getInstantiatedFromMemberFunction();

  if (Method->isImplicit() || Method->getLocation().isMacroID() ||
      Method->isOutOfLine())
    return;

  bool HasVirtual = Method->isVirtualAsWritten();
  bool HasOverride = Method->getAttr<OverrideAttr>();
  bool HasFinal = Method->getAttr<FinalAttr>();

  bool OnlyVirtualSpecified = HasVirtual && !HasOverride && !HasFinal;
  unsigned KeywordCount = HasVirtual + HasOverride + HasFinal;

  if (!OnlyVirtualSpecified && KeywordCount == 1)
    return; // Nothing to do.

  std::string Message;

  if (OnlyVirtualSpecified) {
    Message =
        "prefer using 'override' or (rarely) 'final' instead of 'virtual'";
  } else if (KeywordCount == 0) {
    Message = "annotate this function with 'override' or (rarely) 'final'";
  } else {
    StringRef Redundant =
        HasVirtual ? (HasOverride && HasFinal ? "'virtual' and 'override' are"
                                              : "'virtual' is")
                   : "'override' is";
    StringRef Correct = HasFinal ? "'final'" : "'override'";

    Message =
        (llvm::Twine(Redundant) +
         " redundant since the function is already declared " + Correct).str();
  }

  DiagnosticBuilder Diag = diag(Method->getLocation(), Message);

  CharSourceRange FileRange = Lexer::makeFileCharRange(
      CharSourceRange::getTokenRange(Method->getSourceRange()), Sources,
      Result.Context->getLangOpts());

  if (!FileRange.isValid())
    return;

  // FIXME: Instead of re-lexing and looking for specific macros such as
  // 'ABSTRACT', properly store the location of 'virtual' and '= 0' in each
  // FunctionDecl.
  SmallVector<Token, 16> Tokens = ParseTokens(FileRange, Result);

  // Add 'override' on inline declarations that don't already have it.
  if (!HasFinal && !HasOverride) {
    SourceLocation InsertLoc;
    StringRef ReplacementText = "override ";

    for (Token T : Tokens) {
      if (T.is(tok::kw___attribute)) {
        InsertLoc = T.getLocation();
        break;
      }
    }

    if (Method->hasAttrs()) {
      for (const clang::Attr *A : Method->getAttrs()) {
        if (!A->isImplicit()) {
          SourceLocation Loc =
              Sources.getExpansionLoc(A->getRange().getBegin());
          if (!InsertLoc.isValid() ||
              Sources.isBeforeInTranslationUnit(Loc, InsertLoc))
            InsertLoc = Loc;
        }
      }
    }

    if (InsertLoc.isInvalid() && Method->doesThisDeclarationHaveABody() &&
        Method->getBody() && !Method->isDefaulted()) {
      // For methods with inline definition, add the override keyword at the
      // end of the declaration of the function, but prefer to put it on the
      // same line as the declaration if the beginning brace for the start of
      // the body falls on the next line.
      Token LastNonCommentToken;
      for (Token T : Tokens) {
        if (!T.is(tok::comment)) {
          LastNonCommentToken = T;
        }
      }
      InsertLoc = LastNonCommentToken.getEndLoc();
      ReplacementText = " override";
    }

    if (!InsertLoc.isValid()) {
      // For declarations marked with "= 0" or "= [default|delete]", the end
      // location will point past those tokens. Therefore, the override
      // keyword shouldn't be inserted at the end, but before the '='.
      if (Tokens.size() > 2 && (GetText(Tokens.back(), Sources) == "0" ||
                                Tokens.back().is(tok::kw_default) ||
                                Tokens.back().is(tok::kw_delete)) &&
          GetText(Tokens[Tokens.size() - 2], Sources) == "=") {
        InsertLoc = Tokens[Tokens.size() - 2].getLocation();
      } else if (GetText(Tokens.back(), Sources) == "ABSTRACT") {
        InsertLoc = Tokens.back().getLocation();
      }
    }

    if (!InsertLoc.isValid()) {
      InsertLoc = FileRange.getEnd();
      ReplacementText = " override";
    }
    Diag << FixItHint::CreateInsertion(InsertLoc, ReplacementText);
  }

  if (HasFinal && HasOverride) {
    SourceLocation OverrideLoc = Method->getAttr<OverrideAttr>()->getLocation();
    Diag << FixItHint::CreateRemoval(
        CharSourceRange::getTokenRange(OverrideLoc, OverrideLoc));
  }

  if (HasVirtual) {
    for (Token Tok : Tokens) {
      if (Tok.is(tok::kw_virtual)) {
        Diag << FixItHint::CreateRemoval(CharSourceRange::getTokenRange(
            Tok.getLocation(), Tok.getLocation()));
        break;
      }
    }
  }
}
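For reference, a hypothetical input showing the rewrites the check above would produce; the trailing comments give the expected FixIt results:

// Hypothetical example, annotated with the expected diagnostics:
struct Base {
  virtual void f() {}
  virtual void g() {}
};
struct Derived : Base {
  virtual void f() {}        // -> void f() override {}  (add 'override', drop 'virtual')
  void g() override final {} // -> void g() final {}     ('override' is redundant)
};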
Example 3
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing. Examine metadata against the unreachable
// heuristic: the probability of an edge leading to an unreachable block is
// set to the minimum of the metadata weight and the unreachable heuristic.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
  if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  SmallVector<unsigned, 2> UnreachableIdxs;
  SmallVector<unsigned, 2> ReachableIdxs;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
    if (PostDominatedByUnreachable.count(TI->getSuccessor(i - 1)))
      UnreachableIdxs.push_back(i - 1);
    else
      ReachableIdxs.push_back(i - 1);
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  if (ScalingFactor > 1) {
    WeightSum = 0;
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
      Weights[i] /= ScalingFactor;
      WeightSum += Weights[i];
    }
  }
  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  if (WeightSum == 0 || ReachableIdxs.size() == 0) {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Weights[i] = 1;
    WeightSum = TI->getNumSuccessors();
  }

  // Set the probability.
  SmallVector<BranchProbability, 2> BP;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    BP.push_back({ Weights[i], static_cast<uint32_t>(WeightSum) });

  // Examine the metadata against the unreachable heuristic.
  // If the unreachable heuristic is stronger, use it for this edge.
  if (UnreachableIdxs.size() > 0 && ReachableIdxs.size() > 0) {
    auto ToDistribute = BranchProbability::getZero();
    auto UnreachableProb = UR_TAKEN_PROB;
    for (auto i : UnreachableIdxs)
      if (UnreachableProb < BP[i]) {
        ToDistribute += BP[i] - UnreachableProb;
        BP[i] = UnreachableProb;
      }

    // If we modified the probability of some edges then we must distribute
    // the difference between reachable blocks.
    if (ToDistribute > BranchProbability::getZero()) {
      BranchProbability PerEdge = ToDistribute / ReachableIdxs.size();
      for (auto i : ReachableIdxs)
        BP[i] += PerEdge;
    }
  }

  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    setEdgeProbability(BB, i, BP[i]);

  return true;
}
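A worked instance of the capping and redistribution step, modeled with plain doubles; UR_PROB is a placeholder for UR_TAKEN_PROB, whose real value is defined elsewhere in this file:

// Toy model of the unreachable-capping logic above (not the LLVM API).
#include <cstdio>
#include <vector>

int main() {
  const double UR_PROB = 1.0 / (1 << 20); // assumed stand-in for UR_TAKEN_PROB
  std::vector<double> BP = {0.5, 0.5};    // metadata says 50/50
  std::vector<int> Unreachable = {0}, Reachable = {1};

  double ToDistribute = 0.0;
  for (int i : Unreachable)
    if (UR_PROB < BP[i]) {                // metadata is weaker: cap the edge
      ToDistribute += BP[i] - UR_PROB;
      BP[i] = UR_PROB;
    }
  for (int i : Reachable)
    BP[i] += ToDistribute / Reachable.size(); // hand the excess to the rest

  std::printf("%g %g\n", BP[0], BP[1]);   // ~1e-06 0.999999
}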
Example 4
void FinalOverriderCollector::Collect(const CXXRecordDecl *RD, 
                                      bool VirtualBase,
                                      const CXXRecordDecl *InVirtualSubobject,
                                      CXXFinalOverriderMap &Overriders) {
  unsigned SubobjectNumber = 0;
  if (!VirtualBase)
    SubobjectNumber
      = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];

  for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
         BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
    if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
      if (!BaseDecl->isPolymorphic())
        continue;

      if (Overriders.empty() && !Base->isVirtual()) {
        // There are no other overriders of virtual member functions,
        // so let the base class fill in our overriders for us.
        Collect(BaseDecl, false, InVirtualSubobject, Overriders);
        continue;
      }

      // Collect all of the overriders from the base class subobject
      // and merge them into the set of overriders for this class.
      // For virtual base classes, populate or use the cached virtual
      // overrides so that we do not walk the virtual base class (and
      // its base classes) more than once.
      CXXFinalOverriderMap ComputedBaseOverriders;
      CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
      if (Base->isVirtual()) {
        CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
        BaseOverriders = MyVirtualOverriders;
        if (!MyVirtualOverriders) {
          MyVirtualOverriders = new CXXFinalOverriderMap;

          // Collect may cause VirtualOverriders to reallocate, invalidating the
          // MyVirtualOverriders reference. Set BaseOverriders to the right
          // value now.
          BaseOverriders = MyVirtualOverriders;

          Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
        }
      } else
        Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);

      // Merge the overriders from this base class into our own set of
      // overriders.
      for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(), 
                               OMEnd = BaseOverriders->end();
           OM != OMEnd;
           ++OM) {
        const CXXMethodDecl *CanonOM
          = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
        Overriders[CanonOM].add(OM->second);
      }
    }
  }

  for (CXXRecordDecl::method_iterator M = RD->method_begin(), 
                                   MEnd = RD->method_end();
       M != MEnd;
       ++M) {
    // We only care about virtual methods.
    if (!M->isVirtual())
      continue;

    CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());

    if (CanonM->begin_overridden_methods()
                                       == CanonM->end_overridden_methods()) {
      // This is a new virtual function that does not override any
      // other virtual function. Add it to the map of virtual
      // functions for which we are tracking overriders.

      // C++ [class.virtual]p2:
      //   For convenience we say that any virtual function overrides itself.
      Overriders[CanonM].add(SubobjectNumber,
                             UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                 InVirtualSubobject));
      continue;
    }

    // This virtual method overrides other virtual methods, so it does
    // not add any new slots into the set of overriders. Instead, we
    // replace entries in the set of overriders with the new
    // overrider. To do so, we dig down to the original virtual
    // functions using data recursion and update all of the methods it
    // overrides.
    typedef std::pair<CXXMethodDecl::method_iterator, 
                      CXXMethodDecl::method_iterator> OverriddenMethods;
    SmallVector<OverriddenMethods, 4> Stack;
    Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
                                   CanonM->end_overridden_methods()));
    while (!Stack.empty()) {
      OverriddenMethods OverMethods = Stack.back();
      Stack.pop_back();

      for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
        const CXXMethodDecl *CanonOM
          = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());

        // C++ [class.virtual]p2:
        //   A virtual member function C::vf of a class object S is
        //   a final overrider unless the most derived class (1.8)
        //   of which S is a base class subobject (if any) declares
        //   or inherits another member function that overrides vf.
        //
        // Treating this object like the most derived class, we
        // replace any overrides from base classes with this
        // overriding virtual function.
        Overriders[CanonOM].replaceAll(
                               UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                   InVirtualSubobject));

        if (CanonOM->begin_overridden_methods()
                                       == CanonOM->end_overridden_methods())
          continue;

        // Continue recursion to the methods that this virtual method
        // overrides.
        Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
                                       CanonOM->end_overridden_methods()));
      }
    }

    // C++ [class.virtual]p2:
    //   For convenience we say that any virtual function overrides itself.
    Overriders[CanonM].add(SubobjectNumber,
                           UniqueVirtualMethod(CanonM, SubobjectNumber,
                                               InVirtualSubobject));
  }
}
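A concrete hierarchy illustrating what the final-overrider map records; this is plain C++ semantics, not an API of the collector:

// For this hierarchy, the final overrider of A::f seen from D is B::f, so
// Collect on D maps both A::f and B::f to B::f (in the B subobject).
struct A { virtual void f() {} };
struct B : virtual A { void f() override {} };
struct C : virtual A { };  // does not override f
struct D : B, C { };       // unambiguous: B::f is the unique final overrider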
Example 5
void UnwrappedLineParser::calculateBraceTypes() {
  // We'll parse forward through the tokens until we hit
  // a closing brace or eof - note that getNextToken() will
  // parse macros, so this will magically work inside macro
  // definitions, too.
  unsigned StoredPosition = Tokens->getPosition();
  unsigned Position = StoredPosition;
  FormatToken *Tok = FormatTok;
  // Keep a stack of positions of lbrace tokens. We will
  // update information about whether an lbrace starts a
  // braced init list or a different block during the loop.
  SmallVector<unsigned, 8> LBraceStack;
  assert(Tok->Tok.is(tok::l_brace));
  do {
    FormatToken *NextTok = Tokens->getNextToken();
    switch (Tok->Tok.getKind()) {
    case tok::l_brace:
      LBraceStack.push_back(Position);
      break;
    case tok::r_brace:
      if (!LBraceStack.empty()) {
        if (LBraces[LBraceStack.back()] == BS_Unknown) {
          // If there is a comma, semicolon or right paren after the closing
          // brace, we assume this is a braced initializer list.

          // FIXME: Note that this currently works only because we do not
          // use the brace information while inside a braced init list.
          // Thus, if the parent is a braced init list, we consider all
          // brace blocks inside it braced init lists. That works well enough
          // for now, but we will need to fix it to correctly handle lambdas.
          if (NextTok->Tok.is(tok::comma) || NextTok->Tok.is(tok::semi) ||
              NextTok->Tok.is(tok::r_paren) || NextTok->Tok.is(tok::l_brace))
            LBraces[LBraceStack.back()] = BS_BracedInit;
          else
            LBraces[LBraceStack.back()] = BS_Block;
        }
        LBraceStack.pop_back();
      }
      break;
    case tok::semi:
    case tok::kw_if:
    case tok::kw_while:
    case tok::kw_for:
    case tok::kw_switch:
    case tok::kw_try:
      if (!LBraceStack.empty()) 
        LBraces[LBraceStack.back()] = BS_Block;
      break;
    default:
      break;
    }
    Tok = NextTok;
    ++Position;
  } while (Tok->Tok.isNot(tok::eof));
  // Assume other blocks for all unclosed opening braces.
  for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
    if (LBraces[LBraceStack[i]] == BS_Unknown)
      LBraces[LBraceStack[i]] = BS_Block;
  }
  FormatTok = Tokens->setPosition(StoredPosition);
}
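Two brace kinds the loop above distinguishes, shown on a hypothetical input; the comments give the classification that results:

#include <vector>

int f() {
  std::vector<int> v{1, 2, 3}; // '{1, 2, 3}' -> BS_BracedInit (';' follows '}')
  if (!v.empty()) {            // 'if' marks the enclosing lbrace as BS_Block
    return v[0];               // the ';' also forces the if's brace to BS_Block
  }
  return 0;
}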
Example 6
bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    Value *CurrentBase = Checks.front().getBase();
    Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    std::copy_if(Checks.begin(), Checks.end(),
                 std::back_inserter(CurrentChecks), IsCurrentCheck);
    Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      RangeChecksOut.insert(RangeChecksOut.end(), CurrentChecks.begin(),
                            CurrentChecks.end());
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    std::sort(CurrentChecks.begin(), CurrentChecks.end(),
              [&](const GuardWideningImpl::RangeCheck &LHS,
                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    // Note: std::sort does not invalidate iterators into CurrentChecks.

    ConstantInt *MinOffset = CurrentChecks.front().getOffset(),
                *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() ||
        !std::all_of(std::next(CurrentChecks.begin()), CurrentChecks.end(),
                     OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in (0,f]: k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f  implies all the other checks
    //
    // Informal proof sketch:
    //
    // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    // thus I+k_f is the greatest unsigned value in that range.
    //
    // This combined with Chk_f shows that everything in that range is u< L.
    // Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
    // lie in [I+k_0,I+k_f], thus proving our claim.
    //
    // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    // two possibilities: I+k_0 u< I+k_f or I+k_0 u> I+k_f (they can't be equal
    // since k_0 != k_f).  In the former case, [I+k_0,I+k_f] is not a wrapping
    // range by definition, and the latter case is impossible:
    //
    //   0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //   xxxxxx             xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    // For Chk_0 to succeed, we'd need k_f-k_0 (the range highlighted with
    // 'x' above) to be at least u> INT_MIN.

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
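The copy_if/remove_if grouping idiom above, reduced to a standalone sketch; Check and its integer keys are illustrative stand-ins for the RangeCheck type:

#include <algorithm>
#include <iterator>
#include <vector>

struct Check { int Base; int Offset; };

int main() {
  std::vector<Check> Checks = {{1, 0}, {2, 5}, {1, 7}, {1, 3}};
  while (!Checks.empty()) {
    int CurrentBase = Checks.front().Base;
    auto IsCurrent = [&](const Check &C) { return C.Base == CurrentBase; };

    // Peel off every check that shares the front element's key.
    std::vector<Check> Current;
    std::copy_if(Checks.begin(), Checks.end(), std::back_inserter(Current),
                 IsCurrent);
    Checks.erase(std::remove_if(Checks.begin(), Checks.end(), IsCurrent),
                 Checks.end());

    // As in the pass, sort the group by offset so front()/back() are the
    // extreme checks that can stand in for the whole group.
    std::sort(Current.begin(), Current.end(),
              [](const Check &L, const Check &R) { return L.Offset < R.Offset; });
  }
}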
Example 7
/// \brief Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
/// Walks up the macro expansion stack printing expansion notes, the code
/// snippet, caret, underlines and FixItHint display as appropriate at each
/// level.
///
/// \param Loc The location for this caret.
/// \param Level The diagnostic level currently being emitted.
/// \param Ranges The underlined ranges for this code snippet.
/// \param Hints The FixIt hints active for this diagnostic.
void DiagnosticRenderer::emitMacroExpansions(SourceLocation Loc,
                                             DiagnosticsEngine::Level Level,
                                             ArrayRef<CharSourceRange> Ranges,
                                             ArrayRef<FixItHint> Hints,
                                             const SourceManager &SM) {
  assert(Loc.isValid() && "must have a valid source location here");

  // Produce a stack of macro backtraces.
  SmallVector<SourceLocation, 8> LocationStack;
  unsigned IgnoredEnd = 0;
  while (Loc.isMacroID()) {
    // If this is the expansion of a macro argument, point the caret at the
    // use of the argument in the definition of the macro, not the expansion.
    if (SM.isMacroArgExpansion(Loc))
      LocationStack.push_back(SM.getImmediateExpansionRange(Loc).first);
    else
      LocationStack.push_back(Loc);

    if (checkRangesForMacroArgExpansion(Loc, Ranges, SM))
      IgnoredEnd = LocationStack.size();

    Loc = SM.getImmediateMacroCallerLoc(Loc);

    // Once the location no longer points into a macro, try stepping through
    // the last found location.  This sometimes produces additional useful
    // backtraces.
    if (Loc.isFileID())
      Loc = SM.getImmediateMacroCallerLoc(LocationStack.back());
    assert(Loc.isValid() && "must have a valid source location here");
  }

  LocationStack.erase(LocationStack.begin(),
                      LocationStack.begin() + IgnoredEnd);

  unsigned MacroDepth = LocationStack.size();
  unsigned MacroLimit = DiagOpts->MacroBacktraceLimit;
  if (MacroDepth <= MacroLimit || MacroLimit == 0) {
    for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
         I != E; ++I)
      emitSingleMacroExpansion(*I, Level, Ranges, SM);
    return;
  }

  unsigned MacroStartMessages = MacroLimit / 2;
  unsigned MacroEndMessages = MacroLimit / 2 + MacroLimit % 2;

  for (auto I = LocationStack.rbegin(),
            E = LocationStack.rbegin() + MacroStartMessages;
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);

  SmallString<200> MessageStorage;
  llvm::raw_svector_ostream Message(MessageStorage);
  Message << "(skipping " << (MacroDepth - MacroLimit)
          << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
             "see all)";
  emitBasicNote(Message.str());

  for (auto I = LocationStack.rend() - MacroEndMessages,
            E = LocationStack.rend();
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);
}
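The split arithmetic in isolation: with a limit of 3 against a depth of 10, one leading expansion is printed, then the skip note, then two trailing expansions (the tail gets the rounded-up half):

#include <cstdio>

int main() {
  unsigned MacroDepth = 10, MacroLimit = 3;
  unsigned StartMessages = MacroLimit / 2;                 // 1 from the top
  unsigned EndMessages = MacroLimit / 2 + MacroLimit % 2;  // 2 from the bottom
  std::printf("print %u, skip %u, print %u\n", StartMessages,
              MacroDepth - MacroLimit, EndMessages);       // print 1, skip 7, print 2
}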
Example 8
static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI, TargetLibraryInfo &TLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallSite CS)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;
            ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose());
            continue;
          }

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // We iterate in the outer do/while loop because inlining can turn
    // indirect calls into direct calls. CallSites may be modified inside
    // the inner loop, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), &TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: " << *CS.getInstruction()
                     << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration())
          continue;

        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would expose the same call sites again and cause us to
        // inline infinitely.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
        BasicBlock *Block = CS.getParent();
        // FIXME for new PM: because of the old PM we currently generate ORE and
        // in turn BFI on demand.  With the new PM, the ORE dependency should
        // just become a regular analysis dependency.
        OptimizationRemarkEmitter ORE(Caller);

        // If the policy determines that we should inline this function,
        // try to do so.
        using namespace ore;
        if (!shouldInline(CS, GetInlineCost, ORE)) {
          ORE.emit(
              OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
              << NV("Callee", Callee) << " will not be inlined into "
              << NV("Caller", Caller));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, AARGetter,
                                  ImportedFunctionsStats)) {
          ORE.emit(
              OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
              << NV("Callee", Callee) << " will not be inlined into "
              << NV("Caller", Caller));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        ORE.emit(OptimizationRemark(DEBUG_TYPE, "Inlined", DLoc, Block)
                 << NV("Callee", Callee) << " inlined into "
                 << NV("Caller", Caller));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: " << Callee->getName()
                     << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
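The 'FirstCallInSCC' loop above is an in-place move-to-tail partition. A standalone model of the same idiom, with InSCC as an illustrative predicate (std::partition would achieve the same effect):

#include <utility>
#include <vector>

int main() {
  std::vector<int> V = {1, 8, 2, 9, 3};
  auto InSCC = [](int X) { return X >= 8; }; // stand-in predicate
  unsigned First = V.size();
  for (unsigned i = 0; i < First; ++i)
    if (InSCC(V[i]))
      std::swap(V[i--], V[--First]); // i-- so the swapped-in element is re-tested
  // Now V[0..First) fails the predicate and V[First..) satisfies it.
}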
Example 9
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  if (TI->getNumSuccessors() == 1)
    return false;
  if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  WeightSum = 0;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Weights[i] /= ScalingFactor;
    WeightSum += Weights[i];
  }

  if (WeightSum == 0) {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      setEdgeProbability(BB, i, {1, e});
  } else {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      setEdgeProbability(BB, i, {Weights[i], static_cast<uint32_t>(WeightSum)});
  }

  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  return true;
}
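The scaling rule in isolation: a sum just over UINT32_MAX picks a factor of 2, which is enough to bring the new sum back under 32 bits:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t W[2] = {UINT32_MAX, 3}; // 64-bit sum barely overflows 32 bits
  uint64_t Sum = W[0] + W[1];
  uint64_t Factor = (Sum > UINT32_MAX) ? Sum / UINT32_MAX + 1 : 1;
  uint64_t NewSum = 0;
  for (uint64_t &X : W) {
    X /= Factor; // Factor == 2 here
    NewSum += X;
  }
  std::printf("%llu %llu (sum %llu)\n", (unsigned long long)W[0],
              (unsigned long long)W[1], (unsigned long long)NewSum);
}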
Example 10
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = 0; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }
  
  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == 0);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);

      llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);

      switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        // fallthrough

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, CatchParamAddr);
        break;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
        break;
      }
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }  

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}
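A minimal model, not the CodeGen API, of the handler table built above: a null type-info plays the role of @catch(...), matching anything, which is why the loop above stops adding handlers once it sees one:

#include <vector>

struct Handler { const void *TypeInfo; /* null => catch-all */ };

// Sketch of first-match selection over the emitted handler list.
static const Handler *selectHandler(const std::vector<Handler> &Handlers,
                                    const void *ExnType) {
  for (const Handler &H : Handlers)
    if (!H.TypeInfo || H.TypeInfo == ExnType) // catch-all matches anything
      return &H;
  return nullptr; // no match: the exception propagates
}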
Example 11
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                    << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLR.addSegment(LiveRange::Segment(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLR.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveRange::iterator LRI = NewLR.FindSegmentContaining(VNI->def);
    assert(LRI != NewLR.end() && "Missing segment for PHI");
    if (LRI->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLR.removeSegment(LRI->start, LRI->end);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
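The extension loop above follows the classic backward liveness shape. A generic sketch under simplified types (Block stands in for MachineBasicBlock; segment bookkeeping and slot indexes are omitted):

#include <set>
#include <vector>

struct Block { std::vector<Block *> Preds; };

// Mark every block between a use and its def as live-in, walking backward.
static void extendToUse(Block *DefBlock, Block *UseBlock,
                        std::set<Block *> &LiveIn) {
  std::vector<Block *> WorkList = {UseBlock};
  while (!WorkList.empty()) {
    Block *B = WorkList.back();
    WorkList.pop_back();
    if (B == DefBlock || !LiveIn.insert(B).second)
      continue; // reached the def, or this block is already marked live-in
    for (Block *P : B->Preds)
      WorkList.push_back(P); // the value must be live-out of every pred
  }
}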
Example 12
void AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(
    MachineFunction &MF, RegScavenger *RS) const {
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  SmallVector<unsigned, 4> UnspilledCSGPRs;
  SmallVector<unsigned, 4> UnspilledCSFPRs;

  // The frame record needs to be created by saving the appropriate registers.
  if (hasFP(MF)) {
    MRI->setPhysRegUsed(AArch64::FP);
    MRI->setPhysRegUsed(AArch64::LR);
  }

  // Spill the BasePtr if it's used. Do this first thing so that the
  // getCalleeSavedRegs() below will get the right answer.
  if (RegInfo->hasBasePointer(MF))
    MRI->setPhysRegUsed(RegInfo->getBaseRegister());

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumGPRSpilled = 0;
  unsigned NumFPRSpilled = 0;
  bool ExtraCSSpill = false;
  bool CanEliminateFrame = true;
  DEBUG(dbgs() << "*** processFunctionBeforeCalleeSavedScan\nUsed CSRs:");
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  // Check pairs of consecutive callee-saved registers.
  for (unsigned i = 0; CSRegs[i]; i += 2) {
    assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");

    const unsigned OddReg = CSRegs[i];
    const unsigned EvenReg = CSRegs[i + 1];
    assert((AArch64::GPR64RegClass.contains(OddReg) &&
            AArch64::GPR64RegClass.contains(EvenReg)) ^
               (AArch64::FPR64RegClass.contains(OddReg) &&
                AArch64::FPR64RegClass.contains(EvenReg)) &&
           "Register class mismatch!");

    const bool OddRegUsed = MRI->isPhysRegUsed(OddReg);
    const bool EvenRegUsed = MRI->isPhysRegUsed(EvenReg);

    // Early exit if none of the registers in the register pair is actually
    // used.
    if (!OddRegUsed && !EvenRegUsed) {
      if (AArch64::GPR64RegClass.contains(OddReg)) {
        UnspilledCSGPRs.push_back(OddReg);
        UnspilledCSGPRs.push_back(EvenReg);
      } else {
        UnspilledCSFPRs.push_back(OddReg);
        UnspilledCSFPRs.push_back(EvenReg);
      }
      continue;
    }

    unsigned Reg = AArch64::NoRegister;
    // If only one of the registers of the register pair is used, make sure to
    // mark the other one as used as well.
    if (OddRegUsed ^ EvenRegUsed) {
      // Find out which register is the additional spill.
      Reg = OddRegUsed ? EvenReg : OddReg;
      MRI->setPhysRegUsed(Reg);
    }

    DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
    DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));

    assert(((OddReg == AArch64::LR && EvenReg == AArch64::FP) ||
            (RegInfo->getEncodingValue(OddReg) + 1 ==
             RegInfo->getEncodingValue(EvenReg))) &&
           "Register pair of non-adjacent registers!");
    if (AArch64::GPR64RegClass.contains(OddReg)) {
      NumGPRSpilled += 2;
      // If it's not a reserved register, we can use it in lieu of an
      // emergency spill slot for the register scavenger.
      // FIXME: It would be better to instead keep looking and choose another
      // unspilled register that isn't reserved, if there is one.
      if (Reg != AArch64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
        ExtraCSSpill = true;
    } else
      NumFPRSpilled += 2;

    CanEliminateFrame = false;
  }

  // FIXME: Set BigStack if any stack slot references may be out of range.
  // For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned CFSize = estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  bool BigStack = (CFSize >= 256);
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack && !ExtraCSSpill) {

    // If we're adding a register to spill here, we have to add two of them
    // to keep the number of regs to spill even.
    assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
    unsigned Count = 0;
    while (!UnspilledCSGPRs.empty() && Count < 2) {
      unsigned Reg = UnspilledCSGPRs.back();
      UnspilledCSGPRs.pop_back();
      DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
                   << " to get a scratch register.\n");
      MRI->setPhysRegUsed(Reg);
      ExtraCSSpill = true;
      ++Count;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill) {
      const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
      int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }
}
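
The 256-byte cutoff above lines up with the reach of AArch64's unscaled
load/store forms (LDUR/STUR take a signed 9-bit byte offset, -256..+255):
once the estimated frame may outgrow that range, a scratch register can be
needed to materialize stack offsets. A worked instance of the estimate, with
hypothetical numbers:

// Hypothetical walk-through of the heuristic above: locals that
// estimateStackSize(MF) reports as 200 bytes, plus 4 spilled GPR64s and
// 2 spilled FPR64s:
//   CFSize = 200 + 8 * (4 + 2) = 248   // < 256, so BigStack stays false
// One more spilled pair pushes the estimate to 264 >= 256; BigStack is set
// and an extra callee-saved register (or the emergency slot) is reserved.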
Example no. 13
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
/// split the critical edge.  This will update DominatorTree and
/// DominanceFrontier information if they are available, thus calling this
/// function will not invalidate either of them.  This returns the new block
/// if the edge was split, null otherwise.
///
/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
/// specified successor will be merged into the same critical edge block.  
/// This is most commonly interesting with switch instructions, which may 
/// have many edges to any one destination.  This ensures that all edges to that
/// dest go to one block instead of each going to a different block, but isn't 
/// the standard definition of a "critical edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst.  Splitting these edges will almost always create an invalid
/// program because the address of the new block won't be the one that is jumped
/// to.
///
BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
                                    Pass *P, bool MergeIdenticalEdges) {
  if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0;
  
  assert(!isa<IndirectBrInst>(TI) &&
         "Cannot split critical edge from IndirectBrInst");
  
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *DestBB = TI->getSuccessor(SuccNum);

  // Create a new basic block, linking it into the CFG.
  BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
                      TIBB->getName() + "." + DestBB->getName() + "_crit_edge");
  // Create our unconditional branch.
  BranchInst::Create(DestBB, NewBB);

  // Branch to the new block, breaking the edge.
  TI->setSuccessor(SuccNum, NewBB);

  // Insert the block into the function... right after the block TI lives in.
  Function &F = *TIBB->getParent();
  Function::iterator FBBI = TIBB;
  F.getBasicBlockList().insert(++FBBI, NewBB);
  
  // If there are any PHI nodes in DestBB, we need to update them so that they
  // merge incoming values from NewBB instead of from TIBB.
  if (PHINode *APHI = dyn_cast<PHINode>(DestBB->begin())) {
    // This conceptually does:
    //  foreach (PHINode *PN in DestBB)
    //    PN->setIncomingBlock(PN->getIncomingBlock(TIBB), NewBB);
    // but is optimized for two cases.
    
    if (APHI->getNumIncomingValues() <= 8) {  // Small # preds case.
      unsigned BBIdx = 0;
      for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
        // We no longer enter through TIBB, now we come in through NewBB.
        // Revector exactly one entry in the PHI node that used to come from
        // TIBB to come from NewBB.
        PHINode *PN = cast<PHINode>(I);
        
        // Reuse the previous value of BBIdx if it lines up.  In cases where we
        // have multiple phi nodes with *lots* of predecessors, this is a speed
        // win because we don't have to scan the PHI looking for TIBB.  This
        // happens because the BB list of PHI nodes are usually in the same
        // order.
        if (PN->getIncomingBlock(BBIdx) != TIBB)
          BBIdx = PN->getBasicBlockIndex(TIBB);
        PN->setIncomingBlock(BBIdx, NewBB);
      }
    } else {
      // However, the foreach loop is slow for blocks with lots of predecessors
      // because PHINode::getIncomingBlock is O(n) in # preds.  Instead, walk
      // the user list of TIBB to find the PHI nodes.
      SmallPtrSet<PHINode*, 16> UpdatedPHIs;
    
      for (Value::use_iterator UI = TIBB->use_begin(), E = TIBB->use_end();
           UI != E; ) {
        Value::use_iterator Use = UI++;
        if (PHINode *PN = dyn_cast<PHINode>(*Use)) {
          // Remove one entry from each PHI.
          if (PN->getParent() == DestBB && UpdatedPHIs.insert(PN))
            PN->setOperand(Use.getOperandNo(), NewBB);
        }
      }
    }
  }
   
  // If there are any other edges from TIBB to DestBB, update those to go
  // through the split block, making those edges non-critical as well (and
  // reducing the number of phi entries in the DestBB if relevant).
  if (MergeIdenticalEdges) {
    for (unsigned i = SuccNum+1, e = TI->getNumSuccessors(); i != e; ++i) {
      if (TI->getSuccessor(i) != DestBB) continue;
      
      // Remove an entry for TIBB from DestBB phi nodes.
      DestBB->removePredecessor(TIBB);
      
      // We found another edge to DestBB, go to NewBB instead.
      TI->setSuccessor(i, NewBB);
    }
  }

  // If we don't have a pass object, we can't update anything...
  if (P == 0) return NewBB;
  
  DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
  DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>();
  LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
  ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
  
  // If we have nothing to update, just return.
  if (DT == 0 && DF == 0 && LI == 0 && PI == 0)
    return NewBB;

  // Now update analysis information.  Since the only predecessor of NewBB is
  // the TIBB, TIBB clearly dominates NewBB.  TIBB usually doesn't dominate
  // anything, as there are other successors of DestBB.  However, if all other
  // predecessors of DestBB are already dominated by DestBB (e.g. DestBB is a
  // loop header) then NewBB dominates DestBB.
  SmallVector<BasicBlock*, 8> OtherPreds;

  // If there is a PHI in the block, loop over predecessors with it, which is
  // faster than iterating pred_begin/end.
  if (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingBlock(i) != NewBB)
        OtherPreds.push_back(PN->getIncomingBlock(i));
  } else {
    for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
         I != E; ++I) {
      BasicBlock *P = *I;
      if (P != NewBB)
          OtherPreds.push_back(P);
    }
  }

  bool NewBBDominatesDestBB = true;
  
  // Should we update DominatorTree information?
  if (DT) {
    DomTreeNode *TINode = DT->getNode(TIBB);

    // The new block is not the immediate dominator for any other nodes, but
    // TINode is the immediate dominator for the new node.
    //
    if (TINode) {       // Don't break unreachable code!
      DomTreeNode *NewBBNode = DT->addNewBlock(NewBB, TIBB);
      DomTreeNode *DestBBNode = 0;
     
      // If NewBBDominatesDestBB hasn't been computed yet, do so with DT.
      if (!OtherPreds.empty()) {
        DestBBNode = DT->getNode(DestBB);
        while (!OtherPreds.empty() && NewBBDominatesDestBB) {
          if (DomTreeNode *OPNode = DT->getNode(OtherPreds.back()))
            NewBBDominatesDestBB = DT->dominates(DestBBNode, OPNode);
          OtherPreds.pop_back();
        }
        OtherPreds.clear();
      }
      
      // If NewBBDominatesDestBB, then NewBB dominates DestBB, otherwise it
      // doesn't dominate anything.
      if (NewBBDominatesDestBB) {
        if (!DestBBNode) DestBBNode = DT->getNode(DestBB);
        DT->changeImmediateDominator(DestBBNode, NewBBNode);
      }
    }
  }

  // Should we update DominanceFrontier information?
  if (DF) {
    // If NewBBDominatesDestBB hasn't been computed yet, do so with DF.
    if (!OtherPreds.empty()) {
      // FIXME: IMPLEMENT THIS!
      llvm_unreachable("Requiring domfrontiers but not idom/domtree/domset."
                       " not implemented yet!");
    }
    
    // Since the new block is dominated by its only predecessor TIBB,
    // it cannot be in any block's dominance frontier.  If NewBB dominates
    // DestBB, its dominance frontier is the same as DestBB's, otherwise it is
    // just {DestBB}.
    if (NewBBDominatesDestBB) {
      DominanceFrontier::iterator I = DF->find(DestBB);
      if (I != DF->end()) {
        DF->addBasicBlock(NewBB, I->second);
        
        if (I->second.count(DestBB)) {
          // However NewBB's frontier does not include DestBB.
          DominanceFrontier::iterator NF = DF->find(NewBB);
          DF->removeFromFrontier(NF, DestBB);
        }
      }
      else
        DF->addBasicBlock(NewBB, DominanceFrontier::DomSetType());
    } else {
      DominanceFrontier::DomSetType NewDFSet;
      NewDFSet.insert(DestBB);
      DF->addBasicBlock(NewBB, NewDFSet);
    }
  }
  
  // Update LoopInfo if it is around.
  if (LI) {
    if (Loop *TIL = LI->getLoopFor(TIBB)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (Loop *DestLoop = LI->getLoopFor(DestBB)) {
        if (TIL == DestLoop) {
          // Both in the same loop, the NewBB joins loop.
          DestLoop->addBasicBlockToLoop(NewBB, LI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NewBB, LI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NewBB, LI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == DestBB &&
                 "Should not create irreducible loops!");
          if (Loop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NewBB, LI->getBase());
        }
      }
      // If TIBB is in a loop and DestBB is outside of that loop, split the
      // other exit blocks of the loop that also have predecessors outside
      // the loop, to maintain a LoopSimplify guarantee.
      if (!TIL->contains(DestBB) &&
          P->mustPreserveAnalysisID(LoopSimplifyID)) {
        assert(!TIL->contains(NewBB) &&
               "Split point for loop exit is contained in loop!");

        // Update LCSSA form in the newly created exit block.
        if (P->mustPreserveAnalysisID(LCSSAID)) {
          SmallVector<BasicBlock *, 1> OrigPred;
          OrigPred.push_back(TIBB);
          CreatePHIsForSplitLoopExit(OrigPred, NewBB, DestBB);
        }

        // For each unique exit block...
        SmallVector<BasicBlock *, 4> ExitBlocks;
        TIL->getExitBlocks(ExitBlocks);
        for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
          // Collect all the preds that are inside the loop, and note
          // whether there are any preds outside the loop.
          SmallVector<BasicBlock *, 4> Preds;
          bool HasPredOutsideOfLoop = false;
          BasicBlock *Exit = ExitBlocks[i];
          for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
               I != E; ++I) {
            BasicBlock *P = *I;
            if (TIL->contains(P))
              Preds.push_back(P);
            else
              HasPredOutsideOfLoop = true;
          }
          // If there are any preds not in the loop, we'll need to split
          // the edges. The Preds.empty() check is needed because a block
          // may appear multiple times in the list. We can't use
          // getUniqueExitBlocks above because that depends on LoopSimplify
          // form, which we're in the process of restoring!
          if (!Preds.empty() && HasPredOutsideOfLoop) {
            BasicBlock *NewExitBB =
              SplitBlockPredecessors(Exit, Preds.data(), Preds.size(),
                                     "split", P);
            if (P->mustPreserveAnalysisID(LCSSAID))
              CreatePHIsForSplitLoopExit(Preds, NewExitBB, Exit);
          }
        }
      }
      // LCSSA form was updated above for the case where LoopSimplify is
      // available, which means that all predecessors of loop exit blocks
      // are within the loop. Without LoopSimplify form, it would be
      // necessary to insert a new phi.
      assert((!P->mustPreserveAnalysisID(LCSSAID) ||
              P->mustPreserveAnalysisID(LoopSimplifyID)) &&
             "SplitCriticalEdge doesn't know how to update LCCSA form "
             "without LoopSimplify!");
    }
  }

  // Update ProfileInfo if it is around.
  if (PI)
    PI->splitEdge(TIBB, DestBB, NewBB, MergeIdenticalEdges);

  return NewBB;
}
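
As a usage illustration of the routine above: since SplitCriticalEdge returns
null for edges that are not critical, a pass can simply probe every successor
of every terminator. The wrapper below is a minimal sketch (the name
BreakAllCriticalEdges is hypothetical, not part of the snippet); passing 'P'
lets the helper update DominatorTree/LoopInfo when they are available.

// Minimal sketch: break every critical edge in F, merging duplicate switch
// edges into one new block. Edges from indirectbr are skipped, per the
// documented restriction above.
static bool BreakAllCriticalEdges(Function &F, Pass *P) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    TerminatorInst *TI = BB->getTerminator();
    if (isa<IndirectBrInst>(TI))
      continue;
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      if (SplitCriticalEdge(TI, i, P, /*MergeIdenticalEdges=*/true))
        Changed = true;  // Non-null return: the edge really was critical.
  }
  return Changed;
}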
Example no. 14
/// EmitSpecialNode - Generate machine code for a target-independent node and
/// needed dependencies.
void InstrEmitter::
EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, unsigned> &VRBaseMap) {
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    Node->dump();
#endif
    llvm_unreachable("This target-independent node should have been selected!");
  case ISD::EntryToken:
    llvm_unreachable("EntryToken should have been excluded from the schedule!");
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor: // fall thru
    break;
  case ISD::CopyToReg: {
    unsigned SrcReg;
    SDValue SrcVal = Node->getOperand(2);
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
    else
      SrcReg = getVR(SrcVal, VRBaseMap);

    unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
      break;

    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            DestReg).addReg(SrcReg);
    break;
  }
  case ISD::CopyFromReg: {
    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
    break;
  }
  case ISD::EH_LABEL:
  case ISD::ANNOTATION_LABEL: {
    unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
                       ? TargetOpcode::EH_LABEL
                       : TargetOpcode::ANNOTATION_LABEL;
    MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
            TII->get(Opc)).addSym(S);
    break;
  }

  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END: {
    unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
                         ? TargetOpcode::LIFETIME_START
                         : TargetOpcode::LIFETIME_END;

    // The operand is always a frame index here, so use cast<> (which
    // asserts) rather than an unchecked dyn_cast<> result.
    FrameIndexSDNode *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
        .addFrameIndex(FI->getIndex());
    break;
  }

  case ISD::INLINEASM: {
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
      --NumOps;  // Ignore the glue operand.

    // Create the inline asm machine instruction.
    MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(),
                                      TII->get(TargetOpcode::INLINEASM));

    // Add the asm string as an external symbol operand.
    SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
    const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
    MIB.addExternalSymbol(AsmStr);

    // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
    // bits.
    int64_t ExtraInfo =
      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
                          getZExtValue();
    MIB.addImm(ExtraInfo);

    // Remember the operand index of the group flags.
    SmallVector<unsigned, 8> GroupIdx;

    // Remember registers that are part of early-clobber defs.
    SmallVector<unsigned, 8> ECRegs;

    // Add all of the operand registers to the instruction.
    for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
      unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
      const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

      GroupIdx.push_back(MIB->getNumOperands());
      MIB.addImm(Flags);
      ++i;  // Skip the ID value.

      switch (InlineAsm::getKind(Flags)) {
      default: llvm_unreachable("Bad flags!");
      case InlineAsm::Kind_RegDef:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          // FIXME: Add dead flags for physical and virtual registers defined.
          // For now, mark physical register defs as implicit to help fast
          // regalloc. This makes inline asm look a lot like calls.
          MIB.addReg(Reg, RegState::Define |
                  getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
        }
        break;
      case InlineAsm::Kind_RegDefEarlyClobber:
      case InlineAsm::Kind_Clobber:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                  getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
          ECRegs.push_back(Reg);
        }
        break;
      case InlineAsm::Kind_RegUse:  // Use of register.
      case InlineAsm::Kind_Imm:  // Immediate.
      case InlineAsm::Kind_Mem:  // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.
        for (unsigned j = 0; j != NumVals; ++j, ++i)
          AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
                     /*IsDebug=*/false, IsClone, IsCloned);

        // Manually set isTied bits.
        if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
          unsigned DefGroup = 0;
          if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
            unsigned DefIdx = GroupIdx[DefGroup] + 1;
            unsigned UseIdx = GroupIdx.back() + 1;
            for (unsigned j = 0; j != NumVals; ++j)
              MIB->tieOperands(DefIdx + j, UseIdx + j);
          }
        }
        break;
      }
    }

    // GCC inline assembly allows input operands to also be early-clobber
    // output operands (so long as the operand is written only after it's
    // used), but this does not match the semantics of our early-clobber flag.
    // If an early-clobber operand register is also an input operand register,
    // then remove the early-clobber flag.
    for (unsigned Reg : ECRegs) {
      if (MIB->readsRegister(Reg, TRI)) {
        MachineOperand *MO = MIB->findRegisterDefOperand(Reg, false, TRI);
        assert(MO && "No def operand for clobbered register?");
        MO->setIsEarlyClobber(false);
      }
    }

    // Get the mdnode from the asm if it exists and add it to the instruction.
    SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
    const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
    if (MD)
      MIB.addMetadata(MD);

    MBB->insert(InsertPos, MIB);
    break;
  }
  }
}
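
The INLINEASM case above decodes one "flag word" immediate per operand group;
the group's kind and its register count are packed into that word. A small
sketch of how such a word round-trips through the InlineAsm helpers used in
the loop (the values are invented for illustration):

// Build and decode an operand-group flag word for a two-register use group.
unsigned Flags = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, /*NumOps=*/2);
assert(InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse);
assert(InlineAsm::getNumOperandRegisters(Flags) == 2);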
Example no. 15
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
                                           ArrayRef<InvokeInst *> Invokes) {
  // Scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator II = BB->begin(), IIE = BB->end(); II != IIE;
         ++II) {
      // Ignore obvious cases we don't have to handle. In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block. Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty())
        continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back()))
        continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F.begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction *, 16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      SmallPtrSet<BasicBlock *, 64> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses in a PHI node occur in the corresponding predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
                       << UnwindBlock->getName() << "\n");
          NeedsSpill = true;
          break;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        DemoteRegToStack(*Inst, true);
        ++NumSpilled;
      }
    }
  }

  // Go through the landing pads and remove any PHIs there.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
    LandingPadInst *LPI = UnwindBlock->getLandingPadInst();

    // Place PHIs into a set to avoid invalidating the iterator.
    SmallPtrSet<PHINode *, 8> PHIsToDemote;
    for (BasicBlock::iterator PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
      PHIsToDemote.insert(cast<PHINode>(PN));
    if (PHIsToDemote.empty())
      continue;

    // Demote the PHIs to the stack.
    for (SmallPtrSet<PHINode *, 8>::iterator I = PHIsToDemote.begin(),
                                             E = PHIsToDemote.end();
         I != E; ++I)
      DemotePHIToStack(*I);

    // Move the landingpad instruction back to the top of the landing pad block.
    LPI->moveBefore(UnwindBlock->begin());
  }
}
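
MarkBlocksLiveIn is called above but not shown; in SjLjEHPrepare.cpp it is a
small static helper. The sketch below is a reconstruction consistent with its
call sites here (assuming this era's SmallPtrSet::insert, which returns false
on a duplicate): it floods liveness backwards through predecessors.

// Hedged reconstruction: insert BB into LiveBBs and recurse into its
// predecessors, stopping at blocks that are already marked live.
static void MarkBlocksLiveIn(BasicBlock *BB,
                             SmallPtrSet<BasicBlock *, 64> &LiveBBs) {
  if (!LiveBBs.insert(BB))
    return; // Already known live-in; stop the walk here.
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}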