Example #1
// recurseBasicBlock() - This calculates the ProfileInfo estimation for a
// single block and then recurses into the successors.
// The algorithm preserves the flow condition, meaning that the sum of the
// weights of the incoming edges must equal the block weight, which must in
// turn equal the sum of the weights of the outgoing edges.
// Since the flow of a block is determined from the current state of the
// flow, once an edge has a flow assigned, this flow is never changed again;
// otherwise it would be possible to violate the flow condition in another
// block.
void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {

  // Break the recursion if this BasicBlock was already visited.
  if (BBToVisit.find(BB) == BBToVisit.end()) return;

  // Read the LoopInfo for this block.
  bool  BBisHeader = LI->isLoopHeader(BB);
  Loop* BBLoop     = LI->getLoopFor(BB);

  // To get the block weight, read all incoming edges.
  double BBWeight = 0;
  std::set<BasicBlock*> ProcessedPreds;
  for ( pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
        bbi != bbe; ++bbi ) {
    // If this block was not considered already, add weight.
    Edge edge = getEdge(*bbi,BB);
    double w = getEdgeWeight(edge);
    if (ProcessedPreds.insert(*bbi).second) {
      BBWeight += ignoreMissing(w);
    }
    // If this block is a loop header and the predecessor is contained in this
    // loop (i.e. the edge is a backedge), continue and do not check whether
    // the value is valid.
    if (BBisHeader && BBLoop->contains(*bbi)) {
      printEdgeError(edge, "but is backedge, continueing");
      continue;
    }
    // If the edge's value is missing (and this is neither a loop header nor a
    // backedge), return: this block is currently not estimable.
    if (w == MissingValue) {
      printEdgeError(edge, "returning");
      return;
    }
  }
  if (getExecutionCount(BB) != MissingValue) {
    BBWeight = getExecutionCount(BB);
  }

  // Fetch all necessary information for the current block.
  SmallVector<Edge, 8> ExitEdges;
  SmallVector<Edge, 8> Edges;
  if (BBLoop) {
    BBLoop->getExitEdges(ExitEdges);
  }

  // If this is a loop header, consider the following:
  // Exactly the flow that is entering this block must exit this block too. So
  // do the following:
  // *) get all the exit edges, read the flow that is already leaving this
  // loop, remember the edges that do not have any flow on them right now.
  // (The edges that already have flow on them are most likely exiting edges of
  // other loops; do not touch those flows because the previously calculated
  // loop headers would not be exact anymore.)
  // *) In case there is not a single exiting edge left, create one at the loop
  // latch to prevent the flow from building up in the loop.
  // *) Take the flow that is not leaving the loop already and distribute it on
  // the remaining exiting edges.
  // (This ensures that all flow that enters the loop also leaves it.)
  // *) Increase the flow into the loop by increasing the weight of this block.
  // There is at least one incoming backedge that will bring us this flow later
  // on. (So that the flow condition in this node is valid again.)
  if (BBisHeader) {
    double incoming = BBWeight;
    // Subtract the flow leaving the loop.
    std::set<Edge> ProcessedExits;
    for (SmallVector<Edge, 8>::iterator ei = ExitEdges.begin(),
         ee = ExitEdges.end(); ei != ee; ++ei) {
      if (ProcessedExits.insert(*ei).second) {
        double w = getEdgeWeight(*ei);
        if (w == MissingValue) {
          Edges.push_back(*ei);
          // Check if there is a necessary minimal weight; if so, subtract it
          // from the incoming weight.
          if (MinimalWeight.find(*ei) != MinimalWeight.end()) {
            incoming -= MinimalWeight[*ei];
            DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n");
          }
        } else {
          incoming -= w;
        }
      }
    }
    // If no exit edges, create one:
    if (Edges.size() == 0) {
      BasicBlock *Latch = BBLoop->getLoopLatch();
      if (Latch) {
        Edge edge = getEdge(Latch,0);
        EdgeInformation[BB->getParent()][edge] = BBWeight;
        printEdgeWeight(edge);
        edge = getEdge(Latch, BB);
        EdgeInformation[BB->getParent()][edge] = BBWeight * ExecCount;
        printEdgeWeight(edge);
      }
    }

    // Distribute the remaining weight onto the exiting edges. To prevent
    // fractions from building up and provoking precision problems, the weight
    // to be distributed is split and rounded down; the last edge gets a
    // somewhat bigger value, but we are close enough for an estimation.
    double fraction = floor(incoming/Edges.size());
    for (SmallVector<Edge, 8>::iterator ei = Edges.begin(), ee = Edges.end();
         ei != ee; ++ei) {
      double w = 0;
      if (ei != (ee-1)) {
        w = fraction;
        incoming -= fraction;
      } else {
        w = incoming;
      }
      EdgeInformation[BB->getParent()][*ei] += w;
      // Read necessary minimal weight.
      if (MinimalWeight.find(*ei) != MinimalWeight.end()) {
        EdgeInformation[BB->getParent()][*ei] += MinimalWeight[*ei];
        DEBUG(dbgs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n");
      }
      printEdgeWeight(*ei);
      
      // Add minimal weight to the paths to all exit edges; this is used to
      // ensure that enough flow reaches these edges.
      Path p;
      const BasicBlock *Dest = GetPath(BB, (*ei).first, p, GetPathToDest);
      while (Dest != BB) {
        const BasicBlock *Parent = p.find(Dest)->second;
        Edge e = getEdge(Parent, Dest);
        if (MinimalWeight.find(e) == MinimalWeight.end()) {
          MinimalWeight[e] = 0;
        }
        MinimalWeight[e] += w;
        DEBUG(dbgs() << "Minimal Weight for " << e << ": " << format("%.20g",MinimalWeight[e]) << "\n");
        Dest = Parent;
      }
    }
    // Increase flow into the loop.
    BBWeight *= (ExecCount+1);
  }

  BlockInformation[BB->getParent()][BB] = BBWeight;
  // Up until now we considered only the loop-exiting edges; now we have a
  // definite block weight and must distribute it onto the outgoing edges.
  // Since there may already be flow attached to some of the edges, read this
  // flow first and remember the edges that still have no flow attached.
  Edges.clear();
  std::set<BasicBlock*> ProcessedSuccs;

  succ_iterator bbi = succ_begin(BB), bbe = succ_end(BB);
  // If there are no successors, attach the block weight to the virtual (BB,0)
  // exit edge.
  if (bbi == bbe) {
    Edge edge = getEdge(BB,0);
    EdgeInformation[BB->getParent()][edge] = BBWeight;
    printEdgeWeight(edge);
  }
  for ( ; bbi != bbe; ++bbi ) {
    if (ProcessedSuccs.insert(*bbi).second) {
      Edge edge = getEdge(BB,*bbi);
      double w = getEdgeWeight(edge);
      if (w != MissingValue) {
        BBWeight -= getEdgeWeight(edge);
      } else {
        Edges.push_back(edge);
        // If a minimal weight is necessary, reserve it by subtracting it from
        // the block weight; it is re-added later on.
        if (MinimalWeight.find(edge) != MinimalWeight.end()) {
          BBWeight -= MinimalWeight[edge];
          DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[edge]) << " at " << edge << "\n");
        }
      }
    }
  }

  double fraction = floor(BBWeight/Edges.size());
  // Finally we know what flow is still not leaving the block; distribute this
  // flow onto the empty edges.
  for (SmallVector<Edge, 8>::iterator ei = Edges.begin(), ee = Edges.end();
       ei != ee; ++ei) {
    if (ei != (ee-1)) {
      EdgeInformation[BB->getParent()][*ei] += fraction;
      BBWeight -= fraction;
    } else {
      EdgeInformation[BB->getParent()][*ei] += BBWeight;
    }
    // Re-add the minimal necessary weight.
    if (MinimalWeight.find(*ei) != MinimalWeight.end()) {
      EdgeInformation[BB->getParent()][*ei] += MinimalWeight[*ei];
      DEBUG(dbgs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n");
    }
    printEdgeWeight(*ei);
  }

  // This block is visited; mark it before recursing.
  BBToVisit.erase(BB);

  // Recurse into successors.
  for (succ_iterator bbi = succ_begin(BB), bbe = succ_end(BB);
       bbi != bbe; ++bbi) {
    recurseBasicBlock(*bbi);
  }
}
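
The flow condition described above can be stated as a tiny standalone check. The following is a minimal sketch over a toy graph model (the ToyEdge type and map-based storage are illustrative, not the pass's API); it tests that incoming flow equals the block weight, which equals outgoing flow:

#include <map>
#include <utility>

// Toy model: blocks are ints, an edge is a (from, to) pair carrying a flow.
using ToyEdge = std::pair<int, int>;

static bool flowConditionHolds(int BB,
                               const std::map<ToyEdge, double> &Flow,
                               const std::map<int, double> &BlockWeight) {
  double In = 0, Out = 0;
  for (const auto &E : Flow) {
    if (E.first.second == BB) In += E.second;  // incoming edge
    if (E.first.first == BB)  Out += E.second; // outgoing edge
  }
  // Exact comparison is fine for the toy model; the pass itself rounds when
  // distributing flow, so it only approximates this equality.
  double W = BlockWeight.at(BB);
  return In == W && W == Out;
}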
Example #2
/// Parse a function definition signature.
///   func-signature:
///     func-arguments func-throws? func-signature-result?
///   func-signature-result:
///     '->' type
///
/// Note that this leaves retType as null if unspecified.
ParserStatus
Parser::parseFunctionSignature(Identifier SimpleName,
                               DeclName &FullName,
                               SmallVectorImpl<ParameterList*> &bodyParams,
                               DefaultArgumentInfo &defaultArgs,
                               SourceLoc &throwsLoc,
                               bool &rethrows,
                               TypeRepr *&retType) {
  SmallVector<Identifier, 4> NamePieces;
  NamePieces.push_back(SimpleName);
  FullName = SimpleName;
  
  ParserStatus Status;
  // We force the first type of a func declaration to be a tuple for
  // consistency.
  if (Tok.is(tok::l_paren)) {
    ParameterContextKind paramContext;
    if (SimpleName.isOperator())
      paramContext = ParameterContextKind::Operator;
    else
      paramContext = ParameterContextKind::Function;

    Status = parseFunctionArguments(NamePieces, bodyParams, paramContext,
                                    defaultArgs);
    FullName = DeclName(Context, SimpleName, 
                        llvm::makeArrayRef(NamePieces.begin() + 1,
                                           NamePieces.end()));

    if (bodyParams.empty()) {
      // If we didn't get anything, add a () pattern to avoid breaking
      // invariants.
      assert(Status.hasCodeCompletion() || Status.isError());
      bodyParams.push_back(ParameterList::createEmpty(Context));
    }
  } else {
    diagnose(Tok, diag::func_decl_without_paren);
    Status = makeParserError();

    // Recover by creating a '() -> ?' signature.
    bodyParams.push_back(ParameterList::createEmpty(Context, PreviousLoc,
                                                    PreviousLoc));
    FullName = DeclName(Context, SimpleName, bodyParams.back());
  }
  
  // Check for the 'throws' keyword.
  rethrows = false;
  if (Tok.is(tok::kw_throws)) {
    throwsLoc = consumeToken();
  } else if (Tok.is(tok::kw_rethrows)) {
    throwsLoc = consumeToken();
    rethrows = true;
  } else if (Tok.is(tok::kw_throw)) {
    throwsLoc = consumeToken();
    diagnose(throwsLoc, diag::throw_in_function_type)
      .fixItReplace(throwsLoc, "throws");
  }

  SourceLoc arrowLoc;
  // If there's a trailing arrow, parse the rest as the result type.
  if (Tok.isAny(tok::arrow, tok::colon)) {
    if (!consumeIf(tok::arrow, arrowLoc)) {
      // FixIt ':' to '->'.
      diagnose(Tok, diag::func_decl_expected_arrow)
          .fixItReplace(SourceRange(Tok.getLoc()), "->");
      arrowLoc = consumeToken(tok::colon);
    }

    ParserResult<TypeRepr> ResultType =
      parseType(diag::expected_type_function_result);
    if (ResultType.hasCodeCompletion())
      return ResultType;
    retType = ResultType.getPtrOrNull();
    if (!retType) {
      Status.setIsParseError();
      return Status;
    }
  } else {
    // Otherwise, we leave retType null.
    retType = nullptr;
  }

  // Check for 'throws' and 'rethrows' after the type and correct it.
  if (!throwsLoc.isValid()) {
    if (Tok.is(tok::kw_throws)) {
      throwsLoc = consumeToken();
    } else if (Tok.is(tok::kw_rethrows)) {
      throwsLoc = consumeToken();
      rethrows = true;
    }

    if (throwsLoc.isValid()) {
      assert(arrowLoc.isValid());
      assert(retType);
      auto diag = rethrows ? diag::rethrows_after_function_result
                           : diag::throws_after_function_result;
      SourceLoc typeEndLoc = Lexer::getLocForEndOfToken(SourceMgr,
                                                        retType->getEndLoc());
      SourceLoc throwsEndLoc = Lexer::getLocForEndOfToken(SourceMgr, throwsLoc);
      diagnose(Tok, diag)
        .fixItInsert(arrowLoc, rethrows ? "rethrows " : "throws ")
        .fixItRemoveChars(typeEndLoc, throwsEndLoc);
    }
  }

  return Status;
}
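
The ':' to '->' recovery above follows a common recursive-descent idiom: diagnose with a FixIt, consume the wrong token anyway, and keep parsing as if the expected token had appeared. A self-contained toy sketch of the idiom (the ToyParser names are hypothetical, not Swift's parser API):

#include <cstdio>

enum class Tok { Arrow, Colon, Ident, Eof };

struct ToyParser {
  Tok Current = Tok::Colon;
  Tok consume() { Tok T = Current; Current = Tok::Ident; return T; }

  // Accept '->', but recover from ':' by diagnosing and consuming it anyway.
  bool parseResultArrow() {
    if (Current == Tok::Arrow) { consume(); return true; }
    if (Current == Tok::Colon) {
      std::fprintf(stderr, "expected '->' (fix-it: replace ':' with '->')\n");
      consume(); // treat the ':' as if it were '->' and continue parsing
      return true;
    }
    return false;
  }
};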
Example #3
bool LoopInterchangeTransform::adjustLoopBranches() {

    DEBUG(dbgs() << "adjustLoopBranches called\n");
    // Adjust the loop preheader
    BasicBlock *InnerLoopHeader = InnerLoop->getHeader();
    BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
    BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
    BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
    BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
    BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
    BasicBlock *OuterLoopPredecessor = OuterLoopPreHeader->getUniquePredecessor();
    BasicBlock *InnerLoopLatchPredecessor =
        InnerLoopLatch->getUniquePredecessor();
    BasicBlock *InnerLoopLatchSuccessor;
    BasicBlock *OuterLoopLatchSuccessor;

    BranchInst *OuterLoopLatchBI =
        dyn_cast<BranchInst>(OuterLoopLatch->getTerminator());
    BranchInst *InnerLoopLatchBI =
        dyn_cast<BranchInst>(InnerLoopLatch->getTerminator());
    BranchInst *OuterLoopHeaderBI =
        dyn_cast<BranchInst>(OuterLoopHeader->getTerminator());
    BranchInst *InnerLoopHeaderBI =
        dyn_cast<BranchInst>(InnerLoopHeader->getTerminator());

    if (!OuterLoopPredecessor || !InnerLoopLatchPredecessor ||
            !OuterLoopLatchBI || !InnerLoopLatchBI || !OuterLoopHeaderBI ||
            !InnerLoopHeaderBI)
        return false;

    BranchInst *InnerLoopLatchPredecessorBI =
        dyn_cast<BranchInst>(InnerLoopLatchPredecessor->getTerminator());
    BranchInst *OuterLoopPredecessorBI =
        dyn_cast<BranchInst>(OuterLoopPredecessor->getTerminator());

    if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI)
        return false;
    BasicBlock *InnerLoopHeaderSuccessor = InnerLoopHeader->getUniqueSuccessor();
    if (!InnerLoopHeaderSuccessor)
        return false;

    // Adjust Loop Preheader and headers

    unsigned NumSucc = OuterLoopPredecessorBI->getNumSuccessors();
    for (unsigned i = 0; i < NumSucc; ++i) {
        if (OuterLoopPredecessorBI->getSuccessor(i) == OuterLoopPreHeader)
            OuterLoopPredecessorBI->setSuccessor(i, InnerLoopPreHeader);
    }

    NumSucc = OuterLoopHeaderBI->getNumSuccessors();
    for (unsigned i = 0; i < NumSucc; ++i) {
        if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch)
            OuterLoopHeaderBI->setSuccessor(i, LoopExit);
        else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader)
            OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSuccessor);
    }

    // Adjust reduction PHI's now that the incoming block has changed.
    updateIncomingBlock(InnerLoopHeaderSuccessor, InnerLoopHeader,
                        OuterLoopHeader);

    BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI);
    InnerLoopHeaderBI->eraseFromParent();

    // -------------Adjust loop latches-----------
    if (InnerLoopLatchBI->getSuccessor(0) == InnerLoopHeader)
        InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(1);
    else
        InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(0);

    NumSucc = InnerLoopLatchPredecessorBI->getNumSuccessors();
    for (unsigned i = 0; i < NumSucc; ++i) {
        if (InnerLoopLatchPredecessorBI->getSuccessor(i) == InnerLoopLatch)
            InnerLoopLatchPredecessorBI->setSuccessor(i, InnerLoopLatchSuccessor);
    }

    // Adjust PHI nodes in InnerLoopLatchSuccessor. Update all uses of each
    // PHI with its incoming value and remove the PHI node from the inner loop.
    SmallVector<PHINode *, 8> LcssaVec;
    for (auto I = InnerLoopLatchSuccessor->begin(); isa<PHINode>(I); ++I) {
        PHINode *LcssaPhi = cast<PHINode>(I);
        LcssaVec.push_back(LcssaPhi);
    }
    for (auto I = LcssaVec.begin(), E = LcssaVec.end(); I != E; ++I) {
        PHINode *P = *I;
        Value *Incoming = P->getIncomingValueForBlock(InnerLoopLatch);
        P->replaceAllUsesWith(Incoming);
        P->eraseFromParent();
    }

    if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopHeader)
        OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(1);
    else
        OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(0);

    if (InnerLoopLatchBI->getSuccessor(1) == InnerLoopLatchSuccessor)
        InnerLoopLatchBI->setSuccessor(1, OuterLoopLatchSuccessor);
    else
        InnerLoopLatchBI->setSuccessor(0, OuterLoopLatchSuccessor);

    updateIncomingBlock(OuterLoopLatchSuccessor, OuterLoopLatch, InnerLoopLatch);

    if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopLatchSuccessor) {
        OuterLoopLatchBI->setSuccessor(0, InnerLoopLatch);
    } else {
        OuterLoopLatchBI->setSuccessor(1, InnerLoopLatch);
    }

    return true;
}
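
The function above repeats the same rewiring loop for several branch instructions; the pattern factors naturally into a small helper. A sketch (assuming the usual LLVM IR headers; newer LLVM also offers Instruction::replaceSuccessorWith for this):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Rewire every occurrence of From among BI's successors to To.
static void replaceSuccessor(BranchInst *BI, BasicBlock *From, BasicBlock *To) {
  for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i)
    if (BI->getSuccessor(i) == From)
      BI->setSuccessor(i, To);
}

With it, the first loop above becomes replaceSuccessor(OuterLoopPredecessorBI, OuterLoopPreHeader, InnerLoopPreHeader);.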
Example #4
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present
/// into calls to the appropriate _Unwind_Resume function.
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
    bool UsesNewEH = false;
    SmallVector<ResumeInst*, 16> Resumes;
    for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
        TerminatorInst *TI = I->getTerminator();
        if (ResumeInst *RI = dyn_cast<ResumeInst>(TI))
            Resumes.push_back(RI);
        else if (InvokeInst *II = dyn_cast<InvokeInst>(TI))
            UsesNewEH = II->getUnwindDest()->isLandingPad();
    }

    if (Resumes.empty())
        return UsesNewEH;

    // Find the rewind function if we haven't already.
    if (!RewindFunction) {
        LLVMContext &Ctx = Resumes[0]->getContext();
        FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                              Type::getInt8PtrTy(Ctx), false);
        const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
        RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy);
    }

    // Create the basic block where the _Unwind_Resume call will live.
    LLVMContext &Ctx = Fn.getContext();
    unsigned ResumesSize = Resumes.size();

    if (ResumesSize == 1) {
        // Instead of creating a new BB and PHI node, just append the call to
        // _Unwind_Resume to the end of the single resume block.
        ResumeInst *RI = Resumes.front();
        BasicBlock *UnwindBB = RI->getParent();
        Value *ExnObj = GetExceptionObject(RI);

        // Call the _Unwind_Resume function.
        CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
        CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));

        // We never expect _Unwind_Resume to return.
        new UnreachableInst(Ctx, UnwindBB);
        return true;
    }

    BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn);
    PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesSize,
                                  "exn.obj", UnwindBB);

    // Extract the exception object from the ResumeInst and add it to the PHI node
    // that feeds the _Unwind_Resume call.
    for (SmallVectorImpl<ResumeInst*>::iterator
            I = Resumes.begin(), E = Resumes.end(); I != E; ++I) {
        ResumeInst *RI = *I;
        BasicBlock *Parent = RI->getParent();
        BranchInst::Create(UnwindBB, Parent);

        Value *ExnObj = GetExceptionObject(RI);
        PN->addIncoming(ExnObj, Parent);

        ++NumResumesLowered;
    }

    // Call the function.
    CallInst *CI = CallInst::Create(RewindFunction, PN, "", UnwindBB);
    CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));

    // We never expect _Unwind_Resume to return.
    new UnreachableInst(Ctx, UnwindBB);
    return true;
}
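
The multi-resume path funnels every resume block into one shared block through a PHI. The same shape, isolated as a sketch (assumes the LLVM IR headers; mergeThroughPHI is a hypothetical helper, and each predecessor is assumed to have no terminator yet):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include <utility>
using namespace llvm;

// Funnel several predecessors, each carrying a value, into one shared block.
static BasicBlock *mergeThroughPHI(
    ArrayRef<std::pair<BasicBlock *, Value *>> Preds,
    Type *Ty, Function &Fn, LLVMContext &Ctx) {
  BasicBlock *Merge = BasicBlock::Create(Ctx, "merge", &Fn);
  PHINode *PN = PHINode::Create(Ty, Preds.size(), "merged", Merge);
  for (const auto &P : Preds) {
    BranchInst::Create(Merge, P.first); // terminate the predecessor
    PN->addIncoming(P.second, P.first);
  }
  return Merge;
}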
Example #5
unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
                                         unsigned char* StartFunction,
                                         unsigned char* EndFunction) const {
  assert(MMI && "MachineModuleInfo not registered!");

  // Map all labels and get rid of any dead landing pads.
  MMI->TidyLandingPads(JCE->getLabelLocations());

  const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
  const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
  if (PadInfos.empty()) return 0;

  // Sort the landing pads in order of their type ids.  This is used to fold
  // duplicate actions.
  SmallVector<const LandingPadInfo *, 64> LandingPads;
  LandingPads.reserve(PadInfos.size());
  for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
    LandingPads.push_back(&PadInfos[i]);
  std::sort(LandingPads.begin(), LandingPads.end(), PadLT);

  // Negative type ids index into FilterIds, positive type ids index into
  // TypeInfos.  The value written for a positive type id is just the type
  // id itself.  For a negative type id, however, the value written is the
  // (negative) byte offset of the corresponding FilterIds entry.  The byte
  // offset is usually equal to the type id, because the FilterIds entries
  // are written using a variable width encoding which outputs one byte per
  // entry as long as the value written is not too large, but can differ.
  // This kind of complication does not occur for positive type ids because
  // type infos are output using a fixed width encoding.
  // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
  SmallVector<int, 16> FilterOffsets;
  FilterOffsets.reserve(FilterIds.size());
  int Offset = -1;
  for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
    E = FilterIds.end(); I != E; ++I) {
    FilterOffsets.push_back(Offset);
    Offset -= MCAsmInfo::getULEB128Size(*I);
  }

  // Compute the actions table and gather the first action index for each
  // landing pad site.
  SmallVector<ActionEntry, 32> Actions;
  SmallVector<unsigned, 64> FirstActions;
  FirstActions.reserve(LandingPads.size());

  int FirstAction = 0;
  unsigned SizeActions = 0;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LP = LandingPads[i];
    const std::vector<int> &TypeIds = LP->TypeIds;
    const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
    unsigned SizeSiteActions = 0;

    if (NumShared < TypeIds.size()) {
      unsigned SizeAction = 0;
      ActionEntry *PrevAction = 0;

      if (NumShared) {
        const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
        assert(Actions.size());
        PrevAction = &Actions.back();
        SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
          MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
          SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
          SizeAction += -PrevAction->NextAction;
          PrevAction = PrevAction->Previous;
        }
      }

      // Compute the actions.
      for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
        int TypeID = TypeIds[I];
        assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
        unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);

        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
        SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
        SizeSiteActions += SizeAction;

        ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
        Actions.push_back(Action);

        PrevAction = &Actions.back();
      }

      // Record the first action of the landing pad site.
      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
    } // else identical - re-use previous FirstAction

    FirstActions.push_back(FirstAction);

    // Compute this site's contribution to the size.
    SizeActions += SizeSiteActions;
  }

  // Compute the call-site table.  Entries must be ordered by address.
  SmallVector<CallSiteEntry, 64> CallSites;

  RangeMapType PadMap;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LandingPad = LandingPads[i];
    for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
      MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
      assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
      PadRange P = { i, j };
      PadMap[BeginLabel] = P;
    }
  }

  bool MayThrow = false;
  MCSymbol *LastLabel = 0;
  for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
        I != E; ++I) {
    for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
          MI != E; ++MI) {
      if (!MI->isLabel()) {
        MayThrow |= MI->isCall();
        continue;
      }

      MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
      assert(BeginLabel && "Invalid label!");

      if (BeginLabel == LastLabel)
        MayThrow = false;

      RangeMapType::iterator L = PadMap.find(BeginLabel);

      if (L == PadMap.end())
        continue;

      PadRange P = L->second;
      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];

      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
              "Inconsistent landing pad map!");

      // If some instruction between the previous try-range and this one may
      // throw, create a call-site entry with no landing pad for the region
      // between the try-ranges.
      if (MayThrow) {
        CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
        CallSites.push_back(Site);
      }

      LastLabel = LandingPad->EndLabels[P.RangeIndex];
      CallSiteEntry Site = {BeginLabel, LastLabel,
        LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};

      assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
              "Invalid landing pad!");

      // Try to merge with the previous call-site.
      if (CallSites.size()) {
        CallSiteEntry &Prev = CallSites.back();
        if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
          // Extend the range of the previous entry.
          Prev.EndLabel = Site.EndLabel;
          continue;
        }
      }

      // Otherwise, create a new call-site.
      CallSites.push_back(Site);
    }
  }
  // If some instruction between the previous try-range and the end of the
  // function may throw, create a call-site entry with no landing pad for the
  // region following the try-range.
  if (MayThrow) {
    CallSiteEntry Site = {LastLabel, 0, 0, 0};
    CallSites.push_back(Site);
  }

  // Final tallies.
  unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
                                            sizeof(int32_t) + // Site length.
                                            sizeof(int32_t)); // Landing pad.
  for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
    SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);

  unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();

  unsigned TypeOffset = sizeof(int8_t) + // Call site format
                        // Call-site table length
                        MCAsmInfo::getULEB128Size(SizeSites) +
                        SizeSites + SizeActions + SizeTypes;

  // Begin the exception table.
  JCE->emitAlignmentWithFill(4, 0);
  // Asm->EOL("Padding");

  unsigned char* DwarfExceptionTable = (unsigned char*)JCE->getCurrentPCValue();

  // Emit the header.
  JCE->emitByte(dwarf::DW_EH_PE_omit);
  // Asm->EOL("LPStart format (DW_EH_PE_omit)");
  JCE->emitByte(dwarf::DW_EH_PE_absptr);
  // Asm->EOL("TType format (DW_EH_PE_absptr)");
  JCE->emitULEB128Bytes(TypeOffset);
  // Asm->EOL("TType base offset");
  JCE->emitByte(dwarf::DW_EH_PE_udata4);
  // Asm->EOL("Call site format (DW_EH_PE_udata4)");
  JCE->emitULEB128Bytes(SizeSites);
  // Asm->EOL("Call-site table length");

  // Emit the landing pad site information.
  for (unsigned i = 0; i < CallSites.size(); ++i) {
    CallSiteEntry &S = CallSites[i];
    intptr_t BeginLabelPtr = 0;
    intptr_t EndLabelPtr = 0;

    if (!S.BeginLabel) {
      BeginLabelPtr = (intptr_t)StartFunction;
      JCE->emitInt32(0);
    } else {
      BeginLabelPtr = JCE->getLabelAddress(S.BeginLabel);
      JCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction);
    }

    // Asm->EOL("Region start");

    if (!S.EndLabel)
      EndLabelPtr = (intptr_t)EndFunction;
    else
      EndLabelPtr = JCE->getLabelAddress(S.EndLabel);

    JCE->emitInt32(EndLabelPtr - BeginLabelPtr);
    //Asm->EOL("Region length");

    if (!S.PadLabel) {
      JCE->emitInt32(0);
    } else {
      unsigned PadLabelPtr = JCE->getLabelAddress(S.PadLabel);
      JCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction);
    }
    // Asm->EOL("Landing pad");

    JCE->emitULEB128Bytes(S.Action);
    // Asm->EOL("Action");
  }

  // Emit the actions.
  for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
    ActionEntry &Action = Actions[I];

    JCE->emitSLEB128Bytes(Action.ValueForTypeID);
    //Asm->EOL("TypeInfo index");
    JCE->emitSLEB128Bytes(Action.NextAction);
    //Asm->EOL("Next action");
  }

  // Emit the type ids.
  for (unsigned M = TypeInfos.size(); M; --M) {
    const GlobalVariable *GV = TypeInfos[M - 1];

    if (GV) {
      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
      else
        JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
    } else {
      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32(0);
      else
        JCE->emitInt64(0);
    }
    // Asm->EOL("TypeInfo");
  }

  // Emit the filter typeids.
  for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
    unsigned TypeID = FilterIds[j];
    JCE->emitULEB128Bytes(TypeID);
    //Asm->EOL("Filter TypeInfo index");
  }

  JCE->emitAlignmentWithFill(4, 0);

  return DwarfExceptionTable;
}
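
The filter-offset bookkeeping above hinges on how many bytes a value occupies in ULEB128, where each byte holds 7 payload bits. A minimal sketch of the size computation (equivalent in spirit to MCAsmInfo::getULEB128Size):

#include <cstdint>

// Bytes needed to encode Value as ULEB128: 7 payload bits per byte.
static unsigned uleb128Size(uint64_t Value) {
  unsigned Size = 0;
  do {
    Value >>= 7;
    ++Size;
  } while (Value != 0);
  return Size;
}
// e.g. uleb128Size(127) == 1, but uleb128Size(128) == 2, which is why
// FilterOffsets can drift away from the type ids themselves.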
Example #6
static void lookupInModule(ModuleDecl *module, ModuleDecl::AccessPathTy accessPath,
                           SmallVectorImpl<ValueDecl *> &decls,
                           ResolutionKind resolutionKind, bool canReturnEarly,
                           LazyResolver *typeResolver,
                           ModuleLookupCache &cache,
                           const DeclContext *moduleScopeContext,
                           bool respectAccessControl,
                           ArrayRef<ModuleDecl::ImportedModule> extraImports,
                           CallbackTy callback) {
  assert(module);
  assert(std::none_of(extraImports.begin(), extraImports.end(),
                      [](ModuleDecl::ImportedModule import) -> bool {
    return !import.second;
  }));

  ModuleLookupCache::iterator iter;
  bool isNew;
  std::tie(iter, isNew) = cache.insert({{accessPath, module}, {}});
  if (!isNew) {
    decls.append(iter->second.begin(), iter->second.end());
    return;
  }

  size_t initialCount = decls.size();

  SmallVector<ValueDecl *, 4> localDecls;
  callback(module, accessPath, localDecls);
  if (respectAccessControl) {
    auto newEndIter = std::remove_if(localDecls.begin(), localDecls.end(),
                                    [=](ValueDecl *VD) {
      return !VD->isAccessibleFrom(moduleScopeContext);
    });
    localDecls.erase(newEndIter, localDecls.end());

    // This only applies to immediate imports of the top-level module.
    if (moduleScopeContext && moduleScopeContext->getParentModule() != module)
      moduleScopeContext = nullptr;
  }

  OverloadSetTy overloads;
  resolutionKind = recordImportDecls(typeResolver, decls, localDecls,
                                     overloads, resolutionKind);

  bool foundDecls = decls.size() > initialCount;
  if (!foundDecls || !canReturnEarly ||
      resolutionKind == ResolutionKind::Overloadable) {
    SmallVector<ModuleDecl::ImportedModule, 8> reexports;
    module->getImportedModulesForLookup(reexports);
    assert(std::none_of(reexports.begin(), reexports.end(),
                        [](ModuleDecl::ImportedModule import) -> bool {
      return !import.second;
    }));
    reexports.append(extraImports.begin(), extraImports.end());

    // Prefer scoped imports (import func Swift.max) to whole-module imports.
    SmallVector<ValueDecl *, 8> unscopedValues;
    SmallVector<ValueDecl *, 8> scopedValues;
    for (auto next : reexports) {
      // Filter any whole-module imports, and skip specific-decl imports if the
      // import path doesn't match exactly.
      ModuleDecl::AccessPathTy combinedAccessPath;
      if (accessPath.empty()) {
        combinedAccessPath = next.first;
      } else if (!next.first.empty() &&
                 !ModuleDecl::isSameAccessPath(next.first, accessPath)) {
        // If we ever allow importing non-top-level decls, it's possible the
        // rule above isn't what we want.
        assert(next.first.size() == 1 && "import of non-top-level decl");
        continue;
      } else {
        combinedAccessPath = accessPath;
      }

      auto &resultSet = next.first.empty() ? unscopedValues : scopedValues;
      lookupInModule<OverloadSetTy>(next.second, combinedAccessPath,
                                    resultSet, resolutionKind, canReturnEarly,
                                    typeResolver, cache, moduleScopeContext,
                                    respectAccessControl, {}, callback);
    }

    // Add the results from scoped imports.
    resolutionKind = recordImportDecls(typeResolver, decls, scopedValues,
                                       overloads, resolutionKind);

    // Add the results from unscoped imports.
    foundDecls = decls.size() > initialCount;
    if (!foundDecls || !canReturnEarly ||
        resolutionKind == ResolutionKind::Overloadable) {
      resolutionKind = recordImportDecls(typeResolver, decls, unscopedValues,
                                         overloads, resolutionKind);
    }
  }

  // Remove duplicated declarations.
  llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
  decls.erase(std::remove_if(decls.begin() + initialCount, decls.end(),
                             [&](ValueDecl *d) -> bool { 
                               return !knownDecls.insert(d).second;
                             }),
              decls.end());

  auto &cachedValues = cache[{accessPath, module}];
  cachedValues.insert(cachedValues.end(),
                      decls.begin() + initialCount,
                      decls.end());
}
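
The duplicate-removal step at the end uses a stable-dedup idiom: a set records what has been seen while remove_if compacts the vector in place, keeping the first occurrence. A standalone sketch with standard containers (the element type must be ordered for std::set):

#include <algorithm>
#include <set>
#include <vector>

// Stable in-place deduplication from position Begin onward.
template <typename T>
void dedupStable(std::vector<T> &V, size_t Begin = 0) {
  std::set<T> Known;
  V.erase(std::remove_if(V.begin() + Begin, V.end(),
                         [&](const T &X) { return !Known.insert(X).second; }),
          V.end());
}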
Example #7
/// Emit landing pads and actions.
///
/// The general organization of the table is complex, but the basic concepts are
/// easy.  First there is a header which describes the location and organization
/// of the three components that follow.
///
///  1. The landing pad site information describes the range of code covered by
///     the try.  In our case it's an accumulation of the ranges covered by the
///     invokes in the try.  There is also a reference to the landing pad that
///     handles the exception once processed.  Finally an index into the actions
///     table.
///  2. The action table, in our case, is composed of pairs of type IDs and next
///     action offset.  Starting with the action index from the landing pad
///     site, each type ID is checked for a match to the current exception.  If
///     it matches then the exception and type id are passed on to the landing
///     pad.  Otherwise the next action is looked up.  This chain is terminated
///     with a next action of zero.  If no type id is found then the frame is
///     unwound and handling continues.
///  3. The type ID table contains references to all the C++ typeinfo for all
///     catches in the function.  This table is reverse indexed base 1.
void EHStreamer::emitExceptionTable() {
  const std::vector<const GlobalValue *> &TypeInfos = MMI->getTypeInfos();
  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
  const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();

  // Sort the landing pads in order of their type ids.  This is used to fold
  // duplicate actions.
  SmallVector<const LandingPadInfo *, 64> LandingPads;
  LandingPads.reserve(PadInfos.size());

  for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
    LandingPads.push_back(&PadInfos[i]);

  // Order landing pads lexicographically by type id.
  std::sort(LandingPads.begin(), LandingPads.end(),
            [](const LandingPadInfo *L,
               const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });

  // Compute the actions table and gather the first action index for each
  // landing pad site.
  SmallVector<ActionEntry, 32> Actions;
  SmallVector<unsigned, 64> FirstActions;
  unsigned SizeActions =
    computeActionsTable(LandingPads, Actions, FirstActions);

  // Compute the call-site table.
  SmallVector<CallSiteEntry, 64> CallSites;
  computeCallSiteTable(CallSites, LandingPads, FirstActions);

  // Final tallies.

  // Call sites.
  bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
  bool HaveTTData = IsSJLJ ? (!TypeInfos.empty() || !FilterIds.empty()) : true;

  unsigned CallSiteTableLength;
  if (IsSJLJ)
    CallSiteTableLength = 0;
  else {
    unsigned SiteStartSize  = 4; // dwarf::DW_EH_PE_udata4
    unsigned SiteLengthSize = 4; // dwarf::DW_EH_PE_udata4
    unsigned LandingPadSize = 4; // dwarf::DW_EH_PE_udata4
    CallSiteTableLength =
      CallSites.size() * (SiteStartSize + SiteLengthSize + LandingPadSize);
  }

  for (unsigned i = 0, e = CallSites.size(); i < e; ++i) {
    CallSiteTableLength += getULEB128Size(CallSites[i].Action);
    if (IsSJLJ)
      CallSiteTableLength += getULEB128Size(i);
  }

  // Type infos.
  const MCSection *LSDASection = Asm->getObjFileLowering().getLSDASection();
  unsigned TTypeEncoding;
  unsigned TypeFormatSize;

  if (!HaveTTData) {
    // For SjLj exceptions, if there is no TypeInfo, then we just explicitly say
    // that we're omitting that bit.
    TTypeEncoding = dwarf::DW_EH_PE_omit;
    // dwarf::DW_EH_PE_absptr
    TypeFormatSize = Asm->getDataLayout().getPointerSize();
  } else {
    // Okay, we have actual filters or typeinfos to emit.  As such, we need to
    // pick a type encoding for them.  We're about to emit a list of pointers to
    // typeinfo objects at the end of the LSDA.  However, unless we're in static
    // mode, this reference will require a relocation by the dynamic linker.
    //
    // Because of this, we have a couple of options:
    //
    //   1) If we are in -static mode, we can always use an absolute reference
    //      from the LSDA, because the static linker will resolve it.
    //
    //   2) Otherwise, if the LSDA section is writable, we can output the direct
    //      reference to the typeinfo and allow the dynamic linker to relocate
    //      it.  Since it is in a writable section, the dynamic linker won't
    //      have a problem.
    //
    //   3) Finally, if we're in PIC mode and the LSDA section isn't writable,
    //      we need to use some form of indirection.  For example, on Darwin,
    //      we can output a statically-relocatable reference to a dyld stub. The
    //      offset to the stub is constant, but the contents are in a section
    //      that is updated by the dynamic linker.  This is easy enough, but we
    //      need to tell the personality function of the unwinder to indirect
    //      through the dyld stub.
    //
    // FIXME: When (3) is actually implemented, we'll have to emit the stubs
    // somewhere.  This predicate should be moved to a shared location that is
    // in target-independent code.
    //
    TTypeEncoding = Asm->getObjFileLowering().getTTypeEncoding();
    TypeFormatSize = Asm->GetSizeOfEncodedValue(TTypeEncoding);
  }

  // Begin the exception table.
  // Sometimes we do not want to emit the data into a separate section (e.g.
  // ARM EHABI). In this case LSDASection will be NULL.
  if (LSDASection)
    Asm->OutStreamer.SwitchSection(LSDASection);
  Asm->EmitAlignment(2);

  // Emit the LSDA.
  MCSymbol *GCCETSym =
    Asm->OutContext.GetOrCreateSymbol(Twine("GCC_except_table")+
                                      Twine(Asm->getFunctionNumber()));
  Asm->OutStreamer.EmitLabel(GCCETSym);
  Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("exception",
                                                Asm->getFunctionNumber()));

  if (IsSJLJ)
    Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("_LSDA_",
                                                  Asm->getFunctionNumber()));

  // Emit the LSDA header.
  Asm->EmitEncodingByte(dwarf::DW_EH_PE_omit, "@LPStart");
  Asm->EmitEncodingByte(TTypeEncoding, "@TType");

  // The type infos need to be aligned. GCC does this by inserting padding just
  // before the type infos. However, this changes the size of the exception
  // table, so you need to take this into account when you output the exception
  // table size. However, the size is output using a variable length encoding.
  // So by increasing the size by inserting padding, you may increase the number
  // of bytes used for writing the size. If it increases, say by one byte, then
  // you now need to output one less byte of padding to get the type infos
  // aligned. However this decreases the size of the exception table. This
  // changes the value you have to output for the exception table size. Due to
  // the variable length encoding, the number of bytes used for writing the
  // length may decrease. If so, you then have to increase the amount of
  // padding. And so on. If you look carefully at the GCC code you will see that
  // it indeed does this in a loop, going on and on until the values stabilize.
  // We chose another solution: don't output padding inside the table like GCC
  // does, instead output it before the table.
  unsigned SizeTypes = TypeInfos.size() * TypeFormatSize;
  unsigned CallSiteTableLengthSize = getULEB128Size(CallSiteTableLength);
  unsigned TTypeBaseOffset =
    sizeof(int8_t) +                            // Call site format
    CallSiteTableLengthSize +                   // Call site table length size
    CallSiteTableLength +                       // Call site table length
    SizeActions +                               // Actions size
    SizeTypes;
  unsigned TTypeBaseOffsetSize = getULEB128Size(TTypeBaseOffset);
  unsigned TotalSize =
    sizeof(int8_t) +                            // LPStart format
    sizeof(int8_t) +                            // TType format
    (HaveTTData ? TTypeBaseOffsetSize : 0) +    // TType base offset size
    TTypeBaseOffset;                            // TType base offset
  unsigned SizeAlign = (4 - TotalSize) & 3;

  if (HaveTTData) {
    // Account for any extra padding that will be added to the call site table
    // length.
    Asm->EmitULEB128(TTypeBaseOffset, "@TType base offset", SizeAlign);
    SizeAlign = 0;
  }

  bool VerboseAsm = Asm->OutStreamer.isVerboseAsm();

  // SjLj Exception handling
  if (IsSJLJ) {
    Asm->EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");

    // Add extra padding if it wasn't added to the TType base offset.
    Asm->EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);

    // Emit the landing pad site information.
    unsigned idx = 0;
    for (SmallVectorImpl<CallSiteEntry>::const_iterator
         I = CallSites.begin(), E = CallSites.end(); I != E; ++I, ++idx) {
      const CallSiteEntry &S = *I;

      // Offset of the landing pad, counted in 16-byte bundles relative to the
      // @LPStart address.
      if (VerboseAsm) {
        Asm->OutStreamer.AddComment(">> Call Site " + Twine(idx) + " <<");
        Asm->OutStreamer.AddComment("  On exception at call site "+Twine(idx));
      }
      Asm->EmitULEB128(idx);

      // Offset of the first associated action record, relative to the start of
      // the action table. This value is biased by 1 (1 indicates the start of
      // the action table), and 0 indicates that there are no actions.
      if (VerboseAsm) {
        if (S.Action == 0)
          Asm->OutStreamer.AddComment("  Action: cleanup");
        else
          Asm->OutStreamer.AddComment("  Action: " +
                                      Twine((S.Action - 1) / 2 + 1));
      }
      Asm->EmitULEB128(S.Action);
    }
  } else {
    // Itanium LSDA exception handling

    // The call-site table is a list of all call sites that may throw an
    // exception (including C++ 'throw' statements) in the procedure
    // fragment. It immediately follows the LSDA header. Each entry indicates,
    // for a given call, the first corresponding action record and corresponding
    // landing pad.
    //
    // The table begins with the number of bytes, stored as an LEB128
    // compressed, unsigned integer. The records immediately follow the record
    // count. They are sorted in increasing call-site address. Each record
    // indicates:
    //
    //   * The position of the call-site.
    //   * The position of the landing pad.
    //   * The first action record for that call site.
    //
    // A missing entry in the call-site table indicates that a call is not
    // supposed to throw.

    // Emit the landing pad call site table.
    Asm->EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");

    // Add extra padding if it wasn't added to the TType base offset.
    Asm->EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);

    unsigned Entry = 0;
    for (SmallVectorImpl<CallSiteEntry>::const_iterator
         I = CallSites.begin(), E = CallSites.end(); I != E; ++I) {
      const CallSiteEntry &S = *I;

      MCSymbol *EHFuncBeginSym =
        Asm->GetTempSymbol("eh_func_begin", Asm->getFunctionNumber());

      MCSymbol *BeginLabel = S.BeginLabel;
      if (!BeginLabel)
        BeginLabel = EHFuncBeginSym;
      MCSymbol *EndLabel = S.EndLabel;
      if (!EndLabel)
        EndLabel = Asm->GetTempSymbol("eh_func_end", Asm->getFunctionNumber());


      // Offset of the call site relative to the previous call site, counted in
      // number of 16-byte bundles. The first call site is counted relative to
      // the start of the procedure fragment.
      if (VerboseAsm)
        Asm->OutStreamer.AddComment(">> Call Site " + Twine(++Entry) + " <<");
      Asm->EmitLabelDifference(BeginLabel, EHFuncBeginSym, 4);
      if (VerboseAsm)
        Asm->OutStreamer.AddComment(Twine("  Call between ") +
                                    BeginLabel->getName() + " and " +
                                    EndLabel->getName());
      Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);

      // Offset of the landing pad, counted in 16-byte bundles relative to the
      // @LPStart address.
      if (!S.PadLabel) {
        if (VerboseAsm)
          Asm->OutStreamer.AddComment("    has no landing pad");
        Asm->OutStreamer.EmitIntValue(0, 4/*size*/);
      } else {
        if (VerboseAsm)
          Asm->OutStreamer.AddComment(Twine("    jumps to ") +
                                      S.PadLabel->getName());
        Asm->EmitLabelDifference(S.PadLabel, EHFuncBeginSym, 4);
      }

      // Offset of the first associated action record, relative to the start of
      // the action table. This value is biased by 1 (1 indicates the start of
      // the action table), and 0 indicates that there are no actions.
      if (VerboseAsm) {
        if (S.Action == 0)
          Asm->OutStreamer.AddComment("  On action: cleanup");
        else
          Asm->OutStreamer.AddComment("  On action: " +
                                      Twine((S.Action - 1) / 2 + 1));
      }
      Asm->EmitULEB128(S.Action);
    }
  }

  // Emit the Action Table.
  int Entry = 0;
  for (SmallVectorImpl<ActionEntry>::const_iterator
         I = Actions.begin(), E = Actions.end(); I != E; ++I) {
    const ActionEntry &Action = *I;

    if (VerboseAsm) {
      // Emit comments that decode the action table.
      Asm->OutStreamer.AddComment(">> Action Record " + Twine(++Entry) + " <<");
    }

    // Type Filter
    //
    //   Used by the runtime to match the type of the thrown exception to the
    //   type of the catch clauses or the types in the exception specification.
    if (VerboseAsm) {
      if (Action.ValueForTypeID > 0)
        Asm->OutStreamer.AddComment("  Catch TypeInfo " +
                                    Twine(Action.ValueForTypeID));
      else if (Action.ValueForTypeID < 0)
        Asm->OutStreamer.AddComment("  Filter TypeInfo " +
                                    Twine(Action.ValueForTypeID));
      else
        Asm->OutStreamer.AddComment("  Cleanup");
    }
    Asm->EmitSLEB128(Action.ValueForTypeID);

    // Action Record
    //
    //   Self-relative signed displacement in bytes of the next action record,
    //   or 0 if there is no next action record.
    if (VerboseAsm) {
      if (Action.NextAction == 0) {
        Asm->OutStreamer.AddComment("  No further actions");
      } else {
        unsigned NextAction = Entry + (Action.NextAction + 1) / 2;
        Asm->OutStreamer.AddComment("  Continue to action "+Twine(NextAction));
      }
    }
    Asm->EmitSLEB128(Action.NextAction);
  }

  emitTypeInfos(TTypeEncoding);

  Asm->EmitAlignment(2);
}
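
The padding discussion above boils down to a small computation: because the table lengths are LEB128-encoded, padding inserted inside the table can change the size fields and force recomputation, so the emitter pads before the table instead. A hedged arithmetic sketch (uleb128Size is the helper sketched after Example #5):

// Total size of the LSDA header plus tables; lengths are LEB128-encoded.
unsigned lsdaTotalSize(unsigned CallSiteTableLength, unsigned SizeActions,
                       unsigned SizeTypes) {
  unsigned TTypeBaseOffset = 1 /* call-site format byte */ +
                             uleb128Size(CallSiteTableLength) +
                             CallSiteTableLength + SizeActions + SizeTypes;
  return 1 /* LPStart format */ + 1 /* TType format */ +
         uleb128Size(TTypeBaseOffset) + TTypeBaseOffset;
}
// Padding to 4-byte alignment, emitted ahead of the header:
//   unsigned SizeAlign = (4 - lsdaTotalSize(...)) & 3;
// No field inside the table changes, so nothing needs to be recomputed.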
Example #8
static void emitImplicitValueConstructor(SILGenFunction &gen,
                                         ConstructorDecl *ctor) {
  RegularLocation Loc(ctor);
  Loc.markAutoGenerated();
  // FIXME: Handle 'self' along with the other arguments.
  auto *paramList = ctor->getParameterList(1);
  auto selfTyCan = ctor->getImplicitSelfDecl()->getType()->getInOutObjectType();
  SILType selfTy = gen.getLoweredType(selfTyCan);

  // Emit the indirect return argument, if any.
  SILValue resultSlot;
  if (selfTy.isAddressOnly(gen.SGM.M)) {
    auto &AC = gen.getASTContext();
    auto VD = new (AC) ParamDecl(/*IsLet*/ false, SourceLoc(),
                                 AC.getIdentifier("$return_value"),
                                 SourceLoc(),
                                 AC.getIdentifier("$return_value"), selfTyCan,
                                 ctor);
    resultSlot = new (gen.F.getModule()) SILArgument(gen.F.begin(), selfTy, VD);
  }

  // Emit the elementwise arguments.
  SmallVector<RValue, 4> elements;
  for (size_t i = 0, size = paramList->size(); i < size; ++i) {
    auto &param = paramList->get(i);

    elements.push_back(
      emitImplicitValueConstructorArg(gen, Loc,
                                      param.decl->getType()->getCanonicalType(),
                                      ctor));
  }

  emitConstructorMetatypeArg(gen, ctor);

  auto *decl = selfTy.getStructOrBoundGenericStruct();
  assert(decl && "not a struct?!");

  // If we have an indirect return slot, initialize it in-place.
  if (resultSlot) {

    auto elti = elements.begin(), eltEnd = elements.end();
    for (VarDecl *field : decl->getStoredProperties()) {
      auto fieldTy = selfTy.getFieldType(field, gen.SGM.M);
      auto &fieldTL = gen.getTypeLowering(fieldTy);
      SILValue slot = gen.B.createStructElementAddr(Loc, resultSlot, field,
                                                    fieldTL.getLoweredType().getAddressType());
      InitializationPtr init(new KnownAddressInitialization(slot));

      // An initialized 'let' property has a single value specified by the
      // initializer - it doesn't come from an argument.
      if (!field->isStatic() && field->isLet() &&
          field->getParentInitializer()) {
        assert(field->getType()->isEqual(field->getParentInitializer()
                                         ->getType()) && "Checked by sema");

        // Cleanup after this initialization.
        FullExpr scope(gen.Cleanups, field->getParentPatternBinding());
        gen.emitRValue(field->getParentInitializer())
          .forwardInto(gen, init.get(), Loc);
        continue;
      }

      assert(elti != eltEnd && "number of args does not match number of fields");
      (void)eltEnd;
      std::move(*elti).forwardInto(gen, init.get(), Loc);
      ++elti;
    }
    gen.B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                       gen.emitEmptyTuple(Loc));
    return;
  }

  // Otherwise, build a struct value directly from the elements.
  SmallVector<SILValue, 4> eltValues;

  auto elti = elements.begin(), eltEnd = elements.end();
  for (VarDecl *field : decl->getStoredProperties()) {
    auto fieldTy = selfTy.getFieldType(field, gen.SGM.M);
    SILValue v;

    // An initialized 'let' property has a single value specified by the
    // initializer - it doesn't come from an argument.
    if (!field->isStatic() && field->isLet() && field->getParentInitializer()) {
      // Cleanup after this initialization.
      FullExpr scope(gen.Cleanups, field->getParentPatternBinding());
      v = gen.emitRValue(field->getParentInitializer())
             .forwardAsSingleStorageValue(gen, fieldTy, Loc);
    } else {
      assert(elti != eltEnd && "number of args does not match number of fields");
      (void)eltEnd;
      v = std::move(*elti).forwardAsSingleStorageValue(gen, fieldTy, Loc);
      ++elti;
    }

    eltValues.push_back(v);
  }

  SILValue selfValue = gen.B.createStruct(Loc, selfTy, eltValues);
  gen.B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                     selfValue);
  return;
}
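
The argument-matching walk above (a 'let' property with an initializer takes its value from the initializer; every other stored property consumes the next constructor argument) can be shown with a small toy model. A sketch with hypothetical Field and value types, not SILGen's:

#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct Field { std::string Name; std::optional<int> Initializer; };

// Fields with an initializer take its value; the rest consume arguments.
std::vector<int> buildFieldValues(const std::vector<Field> &Fields,
                                  const std::vector<int> &Args) {
  std::vector<int> Values;
  auto AI = Args.begin();
  for (const Field &F : Fields) {
    if (F.Initializer) { Values.push_back(*F.Initializer); continue; }
    assert(AI != Args.end() && "number of args does not match fields");
    Values.push_back(*AI++);
  }
  return Values;
}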
Example #9
bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
        Loop *TheLoop, bool HasFunNoNaNAttr,
        RecurrenceDescriptor &RedDes) {
    if (Phi->getNumIncomingValues() != 2)
        return false;

    // Reduction variables are only found in the loop header block.
    if (Phi->getParent() != TheLoop->getHeader())
        return false;

    // Obtain the reduction start value from the value that comes from the loop
    // preheader.
    Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

    // ExitInstruction is the single value which is used outside the loop.
    // We only allow a single reduction value to be used outside the loop.
    // This includes the users of the reduction variables, which form a cycle
    // that ends in the phi node.
    Instruction *ExitInstruction = nullptr;
    // Indicates that we found a reduction operation in our scan.
    bool FoundReduxOp = false;

    // We start with the PHI node and scan for all of the users of this
    // instruction. All users must be instructions that can be used as reduction
    // variables (such as ADD). We must have a single out-of-block user. The cycle
    // must include the original PHI.
    bool FoundStartPHI = false;

    // To recognize min/max patterns formed by an icmp/select sequence, we
    // store the number of instructions we saw from the recognized min/max
    // pattern, to make sure we see exactly the two expected instructions.
    InstDesc ReduxDesc(false, nullptr);

    // Data used for determining if the recurrence has been type-promoted.
    Type *RecurrenceType = Phi->getType();
    SmallPtrSet<Instruction *, 4> CastInsts;
    Instruction *Start = Phi;
    bool IsSigned = false;

    SmallPtrSet<Instruction *, 8> VisitedInsts;
    SmallVector<Instruction *, 8> Worklist;

    // Return early if the recurrence kind does not match the type of Phi. If the
    // recurrence kind is arithmetic, we attempt to look through AND operations
    // resulting from the type promotion performed by InstCombine.  Vector
    // operations are not limited to the legal integer widths, so we may be able
    // to evaluate the reduction in the narrower width.
    if (RecurrenceType->isFloatingPointTy()) {
        if (!isFloatingPointRecurrenceKind(Kind))
            return false;
    } else {
        if (!isIntegerRecurrenceKind(Kind))
            return false;
        if (isArithmeticRecurrenceKind(Kind))
            Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
    }

    Worklist.push_back(Start);
    VisitedInsts.insert(Start);

    // A value in the reduction can be used:
    //  - By the reduction:
    //      - Reduction operation:
    //        - One use of reduction value (safe).
    //        - Multiple use of reduction value (not safe).
    //      - PHI:
    //        - All uses of the PHI must be the reduction (safe).
    //        - Otherwise, not safe.
    //  - By one instruction outside of the loop (safe).
    //  - By further instructions outside of the loop (not safe).
    //  - By an instruction that is not part of the reduction (not safe).
    //    This is either:
    //      * An instruction type other than PHI or the reduction operation.
    //      * A PHI in the header other than the initial PHI.
    while (!Worklist.empty()) {
        Instruction *Cur = Worklist.back();
        Worklist.pop_back();

        // No Users.
        // If the instruction has no users then this is a broken chain and can't be
        // a reduction variable.
        if (Cur->use_empty())
            return false;

        bool IsAPhi = isa<PHINode>(Cur);

        // A header PHI use other than the original PHI.
        if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
            return false;

        // Reductions of instructions such as Div and Sub are only possible if
        // the LHS is the reduction variable.
        if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
                !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
                !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
            return false;

        // Any reduction instruction must be of one of the allowed kinds. We ignore
        // the starting value (the Phi or an AND instruction if the Phi has been
        // type-promoted).
        if (Cur != Start) {
            ReduxDesc = isRecurrenceInstr(Cur, Kind, ReduxDesc, HasFunNoNaNAttr);
            if (!ReduxDesc.isRecurrence())
                return false;
        }

        // A reduction operation must only have one use of the reduction value.
        if (!IsAPhi && Kind != RK_IntegerMinMax && Kind != RK_FloatMinMax &&
                hasMultipleUsesOf(Cur, VisitedInsts))
            return false;

        // All inputs to a PHI node must be a reduction value.
        if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
            return false;

        if (Kind == RK_IntegerMinMax &&
                (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
            ++NumCmpSelectPatternInst;
        if (Kind == RK_FloatMinMax && (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
            ++NumCmpSelectPatternInst;

        // Check whether we found a reduction operator.
        FoundReduxOp |= !IsAPhi && Cur != Start;

        // Process users of current instruction. Push non-PHI nodes after PHI nodes
        // onto the stack. This way we are going to have seen all inputs to PHI
        // nodes once we get to them.
        SmallVector<Instruction *, 8> NonPHIs;
        SmallVector<Instruction *, 8> PHIs;
        for (User *U : Cur->users()) {
            Instruction *UI = cast<Instruction>(U);

            // Check if we found the exit user.
            BasicBlock *Parent = UI->getParent();
            if (!TheLoop->contains(Parent)) {
                // Exit if we find multiple outside users or if the header phi node
                // is being used. In that case the user sees the value of the previous
                // iteration, and we would lose "VF-1" iterations of the reduction
                // operation if we vectorize.
                if (ExitInstruction != nullptr || Cur == Phi)
                    return false;

                // The instruction used by an outside user must be the last instruction
                // before we feed back to the reduction phi. Otherwise, we lose VF-1
                // operations on the value.
                if (!is_contained(Phi->operands(), Cur))
                    return false;

                ExitInstruction = Cur;
                continue;
            }

            // Process instructions only once (termination). Each reduction cycle
            // value must only be used once, except by phi nodes and min/max
            // reductions which are represented as a cmp followed by a select.
            InstDesc IgnoredVal(false, nullptr);
            if (VisitedInsts.insert(UI).second) {
                if (isa<PHINode>(UI))
                    PHIs.push_back(UI);
                else
                    NonPHIs.push_back(UI);
            } else if (!isa<PHINode>(UI) &&
                       ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
                         !isa<SelectInst>(UI)) ||
                        !isMinMaxSelectCmpPattern(UI, IgnoredVal).isRecurrence()))
                return false;

            // Remember that we completed the cycle.
            if (UI == Phi)
                FoundStartPHI = true;
        }
        Worklist.append(PHIs.begin(), PHIs.end());
        Worklist.append(NonPHIs.begin(), NonPHIs.end());
    }

    // This means we have seen one but not the other instruction of the
    // pattern or more than just a select and cmp.
    if ((Kind == RK_IntegerMinMax || Kind == RK_FloatMinMax) &&
            NumCmpSelectPatternInst != 2)
        return false;

    if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
        return false;

    // If we think Phi may have been type-promoted, we also need to ensure that
    // all source operands of the reduction are either SExtInsts or ZExtInsts. If
    // so, we will be able to evaluate the reduction in the narrower bit width.
    if (Start != Phi)
        if (!getSourceExtensionKind(Start, ExitInstruction, RecurrenceType,
                                    IsSigned, VisitedInsts, CastInsts))
            return false;

    // We found a reduction var if we have reached the original phi node and we
    // only have a single instruction with out-of-loop users.

    // The ExitInstruction (the instruction which is allowed to have out-of-loop
    // users) is saved as part of the RecurrenceDescriptor.

    // Save the description of this reduction variable.
    RecurrenceDescriptor RD(
        RdxStart, ExitInstruction, Kind, ReduxDesc.getMinMaxKind(),
        ReduxDesc.getUnsafeAlgebraInst(), RecurrenceType, IsSigned, CastInsts);
    RedDes = RD;

    return true;
}
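For orientation, a caller typically probes a header PHI against each candidate kind until AddReductionVar accepts one. The sketch below is a hypothetical driver, not the library's actual helper; the RK_* enumerators beyond the two min/max kinds named in the function body are assumed to exist with these spellings.

static bool classifyReduction(PHINode *Phi, Loop *TheLoop, bool HasFunNoNaNAttr,
                              RecurrenceDescriptor &RedDes) {
  // Assumed kind enumerators; RK_IntegerMinMax and RK_FloatMinMax appear in
  // AddReductionVar itself, the others are assumptions.
  const RecurrenceDescriptor::RecurrenceKind Kinds[] = {
      RecurrenceDescriptor::RK_IntegerAdd, RecurrenceDescriptor::RK_IntegerMult,
      RecurrenceDescriptor::RK_IntegerMinMax, RecurrenceDescriptor::RK_FloatAdd,
      RecurrenceDescriptor::RK_FloatMult, RecurrenceDescriptor::RK_FloatMinMax};
  for (RecurrenceDescriptor::RecurrenceKind Kind : Kinds)
    if (RecurrenceDescriptor::AddReductionVar(Phi, Kind, TheLoop,
                                              HasFunNoNaNAttr, RedDes))
      return true; // RedDes now describes the reduction.
  return false;
}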
Example No. 10
int main(int argc_, const char **argv_) {
  llvm::sys::PrintStackTraceOnErrorSignal();
  llvm::PrettyStackTraceProgram X(argc_, argv_);

  std::set<std::string> SavedStrings;
  SmallVector<const char*, 256> argv;

  ExpandArgv(argc_, argv_, argv, SavedStrings);

  // Handle -cc1 integrated tools.
  if (argv.size() > 1 && StringRef(argv[1]).startswith("-cc1")) {
    StringRef Tool = argv[1] + 4;

    if (Tool == "")
      return cc1_main(argv.data()+2, argv.data()+argv.size(), argv[0],
                      (void*) (intptr_t) GetExecutablePath);
    if (Tool == "as")
      return cc1as_main(argv.data()+2, argv.data()+argv.size(), argv[0],
                      (void*) (intptr_t) GetExecutablePath);

    // Reject unknown tools.
    llvm::errs() << "error: unknown integrated tool '" << Tool << "'\n";
    return 1;
  }

  bool CanonicalPrefixes = true;
  for (int i = 1, size = argv.size(); i < size; ++i) {
    if (StringRef(argv[i]) == "-no-canonical-prefixes") {
      CanonicalPrefixes = false;
      break;
    }
  }

  llvm::sys::Path Path = GetExecutablePath(argv[0], CanonicalPrefixes);

  IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions;
  {
    // Note that ParseDiagnosticArgs() uses the cc1 option table.
    OwningPtr<OptTable> CC1Opts(createDriverOptTable());
    unsigned MissingArgIndex, MissingArgCount;
    OwningPtr<InputArgList> Args(CC1Opts->ParseArgs(argv.begin()+1, argv.end(),
                                            MissingArgIndex, MissingArgCount));
    // We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
    // Any errors that would be diagnosed here will also be diagnosed later,
    // when the DiagnosticsEngine actually exists.
    (void) ParseDiagnosticArgs(*DiagOpts, *Args);
  }
  // Now we can create the DiagnosticsEngine with a properly-filled-out
  // DiagnosticOptions instance.
  TextDiagnosticPrinter *DiagClient
    = new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
  DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
  IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());

  DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);
  ProcessWarningOptions(Diags, *DiagOpts);

#ifdef CLANG_IS_PRODUCTION
  const bool IsProduction = true;
#else
  const bool IsProduction = false;
#endif
  Driver TheDriver(Path.str(), llvm::sys::getDefaultTargetTriple(),
                   "a.out", IsProduction, Diags);

  // Attempt to find the original path used to invoke the driver, to determine
  // the installed path. We do this manually, because we want to support that
  // path being a symlink.
  {
    SmallString<128> InstalledPath(argv[0]);

    // Do a PATH lookup, if there are no directory components.
    if (llvm::sys::path::filename(InstalledPath) == InstalledPath) {
      llvm::sys::Path Tmp = llvm::sys::Program::FindProgramByName(
        llvm::sys::path::filename(InstalledPath.str()));
      if (!Tmp.empty())
        InstalledPath = Tmp.str();
    }
    llvm::sys::fs::make_absolute(InstalledPath);
    InstalledPath = llvm::sys::path::parent_path(InstalledPath);
    bool exists;
    if (!llvm::sys::fs::exists(InstalledPath.str(), exists) && exists)
      TheDriver.setInstalledDir(InstalledPath);
  }

  llvm::InitializeAllTargets();
  ParseProgName(argv, SavedStrings, TheDriver);

  // Handle CC_PRINT_OPTIONS and CC_PRINT_OPTIONS_FILE.
  TheDriver.CCPrintOptions = !!::getenv("CC_PRINT_OPTIONS");
  if (TheDriver.CCPrintOptions)
    TheDriver.CCPrintOptionsFilename = ::getenv("CC_PRINT_OPTIONS_FILE");

  // Handle CC_PRINT_HEADERS and CC_PRINT_HEADERS_FILE.
  TheDriver.CCPrintHeaders = !!::getenv("CC_PRINT_HEADERS");
  if (TheDriver.CCPrintHeaders)
    TheDriver.CCPrintHeadersFilename = ::getenv("CC_PRINT_HEADERS_FILE");

  // Handle CC_LOG_DIAGNOSTICS and CC_LOG_DIAGNOSTICS_FILE.
  TheDriver.CCLogDiagnostics = !!::getenv("CC_LOG_DIAGNOSTICS");
  if (TheDriver.CCLogDiagnostics)
    TheDriver.CCLogDiagnosticsFilename = ::getenv("CC_LOG_DIAGNOSTICS_FILE");

  // Handle QA_OVERRIDE_GCC3_OPTIONS and CCC_ADD_ARGS, used for editing a
  // command line behind the scenes.
  if (const char *OverrideStr = ::getenv("QA_OVERRIDE_GCC3_OPTIONS")) {
    // FIXME: Driver shouldn't take extra initial argument.
    ApplyQAOverride(argv, OverrideStr, SavedStrings);
  } else if (const char *Cur = ::getenv("CCC_ADD_ARGS")) {
    // FIXME: Driver shouldn't take extra initial argument.
    std::vector<const char*> ExtraArgs;

    for (;;) {
      const char *Next = strchr(Cur, ',');

      if (Next) {
        ExtraArgs.push_back(SaveStringInSet(SavedStrings,
                                            std::string(Cur, Next)));
        Cur = Next + 1;
      } else {
        if (*Cur != '\0')
          ExtraArgs.push_back(SaveStringInSet(SavedStrings, Cur));
        break;
      }
    }

    argv.insert(&argv[1], ExtraArgs.begin(), ExtraArgs.end());
  }

  OwningPtr<Compilation> C(TheDriver.BuildCompilation(argv));
  int Res = 0;
  const Command *FailingCommand = 0;
  if (C.get())
    Res = TheDriver.ExecuteCompilation(*C, FailingCommand);

  // Force a crash to test the diagnostics.
  if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
    Res = -1;

  // If result status is < 0, then the driver command signalled an error.
  // In this case, generate additional diagnostic information if possible.
  if (Res < 0)
    TheDriver.generateCompilationDiagnostics(*C, FailingCommand);

  // If any timers were active but haven't been destroyed yet, print their
  // results now.  This happens in -disable-free mode.
  llvm::TimerGroup::printAll(llvm::errs());
  
  llvm::llvm_shutdown();

#ifdef _WIN32
  // Exit status should not be negative on Win32, unless abnormal termination.
  // Once abnormal termination has been caught, negative status should not be
  // propagated.
  if (Res < 0)
    Res = 1;
#endif

  return Res;
}
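The CCC_ADD_ARGS handling above is a plain strchr scan over a comma-separated list. Here is a minimal standalone sketch of the same splitting behavior (a hypothetical helper in standard C++, not part of the driver):

#include <cstring>
#include <string>
#include <vector>

static std::vector<std::string> splitExtraArgs(const char *Cur) {
  std::vector<std::string> ExtraArgs;
  for (;;) {
    const char *Next = std::strchr(Cur, ',');
    if (Next) {
      ExtraArgs.push_back(std::string(Cur, Next)); // piece before the comma
      Cur = Next + 1;
    } else {
      if (*Cur != '\0')
        ExtraArgs.push_back(Cur); // final piece, if non-empty
      break;
    }
  }
  return ExtraArgs;
}
// splitExtraArgs("-g,-O1,") yields {"-g", "-O1"}: like the loop above, only
// an empty trailing piece is dropped; empty middle pieces are kept.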
Example No. 11
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
  if (++UI == MRI->use_nodbg_end())
    // No other uses.
    return false;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  UI = MRI->use_nodbg_begin(DstReg);
  for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  UI = MRI->use_nodbg_begin(SrcReg);
  for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range of
      // the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    UI = MRI->use_nodbg_begin(DstReg);
    for (MachineRegisterInfo::use_nodbg_iterator
           UE = MRI->use_nodbg_end(); UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed)
        MRI->clearKillFlags(DstReg);

      unsigned NewVR = MRI->createVirtualRegister(RC);
      BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);

      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}
Example No. 12
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDF's, and the standard SSA construction algorithm.  Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock*, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in.  These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Use a priority queue keyed on dominator tree level so that inserted nodes
  // are handled from the bottom of the dominator tree upwards.
  typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
                              DomTreeNodeCompare> IDFPriorityQueue;
  IDFPriorityQueue PQ;

  for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
       E = DefBlocks.end(); I != E; ++I) {
    if (DomTreeNode *Node = DT.getNode(*I))
      PQ.push(std::make_pair(Node, DomLevels[Node]));
  }

  SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
  SmallPtrSet<DomTreeNode*, 32> Visited;
  SmallVector<DomTreeNode*, 32> Worklist;
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dominator tree children of Root, inspecting their CFG edges with
    // targets elsewhere on the dominator tree. Only targets whose level is at
    // most Root's level are added to the iterated dominance frontier of the
    // definition set.

    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      BasicBlock *BB = Node->getBlock();

      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        DomTreeNode *SuccNode = DT.getNode(*SI);

        // Quickly skip all CFG edges that are also dominator tree edges instead
        // of catching them below.
        if (SuccNode->getIDom() == Node)
          continue;

        unsigned SuccLevel = DomLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        if (!Visited.insert(SuccNode))
          continue;

        BasicBlock *SuccBB = SuccNode->getBlock();
        if (!LiveInBlocks.count(SuccBB))
          continue;

        DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
        if (!DefBlocks.count(SuccBB))
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
           ++CI) {
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
      }
    }
  }

  if (DFBlocks.size() > 1)
    std::sort(DFBlocks.begin(), DFBlocks.end());

  unsigned CurrentVersion = 0;
  for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
    QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
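The queue above is keyed on dominator-tree level so that deeper nodes are processed first. A minimal sketch of the supporting pieces it assumes (the actual definitions live elsewhere in this pass; DomLevels is assumed to map each DomTreeNode to its depth):

#include <utility>

typedef std::pair<DomTreeNode *, unsigned> DomTreeNodePair;

struct DomTreeNodeCompare {
  bool operator()(const DomTreeNodePair &LHS, const DomTreeNodePair &RHS) {
    // std::priority_queue pops its *largest* element, so comparing by level
    // with operator< surfaces the deepest pending node first.
    return LHS.second < RHS.second;
  }
};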
Example No. 13
/// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
/// if unrolling was successful, or false if the loop was unmodified. Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
///
/// TripCount is generally defined as the number of times the loop header
/// executes. UnrollLoop relaxes the definition to permit early exits: here
/// TripCount is the iteration on which control exits LatchBlock if no early
/// exits were taken. Note that UnrollLoop assumes that the loop counter test
/// terminates LatchBlock in order to remove unnecessary instances of the
/// test. In other words, control may exit the loop prior to TripCount
/// iterations via an early branch, but control may not exit the loop from the
/// LatchBlock's terminator prior to TripCount iterations.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// If AllowRuntime is true then UnrollLoop will consider unrolling loops that
/// have a runtime (i.e. not compile-time constant) trip count.  Unrolling these
/// loops requires an unroll "prologue" that runs "RuntimeTripCount % Count"
/// iterations before branching into the unrolled loop.  UnrollLoop will not
/// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
/// AllowExpensiveTripCount is false.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
/// DominatorTree if they are non-null.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
                      bool AllowRuntime, bool AllowExpensiveTripCount,
                      unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
                      DominatorTree *DT, AssumptionCache *AC,
                      bool PreserveLCSSA) {
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "  Can't unroll; loop preheader-insertion failed.\n");
    return false;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << "  Can't unroll; loop exit-block-insertion failed.\n");
    return false;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << "  Can't unroll; Loop body cannot be cloned.\n");
    return false;
  }

  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
             "  Can't unroll; loop not terminated by a conditional branch.\n");
    return false;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          "  Won't unroll loop: address of header block is taken.\n");
    return false;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << "  Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << "  Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do. This way we don't
  // need to support "partial unrolling by 1".
  if (TripCount == 0 && Count < 2)
    return false;

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks();

  // Go through all exits of L and see if there are any phi-nodes there. We just
  // conservatively assume that they're inserted to preserve LCSSA form, which
  // means that complete unrolling might break this form. We need to either fix
  // it in-place after the transformation, or entirely rebuild LCSSA. TODO: For
  // now we just recompute LCSSA for the outer loop, but it should be possible
  // to fix it in-place.
  bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
      std::any_of(ExitBlocks.begin(), ExitBlocks.end(),
                  [&](BasicBlock *BB) { return isa<PHINode>(BB->begin()); });

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

  // Loops containing convergent instructions must have a count that divides
  // their TripMultiple.
  DEBUG(
      {
        bool HasConvergent = false;
        for (auto &BB : L->blocks())
          for (auto &I : *BB)
            if (auto CS = CallSite(&I))
              HasConvergent |= CS.isConvergent();
        assert((!HasConvergent || TripMultiple % Count == 0) &&
               "Unroll count must divide trip multiple if loop contains a "
               "convergent operation.");
      });
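A small worked sketch of the clamping logic near the top of UnrollLoop; this is a hypothetical helper mirroring the two early checks above, not code from the pass.

static unsigned clampUnrollCount(unsigned Count, unsigned TripCount) {
  // Unrolled iterations beyond a known trip count would never execute,
  // so the requested count is clamped down to it.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;
  return Count;
}
// clampUnrollCount(8, 3) == 3 -> complete unroll (Count == TripCount)
// clampUnrollCount(4, 0) == 4 -> trip count unknown; runtime unrolling may
//                                need the "RuntimeTripCount % Count" prologue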
Example No. 14
void SelectionDAGBuilder::LowerStatepoint(
    ImmutableStatepoint ISP, MachineBasicBlock *LandingPad /*=nullptr*/) {
  // The basic scheme here is that information about both the original call and
  // the safepoint is encoded in the CallInst.  We create a temporary call and
  // lower it, then reverse engineer the calling sequence.

  NumOfStatepoints++;
  // Clear state
  StatepointLowering.startNewStatepoint(*this);

  ImmutableCallSite CS(ISP.getCallSite());

#ifndef NDEBUG
  // Consistency check. Don't do this for invokes. It would be too
  // expensive to preserve this information across different basic blocks
  if (!CS.isInvoke()) {
    for (const User *U : CS->users()) {
      const CallInst *Call = cast<CallInst>(U);
      if (isGCRelocate(Call))
        StatepointLowering.scheduleRelocCall(*Call);
    }
  }
#endif

#ifndef NDEBUG
  // If this is a malformed statepoint, report it early to simplify debugging.
  // This should catch any IR level mistake that's made when constructing or
  // transforming statepoints.
  ISP.verify();

  // Check that the associated GCStrategy expects to encounter statepoints.
  assert(GFI->getStrategy().useStatepoints() &&
         "GCStrategy does not expect to encounter statepoints");
#endif

  // Lower statepoint vmstate and gcstate arguments
  SmallVector<SDValue, 10> LoweredMetaArgs;
  lowerStatepointMetaArgs(LoweredMetaArgs, ISP, *this);

  // Get call node, we will replace it later with statepoint
  SDNode *CallNode =
      lowerCallFromStatepoint(ISP, LandingPad, *this, PendingExports);

  // Construct the actual GC_TRANSITION_START, STATEPOINT, and GC_TRANSITION_END
  // nodes with all the appropriate arguments and return values.

  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  SDValue Chain = CallNode->getOperand(0);

  SDValue Glue;
  bool CallHasIncomingGlue = CallNode->getGluedNode();
  if (CallHasIncomingGlue) {
    // Glue is always last operand
    Glue = CallNode->getOperand(CallNode->getNumOperands() - 1);
  }

  // Build the GC_TRANSITION_START node if necessary.
  //
  // The operands to the GC_TRANSITION_{START,END} nodes are laid out in the
  // order in which they appear in the call to the statepoint intrinsic. If
  // any of the operands is pointer-typed, that operand is immediately
  // followed by a SRCVALUE for the pointer that may be used during lowering
  // (e.g. to form MachinePointerInfo values for loads/stores).
  const bool IsGCTransition =
      (ISP.getFlags() & (uint64_t)StatepointFlags::GCTransition) ==
          (uint64_t)StatepointFlags::GCTransition;
  if (IsGCTransition) {
    SmallVector<SDValue, 8> TSOps;

    // Add chain
    TSOps.push_back(Chain);

    // Add GC transition arguments
    for (const Value *V : ISP.gc_transition_args()) {
      TSOps.push_back(getValue(V));
      if (V->getType()->isPointerTy())
        TSOps.push_back(DAG.getSrcValue(V));
    }

    // Add glue if necessary
    if (CallHasIncomingGlue)
      TSOps.push_back(Glue);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

    SDValue GCTransitionStart =
        DAG.getNode(ISD::GC_TRANSITION_START, getCurSDLoc(), NodeTys, TSOps);

    Chain = GCTransitionStart.getValue(0);
    Glue = GCTransitionStart.getValue(1);
  }

  // TODO: Currently, all of these operands are being marked as read/write in
  // PrologEpilogInserter.cpp; we should special-case the VMState arguments
  // and flags to be read-only.
  SmallVector<SDValue, 40> Ops;

  // Add the <id> and <numBytes> constants.
  Ops.push_back(DAG.getTargetConstant(ISP.getID(), getCurSDLoc(), MVT::i64));
  Ops.push_back(
      DAG.getTargetConstant(ISP.getNumPatchBytes(), getCurSDLoc(), MVT::i32));

  // Calculate and push starting position of vmstate arguments
  // Get number of arguments incoming directly into call node
  unsigned NumCallRegArgs =
      CallNode->getNumOperands() - (CallHasIncomingGlue ? 4 : 3);
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, getCurSDLoc(), MVT::i32));

  // Add call target
  SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
  Ops.push_back(CallTarget);

  // Add call arguments
  // Get position of register mask in the call
  SDNode::op_iterator RegMaskIt;
  if (CallHasIncomingGlue)
    RegMaskIt = CallNode->op_end() - 2;
  else
    RegMaskIt = CallNode->op_end() - 1;
  Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);

  // Add a constant argument for the calling convention
  pushStackMapConstant(Ops, *this, CS.getCallingConv());

  // Add a constant argument for the flags
  uint64_t Flags = ISP.getFlags();
  assert(
      ((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0)
          && "unknown flag used");
  pushStackMapConstant(Ops, *this, Flags);

  // Insert all vmstate and gcstate arguments
  Ops.insert(Ops.end(), LoweredMetaArgs.begin(), LoweredMetaArgs.end());

  // Add register mask from call node
  Ops.push_back(*RegMaskIt);

  // Add chain
  Ops.push_back(Chain);

  // Same for the glue, but we add it only if original call had it
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Compute return values.  Provide a glue output since we consume one as
  // input.  This allows someone else to chain off us as needed.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  SDNode *StatepointMCNode =
      DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);

  SDNode *SinkNode = StatepointMCNode;

  // Build the GC_TRANSITION_END node if necessary.
  //
  // See the comment above regarding GC_TRANSITION_START for the layout of
  // the operands to the GC_TRANSITION_END node.
  if (IsGCTransition) {
    SmallVector<SDValue, 8> TEOps;

    // Add chain
    TEOps.push_back(SDValue(StatepointMCNode, 0));

    // Add GC transition arguments
    for (const Value *V : ISP.gc_transition_args()) {
      TEOps.push_back(getValue(V));
      if (V->getType()->isPointerTy())
        TEOps.push_back(DAG.getSrcValue(V));
    }

    // Add glue
    TEOps.push_back(SDValue(StatepointMCNode, 1));

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

    SDValue GCTransitionEnd =
        DAG.getNode(ISD::GC_TRANSITION_END, getCurSDLoc(), NodeTys, TEOps);

    SinkNode = GCTransitionEnd.getNode();
  }

  // Replace original call
  DAG.ReplaceAllUsesWith(CallNode, SinkNode); // This may update Root
  // Remove the original call node
  DAG.DeleteNode(CallNode);

  // DON'T set the root - under the assumption that it's already set past the
  // inserted node we created.

  // TODO: A better future implementation would be to emit a single variable
  // argument, variable return value STATEPOINT node here and then hookup the
  // return value of each gc.relocate to the respective output of the
  // previously emitted STATEPOINT value.  Unfortunately, this doesn't appear
  // to actually be possible today.
}
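Reading the pushes above back out, the operand list handed to the STATEPOINT machine node has roughly this shape; this is a summary sketch assembled from this function, not a layout quoted from the target documentation.

// STATEPOINT operand layout as assembled above (sketch):
//   [0]  <id>           i64 target constant (ISP.getID())
//   [1]  <numBytes>     i32 target constant (ISP.getNumPatchBytes())
//   [2]  <numCallArgs>  i32 target constant (arguments passed in registers)
//   [3]  <call target>
//   [4+] call arguments copied from the lowered call node,
//        then the calling-convention constant and the flags constant,
//        then the lowered vmstate/gcstate arguments,
//        then the register mask, the chain, and (if present) the glue.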
Example No. 15
// A soft instruction can be changed to work in other domains given by mask.
void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
  // Bitmask of available domains for this instruction after taking collapsed
  // operands into account.
  unsigned available = mask;

  // Scan the explicit use operands for incoming domains.
  SmallVector<int, 4> used;
  if (LiveRegs)
    for (unsigned i = mi->getDesc().getNumDefs(),
                  e = mi->getDesc().getNumOperands(); i != e; ++i) {
      MachineOperand &mo = mi->getOperand(i);
      if (!mo.isReg()) continue;
      for (int rx : regIndices(mo.getReg())) {
        DomainValue *dv = LiveRegs[rx].Value;
        if (dv == nullptr)
          continue;
        // Bitmask of domains that dv and available have in common.
        unsigned common = dv->getCommonDomains(available);
        // Is it possible to use this collapsed register for free?
        if (dv->isCollapsed()) {
          // Restrict available domains to the ones in common with the operand.
          // If there are no common domains, we must pay the cross-domain
          // penalty for this operand.
          if (common) available = common;
        } else if (common)
          // Open DomainValue is compatible, save it for merging.
          used.push_back(rx);
        else
          // Open DomainValue is not compatible with instruction. It is useless
          // now.
          kill(rx);
      }
    }

  // If the collapsed operands force a single domain, propagate the collapse.
  if (isPowerOf2_32(available)) {
    unsigned domain = countTrailingZeros(available);
    TII->setExecutionDomain(*mi, domain);
    visitHardInstr(mi, domain);
    return;
  }

  // Kill off any remaining uses that don't match available, and build a list of
  // incoming DomainValues that we want to merge.
  SmallVector<LiveReg, 4> Regs;
  for (SmallVectorImpl<int>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
    int rx = *i;
    assert(LiveRegs && "no space allocated for live registers");
    const LiveReg &LR = LiveRegs[rx];
    // This useless DomainValue could have been missed above.
    if (!LR.Value->getCommonDomains(available)) {
      kill(rx);
      continue;
    }
    // Sorted insertion.
    bool Inserted = false;
    for (SmallVectorImpl<LiveReg>::iterator i = Regs.begin(), e = Regs.end();
           i != e && !Inserted; ++i) {
      if (LR.Def < i->Def) {
        Inserted = true;
        Regs.insert(i, LR);
      }
    }
    if (!Inserted)
      Regs.push_back(LR);
  }

  // doms are now sorted in order of appearance. Try to merge them all, giving
  // priority to the latest ones.
  DomainValue *dv = nullptr;
  while (!Regs.empty()) {
    if (!dv) {
      dv = Regs.pop_back_val().Value;
      // Force the first dv to match the current instruction.
      dv->AvailableDomains = dv->getCommonDomains(available);
      assert(dv->AvailableDomains && "Domain should have been filtered");
      continue;
    }

    DomainValue *Latest = Regs.pop_back_val().Value;
    // Skip already merged values.
    if (Latest == dv || Latest->Next)
      continue;
    if (merge(dv, Latest))
      continue;

    // If latest didn't merge, it is useless now. Kill all registers using it.
    for (int i : used) {
      assert(LiveRegs && "no space allocated for live registers");
      if (LiveRegs[i].Value == Latest)
        kill(i);
    }
  }

  // dv is the DomainValue we are going to use for this instruction.
  if (!dv) {
    dv = alloc();
    dv->AvailableDomains = available;
  }
  dv->Instrs.push_back(mi);

  // Finally set all defs and non-collapsed uses to dv. We must iterate through
  // all the operands, including imp-def ones.
  for (MachineInstr::mop_iterator ii = mi->operands_begin(),
                                  ee = mi->operands_end();
                                  ii != ee; ++ii) {
    MachineOperand &mo = *ii;
    if (!mo.isReg()) continue;
    for (int rx : regIndices(mo.getReg())) {
      if (!LiveRegs[rx].Value || (mo.isDef() && LiveRegs[rx].Value != dv)) {
        kill(rx);
        setLiveReg(rx, dv);
      }
    }
  }
}
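The collapse test above leans on two bit utilities from llvm/Support/MathExtras.h. A tiny hypothetical helper makes the arithmetic concrete:

#include <cassert>
#include "llvm/Support/MathExtras.h"

// If exactly one domain bit survives in 'available', that bit's index is the
// forced execution domain; e.g. available = 0b0100 -> domain 2.
static unsigned pickCollapsedDomain(unsigned available) {
  assert(llvm::isPowerOf2_32(available) && "more than one domain still open");
  return llvm::countTrailingZeros(available);
}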
Example No. 16
/// findLoopBackEdges - Do a DFS walk to find loop back edges.
///
void CodeGenPrepare::findLoopBackEdges(const Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);
  
  BackEdges.insert(Edges.begin(), Edges.end());
}
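A brief note on how the result is consumed; the set-like nature of BackEdges is inferred from the ranged insert above, not from its declaration.

// Each pair in Edges is a (source, target) CFG edge that closes a cycle,
// as discovered by FindFunctionBackedges' DFS. Keeping them in the
// set-like BackEdges member lets later code ask "is this edge a back
// edge?" with a single lookup.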
Example No. 17
void PlistDiagnostics::FlushDiagnosticsImpl(
                                    std::vector<const PathDiagnostic *> &Diags,
                                    FilesMade *filesMade) {
  // Build up a set of FIDs that we use by scanning the locations and
  // ranges of the diagnostics.
  FIDMap FM;
  SmallVector<FileID, 10> Fids;
  const SourceManager* SM = nullptr;

  if (!Diags.empty())
    SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();

  
  for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
       DE = Diags.end(); DI != DE; ++DI) {

    const PathDiagnostic *D = *DI;

    SmallVector<const PathPieces *, 5> WorkList;
    WorkList.push_back(&D->path);

    while (!WorkList.empty()) {
      const PathPieces &path = *WorkList.pop_back_val();

      for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
           ++I) {
        const PathDiagnosticPiece *piece = I->getPtr();
        AddFID(FM, Fids, *SM, piece->getLocation().asLocation());
        ArrayRef<SourceRange> Ranges = piece->getRanges();
        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
                                             E = Ranges.end(); I != E; ++I) {
          AddFID(FM, Fids, *SM, I->getBegin());
          AddFID(FM, Fids, *SM, I->getEnd());
        }

        if (const PathDiagnosticCallPiece *call =
            dyn_cast<PathDiagnosticCallPiece>(piece)) {
          IntrusiveRefCntPtr<PathDiagnosticEventPiece>
            callEnterWithin = call->getCallEnterWithinCallerEvent();
          if (callEnterWithin)
            AddFID(FM, Fids, *SM, callEnterWithin->getLocation().asLocation());

          WorkList.push_back(&call->path);
        }
        else if (const PathDiagnosticMacroPiece *macro =
                 dyn_cast<PathDiagnosticMacroPiece>(piece)) {
          WorkList.push_back(&macro->subPieces);
        }
      }
    }
  }

  // Open the file.
  std::string ErrMsg;
  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg, llvm::sys::fs::F_Text);
  if (!ErrMsg.empty()) {
    llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
    return;
  }

  // Write the plist header.
  o << PlistHeader;

  // Write the root object: a <dict> containing...
  //  - "clang_version", the string representation of clang version
  //  - "files", an <array> mapping from FIDs to file names
  //  - "diagnostics", an <array> containing the path diagnostics
  o << "<dict>\n" <<
       " <key>clang_version</key>\n";
  EmitString(o, getClangFullVersion()) << '\n';
  o << " <key>files</key>\n"
       " <array>\n";

  for (SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
       I!=E; ++I) {
    o << "  ";
    EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n';
  }

  o << " </array>\n"
       " <key>diagnostics</key>\n"
       " <array>\n";

  for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
       DE = Diags.end(); DI!=DE; ++DI) {

    o << "  <dict>\n"
         "   <key>path</key>\n";

    const PathDiagnostic *D = *DI;

    o << "   <array>\n";

    for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end(); 
         I != E; ++I)
      ReportDiag(o, **I, FM, *SM, LangOpts);

    o << "   </array>\n";

    // Output the bug type and bug category.
    o << "   <key>description</key>";
    EmitString(o, D->getShortDescription()) << '\n';
    o << "   <key>category</key>";
    EmitString(o, D->getCategory()) << '\n';
    o << "   <key>type</key>";
    EmitString(o, D->getBugType()) << '\n';
    
    // Output information about the semantic context where
    // the issue occurred.
    if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
      // FIXME: handle blocks, which have no name.
      if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
        StringRef declKind;
        switch (ND->getKind()) {
          case Decl::CXXRecord:
            declKind = "C++ class";
            break;
          case Decl::CXXMethod:
            declKind = "C++ method";
            break;
          case Decl::ObjCMethod:
            declKind = "Objective-C method";
            break;
          case Decl::Function:
            declKind = "function";
            break;
          default:
            break;
        }
        if (!declKind.empty()) {
          const std::string &declName = ND->getDeclName().getAsString();
          o << "  <key>issue_context_kind</key>";
          EmitString(o, declKind) << '\n';
          o << "  <key>issue_context</key>";
          EmitString(o, declName) << '\n';
        }

        // Output the bug hash for issue unique-ing. Currently, it's just an
        // offset from the beginning of the function.
        if (const Stmt *Body = DeclWithIssue->getBody()) {
          
          // If the bug uniqueing location exists, use it for the hash.
          // For example, this ensures that two leaks reported on the same line
          // will have different issue_hashes and that the hash will identify
          // the leak location even after code is added between the allocation
          // site and the end of scope (leak report location).
          PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
          if (UPDLoc.isValid()) {
            FullSourceLoc UL(SM->getExpansionLoc(UPDLoc.asLocation()),
                             *SM);
            FullSourceLoc UFunL(SM->getExpansionLoc(
              D->getUniqueingDecl()->getBody()->getLocStart()), *SM);
            o << "  <key>issue_hash</key><string>"
              << UL.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
              << "</string>\n";

          // Otherwise, use the location on which the bug is reported.
          } else {
            FullSourceLoc L(SM->getExpansionLoc(D->getLocation().asLocation()),
                            *SM);
            FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
            o << "  <key>issue_hash</key><string>"
              << L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
              << "</string>\n";
          }

        }
      }
    }

    // Output the location of the bug.
    o << "  <key>location</key>\n";
    EmitLocation(o, *SM, LangOpts, D->getLocation().asLocation(), FM, 2);

    // Output the diagnostic to the sub-diagnostic client, if any.
    if (!filesMade->empty()) {
      StringRef lastName;
      PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D);
      if (files) {
        for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(),
                CE = files->end(); CI != CE; ++CI) {
          StringRef newName = CI->first;
          if (newName != lastName) {
            if (!lastName.empty()) {
              o << "  </array>\n";
            }
            lastName = newName;
            o <<  "  <key>" << lastName << "_files</key>\n";
            o << "  <array>\n";
          }
          o << "   <string>" << CI->second << "</string>\n";
        }
        o << "  </array>\n";
      }
    }

    // Close up the entry.
    o << "  </dict>\n";
  }

  o << " </array>\n";

  // Finish.
  o << "</dict>\n</plist>";  
}
Example No. 18
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (llvm::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects; 
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      CouldRef Pred = { CS, AA };
      DeadStackObjects.remove_if(Pred);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }
Example No. 19
bool MemDepPrinter::runOnFunction(Function &F) {
  this->F = &F;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  MemoryDependenceAnalysis &MDA = getAnalysis<MemoryDependenceAnalysis>();

  // All this code uses non-const interfaces because MemDep is not
  // const-friendly, though nothing is actually modified.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    Instruction *Inst = &*I;

    if (!Inst->mayReadFromMemory() && !Inst->mayWriteToMemory())
      continue;

    MemDepResult Res = MDA.getDependency(Inst);
    if (!Res.isNonLocal()) {
      Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
                                       static_cast<BasicBlock *>(nullptr)));
    } else if (CallSite CS = cast<Value>(Inst)) {
      const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
        MDA.getNonLocalCallDependency(CS);

      DepSet &InstDeps = Deps[Inst];
      for (MemoryDependenceAnalysis::NonLocalDepInfo::const_iterator
           I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
        const MemDepResult &Res = I->getResult();
        InstDeps.insert(std::make_pair(getInstTypePair(Res), I->getBB()));
      }
    } else {
      SmallVector<NonLocalDepResult, 4> NLDI;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        if (!LI->isUnordered()) {
          // FIXME: Handle atomic/volatile loads.
          Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
                                           static_cast<BasicBlock *>(nullptr)));
          continue;
        }
        AliasAnalysis::Location Loc = AA.getLocation(LI);
        MDA.getNonLocalPointerDependency(Loc, true, LI->getParent(), NLDI);
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        if (!SI->isUnordered()) {
          // FIXME: Handle atomic/volatile stores.
          Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
                                           static_cast<BasicBlock *>(nullptr)));
          continue;
        }
        AliasAnalysis::Location Loc = AA.getLocation(SI);
        MDA.getNonLocalPointerDependency(Loc, false, SI->getParent(), NLDI);
      } else if (VAArgInst *VI = dyn_cast<VAArgInst>(Inst)) {
        AliasAnalysis::Location Loc = AA.getLocation(VI);
        MDA.getNonLocalPointerDependency(Loc, false, VI->getParent(), NLDI);
      } else {
        llvm_unreachable("Unknown memory instruction!");
      }

      DepSet &InstDeps = Deps[Inst];
      for (SmallVectorImpl<NonLocalDepResult>::const_iterator
           I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
        const MemDepResult &Res = I->getResult();
        InstDeps.insert(std::make_pair(getInstTypePair(Res), I->getBB()));
      }
    }
  }

  return false;
}
Example No. 20
/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;
  
  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
  
  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexpressions) didn't extend any
  // live ranges, we're OK with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!
  
  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;
    
    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();
    
    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;
    
    MatchedAddrModeInsts.clear();
  }
  
  return true;
}
Example No. 21
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue a sequence of non-sp-increment and sp-post-increment restores for
    // the callee-saved registers. Only the last load uses sp post-increment
    // addressing and de-allocates the stack:
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp], #48     // addImm(+6)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    bool BumpSP = RPII == std::prev(RegPairs.end());
    if (RPI.IsGPR) {
      if (BumpSP)
        LdrOpc = RPI.isPaired() ? AArch64::LDPXpost : AArch64::LDRXpost;
      else
        LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    } else {
      if (BumpSP)
        LdrOpc = RPI.isPaired() ? AArch64::LDPDpost : AArch64::LDRDpost;
      else
        LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    }
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    const int Offset = RPI.Offset;
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (BumpSP)
      MIB.addReg(AArch64::SP, RegState::Define);

    if (RPI.isPaired())
      MIB.addReg(Reg2, getDefRegState(true))
        .addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(Offset) // [sp], #offset * 8  or [sp, #offset * 8]
                        // where the factor * 8 is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    else
      MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(BumpSP ? Offset * 8 : Offset) // post-dec version is unscaled
        .setMIFlag(MachineInstr::FrameDestroy);
  }
  return true;
}
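// An informal note on the immediates above (a summary, not from the original
// source): the indexed forms LDPXi/LDRXui and the paired post-index LDPXpost
// all encode an immediate implicitly scaled by the 8-byte register size,
// while the unpaired post-index LDRXpost takes an unscaled byte offset --
// which is why only the unpaired BumpSP case multiplies Offset by 8.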
Example #22
/// Split - Splits a string of comma-separated items into a vector of strings.
///
static void Split(std::vector<std::string> &V, StringRef S) {
  SmallVector<StringRef, 3> Tmp;
  S.split(Tmp, ',', -1, /*KeepEmpty=*/false);
  V.assign(Tmp.begin(), Tmp.end());
}
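// A minimal usage sketch of Split (illustrative values; not part of the
// original file):
static void SplitUsageExample() {
  std::vector<std::string> Features;
  Split(Features, "+neon,,+crypto");
  // Features now holds {"+neon", "+crypto"}: the empty item between the two
  // commas is dropped because KeepEmpty is false.
  assert(Features.size() == 2 && Features[0] == "+neon");
}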
Example #23
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  StackProtector *SP = &getAnalysis<StackProtector>();

  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of the local
  // area. We currently don't support filling in holes in between fixed sized
  // objects, so we adjust 'Offset' to point to the end of the last fixed
  // sized preallocated object.
  for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address
      // of the object -- which is given by its offset. For a down-growing
      // stack the offset is negative, so we negate it to get the distance.
      FixedOff = -MFI->getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }
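  // (Illustrative only, not from the original source: with a down-growing
  // stack, a fixed object preallocated at offset -24 yields FixedOff = 24,
  // so non-fixed allocation starts at least 24 bytes into the local area.)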

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI->getObjectSize(i);

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = RoundUpToAlignment(Offset, Align);

      MFI->setObjectOffset(i, -Offset);        // Set the computed offset
    }
  } else {
    int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = RoundUpToAlignment(Offset, Align);

      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
    }
  }

  unsigned MaxAlign = MFI->getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and the frame
  // pointer is closer to the incoming than to the final stack pointer.
  const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
                               TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(Fn) &&
                               !RegInfo->needsStackRealignment(Fn));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI->getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI->getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = RoundUpToAlignment(Offset, Align);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
            FIOffset << "]\n");
      MFI->setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI->getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI->getStackProtectorIndex() >= 0) {
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign);

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
      if (MFI->isObjectPreAllocated(i) &&
          MFI->getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI->isDeadObjectIndex(i))
        continue;
      if (MFI->getStackProtectorIndex() == (int)i)
        continue;

      switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
      case StackProtector::SSPLK_None:
        continue;
      case StackProtector::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case StackProtector::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case StackProtector::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
  }

  // Then assign frame offsets to stack objects that are not used to spill
  // callee saved registers.
  for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (MFI->isObjectPreAllocated(i) &&
        MFI->getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    if (MFI->getStackProtectorIndex() == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;

    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
  }

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI->getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment.  If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    Offset = RoundUpToAlignment(Offset, StackAlign);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI->setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}
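// For reference (an informal note mirroring llvm::RoundUpToAlignment from
// MathExtras.h of this era, not part of the original pass):
//   RoundUpToAlignment(Value, Align) == (Value + Align - 1) / Align * Align
// so an Offset of 20 rounded to an 8-byte boundary becomes 24, which is the
// adjustment applied above before each object is placed.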
Example #24
Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
  // If there is no definition of the renamed variable in this block, just use
  // GetValueAtEndOfBlock to do our work.
  if (!HasValueForBlock(BB))
    return GetValueAtEndOfBlock(BB);

  // Otherwise, we have the hard case.  Get the live-in values for each
  // predecessor.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> PredValues;
  Value *SingularValue = nullptr;

  // We can get our predecessor info by walking the pred_iterator list, but it
  // is relatively slow.  If we already have PHI nodes in this block, walk one
  // of them to get the predecessor list instead.
  if (PHINode *SomePhi = dyn_cast<PHINode>(BB->begin())) {
    for (unsigned i = 0, e = SomePhi->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *PredBB = SomePhi->getIncomingBlock(i);
      Value *PredVal = GetValueAtEndOfBlock(PredBB);
      PredValues.push_back(std::make_pair(PredBB, PredVal));

      // Compute SingularValue.
      if (i == 0)
        SingularValue = PredVal;
      else if (PredVal != SingularValue)
        SingularValue = nullptr;
    }
  } else {
    bool isFirstPred = true;
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      BasicBlock *PredBB = *PI;
      Value *PredVal = GetValueAtEndOfBlock(PredBB);
      PredValues.push_back(std::make_pair(PredBB, PredVal));

      // Compute SingularValue.
      if (isFirstPred) {
        SingularValue = PredVal;
        isFirstPred = false;
      } else if (PredVal != SingularValue)
        SingularValue = nullptr;
    }
  }

  // If there are no predecessors, just return undef.
  if (PredValues.empty())
    return UndefValue::get(ProtoType);

  // Otherwise, if all the merged values are the same, just use it.
  if (SingularValue)
    return SingularValue;

  // Otherwise, we do need a PHI: check to see if we already have one available
  // in this block that produces the right value.
  if (isa<PHINode>(BB->begin())) {
    SmallDenseMap<BasicBlock*, Value*, 8> ValueMapping(PredValues.begin(),
                                                       PredValues.end());
    PHINode *SomePHI;
    for (BasicBlock::iterator It = BB->begin();
         (SomePHI = dyn_cast<PHINode>(It)); ++It) {
      if (IsEquivalentPHI(SomePHI, ValueMapping))
        return SomePHI;
    }
  }

  // Ok, we have no way out, insert a new one now.
  PHINode *InsertedPHI = PHINode::Create(ProtoType, PredValues.size(),
                                         ProtoName, &BB->front());

  // Fill in all the predecessors of the PHI.
  for (unsigned i = 0, e = PredValues.size(); i != e; ++i)
    InsertedPHI->addIncoming(PredValues[i].second, PredValues[i].first);

  // See if the PHI node can be merged to a single value.  This can happen in
  // loop cases when we get a PHI of itself and one other value.
  if (Value *V =
          SimplifyInstruction(InsertedPHI, BB->getModule()->getDataLayout())) {
    InsertedPHI->eraseFromParent();
    return V;
  }

  // Set the DebugLoc of the inserted PHI, if available.
  DebugLoc DL;
  if (const Instruction *I = BB->getFirstNonPHI())
      DL = I->getDebugLoc();
  InsertedPHI->setDebugLoc(DL);

  // If the client wants to know about all new instructions, tell it.
  if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);

  DEBUG(dbgs() << "  Inserted PHI: " << *InsertedPHI << "\n");
  return InsertedPHI;
}
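// A minimal usage sketch of the SSAUpdater API exercised above (a hedged
// sketch: DefBB, UseBB, and V are hypothetical, and it assumes
// llvm/Transforms/Utils/SSAUpdater.h is included):
static Value *RewriteUseExample(BasicBlock *DefBB, BasicBlock *UseBB,
                                Value *V) {
  SSAUpdater SSA;
  SSA.Initialize(V->getType(), V->getName());
  SSA.AddAvailableValue(DefBB, V);           // register a known definition
  return SSA.GetValueInMiddleOfBlock(UseBB); // may insert a PHI in UseBB
}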
Example #25
bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
  DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
  LoopInfo *LI = &getAnalysis<LoopInfo>();
  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
  const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  array_pod_sort(ExitBlocks.begin(), ExitBlocks.end());

  SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;

  // The bit we are stealing from the pointer represents whether this basic
  // block is the header of a subloop, in which case we only process its phis.
  typedef PointerIntPair<BasicBlock*, 1> WorklistItem;
  SmallVector<WorklistItem, 16> VisitStack;
  SmallPtrSet<BasicBlock*, 32> Visited;

  bool Changed = false;
  bool LocalChanged;
  do {
    LocalChanged = false;

    VisitStack.clear();
    Visited.clear();

    VisitStack.push_back(WorklistItem(L->getHeader(), false));

    while (!VisitStack.empty()) {
      WorklistItem Item = VisitStack.pop_back_val();
      BasicBlock *BB = Item.getPointer();
      bool IsSubloopHeader = Item.getInt();

      // Simplify instructions in the current basic block.
      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
        Instruction *I = BI++;

        // The first time through the loop ToSimplify is empty and we try to
        // simplify all instructions. On later iterations ToSimplify is not
        // empty and we only bother simplifying instructions that are in it.
        if (!ToSimplify->empty() && !ToSimplify->count(I))
          continue;

        // Don't bother simplifying unused instructions.
        if (!I->use_empty()) {
          Value *V = SimplifyInstruction(I, TD, TLI, DT);
          if (V && LI->replacementPreservesLCSSAForm(I, V)) {
            // Mark all uses for resimplification next time round the loop.
            for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
                 UI != UE; ++UI)
              Next->insert(cast<Instruction>(*UI));

            I->replaceAllUsesWith(V);
            LocalChanged = true;
            ++NumSimplified;
          }
        }
        LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

        if (IsSubloopHeader && !isa<PHINode>(I))
          break;
      }

      // Add all successors to the worklist, except for loop exit blocks and the
      // bodies of subloops. We visit the headers of loops so that we can process
      // their phis, but we contract the rest of the subloop body and only follow
      // edges leading back to the original loop.
      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        BasicBlock *SuccBB = *SI;
        if (!Visited.insert(SuccBB))
          continue;

        const Loop *SuccLoop = LI->getLoopFor(SuccBB);
        if (SuccLoop && SuccLoop->getHeader() == SuccBB
                     && L->contains(SuccLoop)) {
          VisitStack.push_back(WorklistItem(SuccBB, true));

          SmallVector<BasicBlock*, 8> SubLoopExitBlocks;
          SuccLoop->getExitBlocks(SubLoopExitBlocks);

          for (unsigned i = 0; i < SubLoopExitBlocks.size(); ++i) {
            BasicBlock *ExitBB = SubLoopExitBlocks[i];
            if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB))
              VisitStack.push_back(WorklistItem(ExitBB, false));
          }

          continue;
        }

        bool IsExitBlock = std::binary_search(ExitBlocks.begin(),
                                              ExitBlocks.end(), SuccBB);
        if (IsExitBlock)
          continue;

        VisitStack.push_back(WorklistItem(SuccBB, false));
      }
    }

    // Place the list of instructions to simplify on the next loop iteration
    // into ToSimplify.
    std::swap(ToSimplify, Next);
    Next->clear();

    Changed |= LocalChanged;
  } while (LocalChanged);

  return Changed;
}
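// A small illustration of the PointerIntPair trick behind WorklistItem above
// (an informal sketch assuming llvm/ADT/PointerIntPair.h): the low alignment
// bits of a BasicBlock* are always zero, so one of them can carry the
// "is subloop header" flag with no extra storage.
static bool WorklistItemExample(BasicBlock *BB) {
  PointerIntPair<BasicBlock*, 1> Item(BB, /*IntVal=*/true);
  assert(Item.getPointer() == BB && "pointer is stored untouched");
  return Item.getInt(); // the stolen bit: true == subloop header
}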
Example #26
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
  DEBUG(dbgs() << "********** Stack Coloring **********\n"
               << "********** Function: "
               << ((const Value*)Func.getFunction())->getName() << '\n');
  MF = &Func;
  MFI = MF->getFrameInfo();
  Indexes = &getAnalysis<SlotIndexes>();
  SP = &getAnalysis<StackProtector>();
  BlockLiveness.clear();
  BasicBlocks.clear();
  BasicBlockNumbering.clear();
  Markers.clear();
  Intervals.clear();
  VNInfoAllocator.Reset();

  unsigned NumSlots = MFI->getObjectIndexEnd();

  // If there are no stack slots then there are no markers to remove.
  if (!NumSlots)
    return false;

  SmallVector<int, 8> SortedSlots;
  SortedSlots.reserve(NumSlots);
  Intervals.reserve(NumSlots);

  unsigned NumMarkers = collectMarkers(NumSlots);

  unsigned TotalSize = 0;
  DEBUG(dbgs()<<"Found "<<NumMarkers<<" markers and "<<NumSlots<<" slots\n");
  DEBUG(dbgs()<<"Slot structure:\n");

  for (int i=0; i < MFI->getObjectIndexEnd(); ++i) {
    DEBUG(dbgs()<<"Slot #"<<i<<" - "<<MFI->getObjectSize(i)<<" bytes.\n");
    TotalSize += MFI->getObjectSize(i);
  }

  DEBUG(dbgs()<<"Total Stack size: "<<TotalSize<<" bytes\n\n");

  // Don't continue if there are not enough lifetime markers, or the stack is
  // too small, or we are told not to optimize the slots.
  if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
      skipFunction(*Func.getFunction())) {
    DEBUG(dbgs()<<"Will not try to merge slots.\n");
    return removeAllMarkers();
  }

  for (unsigned i=0; i < NumSlots; ++i) {
    std::unique_ptr<LiveInterval> LI(new LiveInterval(i, 0));
    LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
    Intervals.push_back(std::move(LI));
    SortedSlots.push_back(i);
  }

  // Calculate the liveness of each block.
  calculateLocalLiveness();
  DEBUG(dbgs() << "Dataflow iterations: " << NumIterations << "\n");
  DEBUG(dump());

  // Propagate the liveness information.
  calculateLiveIntervals(NumSlots);
  DEBUG(dumpIntervals());

  // Search for allocas which are used outside of the declared lifetime
  // markers.
  if (ProtectFromEscapedAllocas)
    removeInvalidSlotRanges();

  // Maps old slots to new slots.
  DenseMap<int, int> SlotRemap;
  unsigned RemovedSlots = 0;
  unsigned ReducedSize = 0;

  // Do not bother looking at empty intervals.
  for (unsigned I = 0; I < NumSlots; ++I) {
    if (Intervals[SortedSlots[I]]->empty())
      SortedSlots[I] = -1;
  }

  // This is a simple greedy algorithm for merging allocas. First, sort the
  // slots, placing the largest slots first. Next, perform an n^2 scan and look
  // for disjoint slots. When you find disjoint slots, merge the smaller one
  // into the bigger one and update the live interval. Remove the small alloca
  // and continue.

  // Sort the slots according to their size. Place unused slots at the end.
  // Use stable sort to guarantee deterministic code generation.
  std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
                   [this](int LHS, int RHS) {
    // We use -1 to denote an uninteresting slot. Place these slots at the end.
    if (LHS == -1) return false;
    if (RHS == -1) return true;
    // Sort according to size.
    return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
  });

  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (unsigned I = 0; I < NumSlots; ++I) {
      if (SortedSlots[I] == -1)
        continue;

      for (unsigned J=I+1; J < NumSlots; ++J) {
        if (SortedSlots[J] == -1)
          continue;

        int FirstSlot = SortedSlots[I];
        int SecondSlot = SortedSlots[J];
        LiveInterval *First = &*Intervals[FirstSlot];
        LiveInterval *Second = &*Intervals[SecondSlot];
        assert (!First->empty() && !Second->empty() && "Found an empty range");

        // Merge disjoint slots.
        if (!First->overlaps(*Second)) {
          Changed = true;
          First->MergeSegmentsInAsValue(*Second, First->getValNumInfo(0));
          SlotRemap[SecondSlot] = FirstSlot;
          SortedSlots[J] = -1;
          DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
                SecondSlot<<" together.\n");
          unsigned MaxAlignment = std::max(MFI->getObjectAlignment(FirstSlot),
                                           MFI->getObjectAlignment(SecondSlot));

          assert(MFI->getObjectSize(FirstSlot) >=
                 MFI->getObjectSize(SecondSlot) &&
                 "Merging a small object into a larger one");

          RemovedSlots+=1;
          ReducedSize += MFI->getObjectSize(SecondSlot);
          MFI->setObjectAlignment(FirstSlot, MaxAlignment);
          MFI->RemoveStackObject(SecondSlot);
        }
      }
    }
  }// While changed.

  // Record statistics.
  StackSpaceSaved += ReducedSize;
  StackSlotMerged += RemovedSlots;
  DEBUG(dbgs()<<"Merge "<<RemovedSlots<<" slots. Saved "<<
        ReducedSize<<" bytes\n");

  // Scan the entire function and update all machine operands that use frame
  // indices to use the remapped frame index.
  expungeSlotMap(SlotRemap, NumSlots);
  remapInstructions(SlotRemap);

  return removeAllMarkers();
}
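// An informal picture of the merge test above (not from the original
// source): two slots may share storage exactly when their live intervals
// are disjoint, e.g.
//   slot A live over [0,10), slot B live over [12,20)  -> merged, B -> A
//   slot A live over [0,10), slot C live over [ 8,20)  -> overlap, kept apart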
Example #27
std::vector<std::string> ArgList::getAllArgValues(OptSpecifier Id) const {
  SmallVector<const char *, 16> Values;
  AddAllArgValues(Values, Id);
  return std::vector<std::string>(Values.begin(), Values.end());
}
Example #28
// NewBB is split and now it has one successor. Update dominance frontier to
// reflect this change.
void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
  assert(NewBB->getTerminator()->getNumSuccessors() == 1
         && "NewBB should have a single successor!");
  BasicBlock *NewBBSucc = NewBB->getTerminator()->getSuccessor(0);

  SmallVector<BasicBlock*, 8> PredBlocks;
  for (pred_iterator PI = pred_begin(NewBB), PE = pred_end(NewBB);
       PI != PE; ++PI)
    PredBlocks.push_back(*PI);  

  if (PredBlocks.empty())
    // If NewBB does not have any predecessors then it is an entry block.
    // In this case, NewBB and its successor NewBBSucc dominate all other
    // blocks.
    return;

  // NewBBSucc inherits original NewBB frontier.
  DominanceFrontier::iterator NewBBI = find(NewBB);
  if (NewBBI != end()) {
    DominanceFrontier::DomSetType NewBBSet = NewBBI->second;
    DominanceFrontier::DomSetType NewBBSuccSet;
    NewBBSuccSet.insert(NewBBSet.begin(), NewBBSet.end());
    addBasicBlock(NewBBSucc, NewBBSuccSet);
  }

  // If NewBB dominates NewBBSucc, then DF(NewBB) is now going to be
  // DF(NewBBSucc) minus the blocks for which NewBB does not dominate any
  // predecessor.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (DT.dominates(NewBB, NewBBSucc)) {
    DominanceFrontier::iterator DFI = find(NewBBSucc);
    if (DFI != end()) {
      DominanceFrontier::DomSetType Set = DFI->second;
      // Filter out blocks in Set for which NewBB does not dominate any
      // predecessor.
      for (DominanceFrontier::DomSetType::iterator SetI = Set.begin(),
             E = Set.end(); SetI != E;) {
        bool DominatesPred = false;
        for (pred_iterator PI = pred_begin(*SetI), E = pred_end(*SetI);
             PI != E; ++PI)
          if (DT.dominates(NewBB, *PI))
            DominatesPred = true;
        if (!DominatesPred)
          Set.erase(SetI++);
        else
          ++SetI;
      }

      if (NewBBI != end()) {
        for (DominanceFrontier::DomSetType::iterator SetI = Set.begin(),
               E = Set.end(); SetI != E; ++SetI) {
          BasicBlock *SB = *SetI;
          addToFrontier(NewBBI, SB);
        }
      } else 
        addBasicBlock(NewBB, Set);
    }
    
  } else {
    // DF(NewBB) is {NewBBSucc} because NewBB does not strictly dominate
    // NewBBSucc, but it does dominate itself (and there is an edge (NewBB ->
    // NewBBSucc)).  NewBBSucc is the single successor of NewBB.
    DominanceFrontier::DomSetType NewDFSet;
    NewDFSet.insert(NewBBSucc);
    addBasicBlock(NewBB, NewDFSet);
  }
  
  // Now we must loop over all of the dominance frontiers in the function,
  // replacing occurrences of NewBBSucc with NewBB in some cases.  All
  // blocks that dominate a block in PredBlocks and contained NewBBSucc in
  // their dominance frontier must be updated to contain NewBB instead.
  //
  for (Function::iterator FI = NewBB->getParent()->begin(),
         FE = NewBB->getParent()->end(); FI != FE; ++FI) {
    DominanceFrontier::iterator DFI = find(FI);
    if (DFI == end()) continue;  // unreachable block.
    
    // Only consider nodes that have NewBBSucc in their dominator frontier.
    if (!DFI->second.count(NewBBSucc)) continue;

    // Verify whether this block dominates a block in predblocks.  If not, do
    // not update it.
    bool BlockDominatesAny = false;
    for (SmallVectorImpl<BasicBlock*>::const_iterator BI = PredBlocks.begin(), 
           BE = PredBlocks.end(); BI != BE; ++BI) {
      if (DT.dominates(FI, *BI)) {
        BlockDominatesAny = true;
        break;
      }
    }

    // If NewBBSucc should not stay in our dominator frontier, remove it.
    // We remove it unless there is a predecessor of NewBBSucc that we
    // dominate, but we don't strictly dominate NewBBSucc.
    bool ShouldRemove = true;
    if ((BasicBlock*)FI == NewBBSucc || !DT.dominates(FI, NewBBSucc)) {
      // Okay, we know that this block (FI) does not strictly dominate
      // NewBBSucc.
      // Check to see if it dominates any predecessors of NewBBSucc.
      for (pred_iterator PI = pred_begin(NewBBSucc),
           E = pred_end(NewBBSucc); PI != E; ++PI)
        if (DT.dominates(FI, *PI)) {
          ShouldRemove = false;
          break;
        }
    }
    
    if (ShouldRemove)
      removeFromFrontier(DFI, NewBBSucc);
    if (BlockDominatesAny && (&*FI == NewBB || !DT.dominates(FI, NewBB)))
      addToFrontier(DFI, NewBB);
  }
}
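// For reference (the standard definition, not from the original source): the
// dominance frontier DF(X) is the set of blocks Y such that X dominates a
// predecessor of Y but does not strictly dominate Y itself. The updates
// above preserve exactly this property for NewBB, NewBBSucc, and every block
// whose frontier contained NewBBSucc.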
Example #29
/// TailDuplicate - If it is profitable, duplicate TailBB's contents in each
/// of its predecessors.
bool
TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
                                 bool IsSimple,
                                 MachineFunction &MF,
                                 SmallVector<MachineBasicBlock*, 8> &TDBBs,
                                 SmallVector<MachineInstr*, 16> &Copies) {
    DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');

    DenseSet<unsigned> UsedByPhi;
    getRegsUsedByPHIs(*TailBB, &UsedByPhi);

    if (IsSimple)
        return duplicateSimpleBB(TailBB, TDBBs, UsedByPhi, Copies);

    // Iterate through all the unique predecessors and tail-duplicate this
    // block into them, if possible. Copying the list ahead of time also
    // avoids trouble with the predecessor list reallocating.
    bool Changed = false;
    SmallSetVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),
            TailBB->pred_end());
    for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
            PE = Preds.end(); PI != PE; ++PI) {
        MachineBasicBlock *PredBB = *PI;

        assert(TailBB != PredBB &&
               "Single-block loop should have been rejected earlier!");
        // EH edges are ignored by AnalyzeBranch.
        if (PredBB->succ_size() > 1)
            continue;

        MachineBasicBlock *PredTBB, *PredFBB;
        SmallVector<MachineOperand, 4> PredCond;
        if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
            continue;
        if (!PredCond.empty())
            continue;
        // Don't duplicate into a fall-through predecessor (at least for now).
        if (PredBB->isLayoutSuccessor(TailBB) && PredBB->canFallThrough())
            continue;

        DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
              << "From Succ: " << *TailBB);

        TDBBs.push_back(PredBB);

        // Remove PredBB's unconditional branch.
        TII->RemoveBranch(*PredBB);

        if (RS && !TailBB->livein_empty()) {
            // Update PredBB livein.
            RS->enterBasicBlock(PredBB);
            if (!PredBB->empty())
                RS->forward(prior(PredBB->end()));
            BitVector RegsLiveAtExit(TRI->getNumRegs());
            RS->getRegsUsed(RegsLiveAtExit, false);
            for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(),
                    E = TailBB->livein_end(); I != E; ++I) {
                if (!RegsLiveAtExit[*I])
                    // If a register is previously livein to the tail but it's not live
                    // at the end of predecessor BB, then it should be added to its
                    // livein list.
                    PredBB->addLiveIn(*I);
            }
        }

        // Clone the contents of TailBB into PredBB.
        DenseMap<unsigned, unsigned> LocalVRMap;
        SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
        // Use instr_iterator here to properly handle bundles, e.g.
        // ARM Thumb2 IT block.
        MachineBasicBlock::instr_iterator I = TailBB->instr_begin();
        while (I != TailBB->instr_end()) {
            MachineInstr *MI = &*I;
            ++I;
            if (MI->isPHI()) {
                // Replace the uses of the def of the PHI with the register coming
                // from PredBB.
                ProcessPHI(MI, TailBB, PredBB, LocalVRMap, CopyInfos, UsedByPhi, true);
            } else {
                // Replace def of virtual registers with new registers, and update
                // uses with PHI source register or the new registers.
                DuplicateInstruction(MI, TailBB, PredBB, MF, LocalVRMap, UsedByPhi);
            }
        }
        MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
        for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
            Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
                                     TII->get(TargetOpcode::COPY),
                                     CopyInfos[i].first).addReg(CopyInfos[i].second));
        }

        // Simplify
        TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true);

        NumInstrDups += TailBB->size() - 1; // subtract one for removed branch

        // Update the CFG.
        PredBB->removeSuccessor(PredBB->succ_begin());
        assert(PredBB->succ_empty() &&
               "TailDuplicate called on block with multiple successors!");
        for (MachineBasicBlock::succ_iterator I = TailBB->succ_begin(),
                E = TailBB->succ_end(); I != E; ++I)
            PredBB->addSuccessor(*I);

        Changed = true;
        ++NumTailDups;
    }

    // If TailBB was duplicated into all its predecessors except for the prior
    // block, which falls through unconditionally, move the contents of this
    // block into the prior block.
    MachineBasicBlock *PrevBB = prior(MachineFunction::iterator(TailBB));
    MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
    SmallVector<MachineOperand, 4> PriorCond;
    // This has to check PrevBB->succ_size() because EH edges are ignored by
    // AnalyzeBranch.
    if (PrevBB->succ_size() == 1 &&
            !TII->AnalyzeBranch(*PrevBB, PriorTBB, PriorFBB, PriorCond, true) &&
            PriorCond.empty() && !PriorTBB && TailBB->pred_size() == 1 &&
            !TailBB->hasAddressTaken()) {
        DEBUG(dbgs() << "\nMerging into block: " << *PrevBB
              << "From MBB: " << *TailBB);
        if (PreRegAlloc) {
            DenseMap<unsigned, unsigned> LocalVRMap;
            SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
            MachineBasicBlock::iterator I = TailBB->begin();
            // Process PHI instructions first.
            while (I != TailBB->end() && I->isPHI()) {
                // Replace the uses of the def of the PHI with the register coming
                // from PrevBB.
                MachineInstr *MI = &*I++;
                ProcessPHI(MI, TailBB, PrevBB, LocalVRMap, CopyInfos, UsedByPhi, true);
                if (MI->getParent())
                    MI->eraseFromParent();
            }

            // Now copy the non-PHI instructions.
            while (I != TailBB->end()) {
                // Replace def of virtual registers with new registers, and update
                // uses with PHI source register or the new registers.
                MachineInstr *MI = &*I++;
                assert(!MI->isBundle() && "Not expecting bundles before regalloc!");
                DuplicateInstruction(MI, TailBB, PrevBB, MF, LocalVRMap, UsedByPhi);
                MI->eraseFromParent();
            }
            MachineBasicBlock::iterator Loc = PrevBB->getFirstTerminator();
            for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
                Copies.push_back(BuildMI(*PrevBB, Loc, DebugLoc(),
                                         TII->get(TargetOpcode::COPY),
                                         CopyInfos[i].first)
                                 .addReg(CopyInfos[i].second));
            }
        } else {
            // No PHIs to worry about, just splice the instructions over.
            PrevBB->splice(PrevBB->end(), TailBB, TailBB->begin(), TailBB->end());
        }
        PrevBB->removeSuccessor(PrevBB->succ_begin());
        assert(PrevBB->succ_empty());
        PrevBB->transferSuccessors(TailBB);
        TDBBs.push_back(PrevBB);
        Changed = true;
    }

    // If this is after register allocation, there are no phis to fix.
    if (!PreRegAlloc)
        return Changed;

    // If we made no changes so far, we are safe.
    if (!Changed)
        return Changed;


    // Handle the nasty case in which we duplicated a block that is part of a
    // loop into some but not all of its predecessors. For example:
    //    1 -> 2 <-> 3                 |
    //          \                      |
    //           \---> rest            |
    // if we duplicate 2 into 1 but not into 3, we end up with
    // 12 -> 3 <-> 2 -> rest           |
    //   \             /               |
    //    \----->-----/                |
    // If there was a "var = phi(1, 3)" in 2, it has to be ultimately replaced
    // with a phi in 3 (which now dominates 2).
    // What we do here is introduce a copy in 3 of the register defined by the
    // phi, just like when we are duplicating 2 into 3, but we don't copy any
    // real instructions or remove the 3 -> 2 edge from the phi in 2.
    for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
            PE = Preds.end(); PI != PE; ++PI) {
        MachineBasicBlock *PredBB = *PI;
        if (std::find(TDBBs.begin(), TDBBs.end(), PredBB) != TDBBs.end())
            continue;

        // EH edges
        if (PredBB->succ_size() != 1)
            continue;

        DenseMap<unsigned, unsigned> LocalVRMap;
        SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
        MachineBasicBlock::iterator I = TailBB->begin();
        // Process PHI instructions first.
        while (I != TailBB->end() && I->isPHI()) {
            // Replace the uses of the def of the PHI with the register coming
            // from PredBB.
            MachineInstr *MI = &*I++;
            ProcessPHI(MI, TailBB, PredBB, LocalVRMap, CopyInfos, UsedByPhi, false);
        }
        MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
        for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
            Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
                                     TII->get(TargetOpcode::COPY),
                                     CopyInfos[i].first).addReg(CopyInfos[i].second));
        }
    }

    return Changed;
}
Example #30
void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
                 SmallVectorImpl<char> &OutStr) const {

  /// FormattedArgs - Keep track of all of the arguments formatted by
  /// ConvertArgToString and pass them into subsequent calls to
  /// ConvertArgToString, allowing the implementation to avoid redundancies in
  /// obvious cases.
  SmallVector<DiagnosticsEngine::ArgumentValue, 8> FormattedArgs;

  /// QualTypeVals - Pass a vector of arrays so that QualType names can be
  /// compared to see if more information is needed to be printed.
  SmallVector<intptr_t, 2> QualTypeVals;
  SmallVector<char, 64> Tree;

  for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
    if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
      QualTypeVals.push_back(getRawArg(i));

  while (DiagStr != DiagEnd) {
    if (DiagStr[0] != '%') {
      // Append the substring up to the next '%' to OutStr.
      const char *StrEnd = std::find(DiagStr, DiagEnd, '%');
      OutStr.append(DiagStr, StrEnd);
      DiagStr = StrEnd;
      continue;
    } else if (ispunct(DiagStr[1])) {
      OutStr.push_back(DiagStr[1]);  // %% -> %.
      DiagStr += 2;
      continue;
    }

    // Skip the %.
    ++DiagStr;

    // This must be a placeholder for a diagnostic argument.  The format for a
    // placeholder is one of "%0", "%modifier0", or "%modifier{arguments}0".
    // The digit is a number from 0-9 indicating which argument this comes from.
    // The modifier is a string of characters from the set [-a-z]+, and the
    // arguments are a brace-enclosed string.
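    // (Hypothetical examples of this grammar: "%0" prints argument 0
    // directly, "%select{good|bad|ugly}1" picks an alternative based on the
    // value of integer argument 1, and "%diff{$ vs $|mismatch}2,3" takes the
    // qualtype-pair path handled below.)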
    const char *Modifier = 0, *Argument = 0;
    unsigned ModifierLen = 0, ArgumentLen = 0;

    // Check to see if we have a modifier.  If so eat it.
    if (!isdigit(DiagStr[0])) {
      Modifier = DiagStr;
      while (DiagStr[0] == '-' ||
             (DiagStr[0] >= 'a' && DiagStr[0] <= 'z'))
        ++DiagStr;
      ModifierLen = DiagStr-Modifier;

      // If we have an argument, get it next.
      if (DiagStr[0] == '{') {
        ++DiagStr; // Skip {.
        Argument = DiagStr;

        DiagStr = ScanFormat(DiagStr, DiagEnd, '}');
        assert(DiagStr != DiagEnd && "Mismatched {}'s in diagnostic string!");
        ArgumentLen = DiagStr-Argument;
        ++DiagStr;  // Skip }.
      }
    }

    assert(isdigit(*DiagStr) && "Invalid format for argument in diagnostic");
    unsigned ArgNo = *DiagStr++ - '0';

    // Only used for type diffing.
    unsigned ArgNo2 = ArgNo;

    DiagnosticsEngine::ArgumentKind Kind = getArgKind(ArgNo);
    if (Kind == DiagnosticsEngine::ak_qualtype &&
        ModifierIs(Modifier, ModifierLen, "diff")) {
      Kind = DiagnosticsEngine::ak_qualtype_pair;
      assert(*DiagStr == ',' && isdigit(*(DiagStr + 1)) &&
             "Invalid format for diff modifier");
      ++DiagStr;  // Comma.
      ArgNo2 = *DiagStr++ - '0';
      assert(getArgKind(ArgNo2) == DiagnosticsEngine::ak_qualtype &&
             "Second value of type diff must be a qualtype");
    }
    
    switch (Kind) {
    // ---- STRINGS ----
    case DiagnosticsEngine::ak_std_string: {
      const std::string &S = getArgStdStr(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");
      OutStr.append(S.begin(), S.end());
      break;
    }
    case DiagnosticsEngine::ak_c_string: {
      const char *S = getArgCStr(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");

      // Don't crash if we get passed a null pointer by accident.
      if (!S)
        S = "(null)";

      OutStr.append(S, S + strlen(S));
      break;
    }
    // ---- INTEGERS ----
    case DiagnosticsEngine::ak_sint: {
      int Val = getArgSInt(ArgNo);

      if (ModifierIs(Modifier, ModifierLen, "select")) {
        HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "s")) {
        HandleIntegerSModifier(Val, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
        HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
        HandleOrdinalModifier((unsigned)Val, OutStr);
      } else {
        assert(ModifierLen == 0 && "Unknown integer modifier");
        llvm::raw_svector_ostream(OutStr) << Val;
      }
      break;
    }
    case DiagnosticsEngine::ak_uint: {
      unsigned Val = getArgUInt(ArgNo);

      if (ModifierIs(Modifier, ModifierLen, "select")) {
        HandleSelectModifier(*this, Val, Argument, ArgumentLen, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "s")) {
        HandleIntegerSModifier(Val, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
        HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
        HandleOrdinalModifier(Val, OutStr);
      } else {
        assert(ModifierLen == 0 && "Unknown integer modifier");
        llvm::raw_svector_ostream(OutStr) << Val;
      }
      break;
    }
    // ---- NAMES and TYPES ----
    case DiagnosticsEngine::ak_identifierinfo: {
      const IdentifierInfo *II = getArgIdentifier(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");

      // Don't crash if we get passed a null pointer by accident.
      if (!II) {
        const char *S = "(null)";
        OutStr.append(S, S + strlen(S));
        continue;
      }

      llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
      break;
    }
    case DiagnosticsEngine::ak_qualtype:
    case DiagnosticsEngine::ak_declarationname:
    case DiagnosticsEngine::ak_nameddecl:
    case DiagnosticsEngine::ak_nestednamespec:
    case DiagnosticsEngine::ak_declcontext:
      getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      break;
    case DiagnosticsEngine::ak_qualtype_pair:
      // Create a struct with all the info needed for printing.
      TemplateDiffTypes TDT;
      TDT.FromType = getRawArg(ArgNo);
      TDT.ToType = getRawArg(ArgNo2);
      TDT.ElideType = getDiags()->ElideType;
      TDT.ShowColors = getDiags()->ShowColors;
      intptr_t val = reinterpret_cast<intptr_t>(&TDT);

      const char *ArgumentEnd = Argument + ArgumentLen;
      const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');

      // Print the tree.
      if (getDiags()->PrintTemplateTree) {
        TDT.PrintFromType = true;
        TDT.PrintTree = true;
        getDiags()->ConvertArgToString(Kind, val,
                                       Modifier, ModifierLen,
                                       Argument, ArgumentLen,
                                       FormattedArgs.data(),
                                       FormattedArgs.size(),
                                       Tree, QualTypeVals);
        // If there is no tree information, fall back to regular printing.
        if (!Tree.empty()) {
          FormatDiagnostic(Pipe + 1, ArgumentEnd, OutStr);
          break;
        }
      }

      // Non-tree printing, also the fall-back when tree printing fails.
      // The fall-back is triggered when the types compared are not templates.
      const char *FirstDollar = ScanFormat(Argument, ArgumentEnd, '$');
      const char *SecondDollar = ScanFormat(FirstDollar + 1, ArgumentEnd, '$');

      // Append before text
      FormatDiagnostic(Argument, FirstDollar, OutStr);

      // Append first type
      TDT.PrintTree = false;
      TDT.PrintFromType = true;
      getDiags()->ConvertArgToString(Kind, val,
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      // Append middle text
      FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);

      // Append second type
      TDT.PrintFromType = false;
      getDiags()->ConvertArgToString(Kind, val,
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      // Append end text
      FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
      break;
    }
    
    // Remember this argument info for subsequent formatting operations.  Turn
    // std::strings into null-terminated strings so they are handled the same
    // way as all the other argument kinds.
    if (Kind == DiagnosticsEngine::ak_qualtype_pair)
      continue;
    else if (Kind != DiagnosticsEngine::ak_std_string)
      FormattedArgs.push_back(std::make_pair(Kind, getRawArg(ArgNo)));
    else
      FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
                                        (intptr_t)getArgStdStr(ArgNo).c_str()));
    
  }

  // Append the type tree to the end of the diagnostics.
  OutStr.append(Tree.begin(), Tree.end());
}