// Calculate the largest possible vregsPassed sets. These are the registers that
// can pass through an MBB live, but may not be live every time. It is assumed
// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  // First push live-out regs to successors' vregsPassed. Remember the MBBs that
  // have any vregsPassed.
  DenseSet<const MachineBasicBlock*> todo;
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock &MBB(*MFI);
    BBInfo &MInfo = MBBInfoMap[&MBB];
    if (!MInfo.reachable)
      continue;
    for (MachineBasicBlock::const_succ_iterator SuI = MBB.succ_begin(),
           SuE = MBB.succ_end(); SuI != SuE; ++SuI) {
      BBInfo &SInfo = MBBInfoMap[*SuI];
      if (SInfo.addPassed(MInfo.regsLiveOut))
        todo.insert(*SuI);
    }
  }

  // Iteratively push vregsPassed to successors. This will converge to the same
  // final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (MachineBasicBlock::const_succ_iterator SuI = MBB->succ_begin(),
           SuE = MBB->succ_end(); SuI != SuE; ++SuI) {
      if (*SuI == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[*SuI];
      if (SInfo.addPassed(MInfo.vregsPassed))
        todo.insert(*SuI);
    }
  }
}
// Calculate the set of virtual registers that must be passed through each basic
// block in order to satisfy the requirements of successor blocks. This is very
// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  DenseSet<const MachineBasicBlock*> todo;
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock &MBB(*MFI);
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (MachineBasicBlock::const_pred_iterator PrI = MBB.pred_begin(),
           PrE = MBB.pred_end(); PrI != PrE; ++PrI) {
      BBInfo &PInfo = MBBInfoMap[*PrI];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(*PrI);
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
           PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
      if (*PrI == MBB)
        continue;
      BBInfo &PInfo = MBBInfoMap[*PrI];
      if (PInfo.addRequired(MInfo.vregsRequired))
        todo.insert(*PrI);
    }
  }
}
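
// The two worklist loops above terminate because addPassed/addRequired only
// report true when they actually grow the destination set, so each block is
// re-queued a bounded number of times. The real helpers are members of
// MachineVerifier's BBInfo; a minimal sketch of the contract they must
// satisfy (a set union that reports whether anything changed), assuming the
// verifier's register sets are DenseSet<unsigned> and with BBInfoSketch as a
// hypothetical stand-in:
struct BBInfoSketch {
  DenseSet<unsigned> vregsPassed;
  DenseSet<unsigned> vregsRequired;

  // Returns true iff at least one new register was inserted; this is what
  // re-queues a block and drives the iteration to a fixed point.
  bool addPassed(const DenseSet<unsigned> &Regs) {
    bool Changed = false;
    for (unsigned Reg : Regs)
      Changed |= vregsPassed.insert(Reg).second;
    return Changed;
  }

  // Mirror image used by calcRegsRequired.
  bool addRequired(const DenseSet<unsigned> &Regs) {
    bool Changed = false;
    for (unsigned Reg : Regs)
      Changed |= vregsRequired.insert(Reg).second;
    return Changed;
  }
};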
static std::unique_ptr<MemoryBuffer>
ProcessThinLTOModule(Module &TheModule, ModuleSummaryIndex &Index,
                     StringMap<MemoryBufferRef> &ModuleMap, TargetMachine &TM,
                     const FunctionImporter::ImportMapTy &ImportList,
                     const FunctionImporter::ExportSetTy &ExportList,
                     const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
                     const GVSummaryMapTy &DefinedGlobals,
                     const ThinLTOCodeGenerator::CachingOptions &CacheOptions,
                     bool DisableCodeGen, StringRef SaveTempsDir,
                     bool Freestanding, unsigned OptLevel, unsigned count) {

  // "Benchmark"-like optimization: single-source case
  bool SingleModule = (ModuleMap.size() == 1);

  if (!SingleModule) {
    promoteModule(TheModule, Index);

    // Apply summary-based LinkOnce/Weak resolution decisions.
    thinLTOResolveWeakForLinkerModule(TheModule, DefinedGlobals);

    // Save temps: after promotion.
    saveTempBitcode(TheModule, SaveTempsDir, count, ".1.promoted.bc");
  }

  // Be friendly: don't totally nuke the module when the client didn't supply
  // anything to preserve.
  if (!ExportList.empty() || !GUIDPreservedSymbols.empty()) {
    // Apply summary-based internalization decisions.
    thinLTOInternalizeModule(TheModule, DefinedGlobals);
  }

  // Save temps: after internalization.
  saveTempBitcode(TheModule, SaveTempsDir, count, ".2.internalized.bc");

  if (!SingleModule) {
    crossImportIntoModule(TheModule, Index, ModuleMap, ImportList);

    // Save temps: after cross-module import.
    saveTempBitcode(TheModule, SaveTempsDir, count, ".3.imported.bc");
  }

  optimizeModule(TheModule, TM, OptLevel, Freestanding);

  saveTempBitcode(TheModule, SaveTempsDir, count, ".4.opt.bc");

  if (DisableCodeGen) {
    // Configured to stop before CodeGen, serialize the bitcode and return.
    SmallVector<char, 128> OutputBuffer;
    {
      raw_svector_ostream OS(OutputBuffer);
      ProfileSummaryInfo PSI(TheModule);
      // Build a fresh summary for the module being emitted; named ModuleIndex
      // to avoid shadowing the combined-index parameter 'Index'.
      auto ModuleIndex = buildModuleSummaryIndex(TheModule, nullptr, &PSI);
      WriteBitcodeToFile(TheModule, OS, true, &ModuleIndex);
    }
    return make_unique<SmallVectorMemoryBuffer>(std::move(OutputBuffer));
  }

  return codegenModule(TheModule, TM);
}
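
// ProcessThinLTOModule leans on a saveTempBitcode helper to dump intermediate
// bitcode at each pipeline stage when a save-temps directory is set. A minimal
// sketch of such a helper follows; the path scheme and error handling are
// assumptions, not the exact upstream code.
static void saveTempBitcode(const Module &TheModule, StringRef TempDir,
                            unsigned Count, StringRef Suffix) {
  if (TempDir.empty())
    return; // Saving temps was not requested.
  // Hypothetical naming scheme: <dir><count><suffix>, e.g. "0.1.promoted.bc".
  std::string Path = (TempDir + llvm::Twine(Count) + Suffix).str();
  std::error_code EC;
  raw_fd_ostream OS(Path, EC, sys::fs::F_None);
  if (EC)
    report_fatal_error("ThinLTO: failed to open " + Path + " for writing");
  WriteBitcodeToFile(TheModule, OS, /*ShouldPreserveUseListOrder=*/true);
}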
// Find the argument nodes we need to add to the functions.
void CSDataRando::findFunctionArgNodes(const std::vector<const Function *> &Functions) {
  std::vector<DSNodeHandle> RootNodes;
  for (const Function *F : Functions) {
    DSGraph *G = DSA->getDSGraph(*F);
    G->getFunctionArgumentsForCall(F, RootNodes);
  }

  // No additional args to pass.
  if (RootNodes.empty()) {
    return;
  }

  DenseSet<const DSNode*> MarkedNodes;
  for (DSNodeHandle &NH : RootNodes) {
    if (DSNode *N = NH.getNode()) {
      N->markReachableNodes(MarkedNodes);
    }
  }

  // Remove global nodes from the arg nodes. When using the bottom-up analysis,
  // a global node is handled through the global map in every context, so it
  // never needs to be passed as an extra argument.
  for (auto i : GlobalNodes) {
    MarkedNodes.erase(i);
  }

  // Remove any nodes that are marked do-not-encrypt.
  SmallVector<const DSNode*, 8> MarkedNodeWorkList;
  for (auto i : MarkedNodes) {
    if (i->isDoNotEncryptNode()) {
      MarkedNodeWorkList.push_back(i);
    }
  }
  for (auto i : MarkedNodeWorkList) {
    MarkedNodes.erase(i);
  }

  if (MarkedNodes.empty()) {
    return;
  }

  // Create a FuncInfo entry for each of the functions with the arg nodes that
  // need to be passed.
  for (const Function *F : Functions) {
    FuncInfo &FI = FunctionInfo[F];
    FI.ArgNodes.insert(FI.ArgNodes.end(), MarkedNodes.begin(), MarkedNodes.end());
  }
}
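
// markReachableNodes above computes the transitive closure of points-to edges
// into the DenseSet. A generic worklist sketch of that pattern follows;
// NodeSketch and its Children list are hypothetical stand-ins for DSNode and
// its outgoing links.
struct NodeSketch {
  SmallVector<const NodeSketch *, 4> Children;
};

static void markReachable(const NodeSketch *Root,
                          DenseSet<const NodeSketch *> &Marked) {
  SmallVector<const NodeSketch *, 16> Worklist;
  // insert().second doubles as the visited check, so each node is expanded
  // at most once even in cyclic graphs.
  if (Marked.insert(Root).second)
    Worklist.push_back(Root);
  while (!Worklist.empty()) {
    const NodeSketch *N = Worklist.pop_back_val();
    for (const NodeSketch *Child : N->Children)
      if (Marked.insert(Child).second)
        Worklist.push_back(Child);
  }
}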
static std::unique_ptr<Module>
getModuleForFile(LLVMContext &Context, claimed_file &F, raw_fd_ostream *ApiFile,
                 StringSet<> &Internalize, StringSet<> &Maybe) {
  ld_plugin_input_file File;
  if (get_input_file(F.handle, &File) != LDPS_OK)
    message(LDPL_FATAL, "Failed to get file information");

  if (get_symbols(F.handle, F.syms.size(), &F.syms[0]) != LDPS_OK)
    message(LDPL_FATAL, "Failed to get symbol information");

  const void *View;
  if (get_view(F.handle, &View) != LDPS_OK)
    message(LDPL_FATAL, "Failed to get a view of file");

  std::unique_ptr<MemoryBuffer> Buffer = MemoryBuffer::getMemBuffer(
      StringRef((char *)View, File.filesize), "", false);

  if (release_input_file(F.handle) != LDPS_OK)
    message(LDPL_FATAL, "Failed to release file information");

  ErrorOr<Module *> MOrErr = getLazyBitcodeModule(std::move(Buffer), Context);

  if (std::error_code EC = MOrErr.getError())
    message(LDPL_FATAL, "Could not read bitcode from file : %s",
            EC.message().c_str());

  std::unique_ptr<Module> M(MOrErr.get());

  SmallPtrSet<GlobalValue *, 8> Used;
  collectUsedGlobalVariables(*M, Used, /*CompilerUsed*/ false);

  DenseSet<GlobalValue *> Drop;
  std::vector<GlobalAlias *> KeptAliases;
  for (ld_plugin_symbol &Sym : F.syms) {
    ld_plugin_symbol_resolution Resolution =
        (ld_plugin_symbol_resolution)Sym.resolution;

    if (options::generate_api_file)
      *ApiFile << Sym.name << ' ' << getResolutionName(Resolution) << '\n';

    GlobalValue *GV = M->getNamedValue(Sym.name);
    if (!GV)
      continue; // Asm symbol.

    if (GV->hasCommonLinkage()) {
      // Common linkage is special. There is no single symbol that wins the
      // resolution. Instead we have to collect the maximum alignment and size.
      // The IR linker does that for us if we just pass it every common GV.
      continue;
    }

    switch (Resolution) {
    case LDPR_UNKNOWN:
      llvm_unreachable("Unexpected resolution");

    case LDPR_RESOLVED_IR:
    case LDPR_RESOLVED_EXEC:
    case LDPR_RESOLVED_DYN:
    case LDPR_UNDEF:
      assert(isDeclaration(*GV));
      break;

    case LDPR_PREVAILING_DEF_IRONLY: {
      keepGlobalValue(*GV, KeptAliases);
      if (!Used.count(GV)) {
        // Since we use the regular lib/Linker, we cannot just internalize GV
        // now or it will not be copied to the merged module. Instead we force
        // it to be copied and then internalize it.
        Internalize.insert(Sym.name);
      }
      break;
    }

    case LDPR_PREVAILING_DEF:
      keepGlobalValue(*GV, KeptAliases);
      break;

    case LDPR_PREEMPTED_REG:
    case LDPR_PREEMPTED_IR:
      Drop.insert(GV);
      break;

    case LDPR_PREVAILING_DEF_IRONLY_EXP: {
      // We can only check for address uses after we merge the modules. The
      // reason is that this GV might have a copy in another module
      // and in that module the address might be significant, but that
      // copy will be LDPR_PREEMPTED_IR.
      if (GV->hasLinkOnceODRLinkage())
        Maybe.insert(Sym.name);
      keepGlobalValue(*GV, KeptAliases);
      break;
    }
    }

    free(Sym.name);
    free(Sym.comdat_key);
    Sym.name = nullptr;
    Sym.comdat_key = nullptr;
  }

  if (!Drop.empty())
    // This is horrible. Given how lazy loading is implemented, dropping
    // the body while there is a materializer present doesn't work, the
    // linker will just read the body back.
    M->materializeAllPermanently();

  ValueToValueMapTy VM;
  LocalValueMaterializer Materializer(Drop);
  for (GlobalAlias *GA : KeptAliases) {
    // Gold told us to keep GA. It is possible that a GV used in the aliasee
    // expression is being dropped. If that is the case, that GV must be copied.
    Constant *Aliasee = GA->getAliasee();
    Constant *Replacement = mapConstantToLocalCopy(Aliasee, VM, &Materializer);
    if (Aliasee != Replacement)
      GA->setAliasee(Replacement);
  }

  for (auto *GV : Drop)
    drop(*GV);

  return M;
}
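
// The assert for the LDPR_RESOLVED_*/LDPR_UNDEF cases above relies on an
// isDeclaration helper. A plausible sketch (an assumption, not necessarily
// the exact plugin code): available_externally bodies count as declarations
// for linking purposes, while a still-materializable body counts as a
// definition.
static bool isDeclaration(const GlobalValue &V) {
  if (V.hasAvailableExternallyLinkage())
    return true;
  if (V.isMaterializable())
    return false;
  return V.isDeclaration();
}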
bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // Nothing to do for declarations or empty functions.  Must exit early to
    // avoid a crash in dom tree calculation.
    return false;
  }

  bool modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs.  When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return nonsensical results.  Thus, we preprocess
  // the function to ensure these properties hold.
  modified |= removeUnreachableBlocks(F);

  // STEP 1 - Insert the safepoint polling locations.  We do not need to
  // actually insert parse points yet.  That will be done for all polls and
  // calls in a single pass.

  // Note: With the migration, we need to recompute this for each 'pass'.  Once
  // we merge these, we'll do it once before the analysis.
  DominatorTree DT;

  std::vector<CallSite> ParsePointNeeded;

  if (EnableBackedgeSafepoints) {
    // Construct a pass manager to run the LoopPass backedge logic.  We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately.  Doing this by hand is painful and just not worth messing
    // with for the moment.
    FunctionPassManager FPM(F.getParent());
    PlaceBackedgeSafepointsImpl *PBS =
        new PlaceBackedgeSafepointsImpl(EnableCallSafepoints);
    FPM.add(PBS);
    // Note: While the analysis pass itself won't modify the IR, LoopSimplify
    // (which it depends on) may, i.e. the analysis must be recalculated after
    // it runs.
    FPM.run(F);

    // We preserve dominance information when inserting the poll, otherwise
    // we'd have to recalculate this on every insert.
    DT.recalculate(F);

    // Insert a poll at each point the analysis pass identified
    for (size_t i = 0; i < PBS->PollLocations.size(); i++) {
      // We are inserting a poll, the function is modified
      modified = true;

      // The poll location must be the terminator of a loop latch block.
      TerminatorInst *Term = PBS->PollLocations[i];

      std::vector<CallSite> ParsePoints;
      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block.  This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it. It's possible that we have a) duplicate edges to the same header
        // and b) edges to distinct loop headers.  We need to insert polls on
        // each. (Note: This still relies on LoopSimplify.)
        DenseSet<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once.  Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        DenseSet<BasicBlock *> SplitBackedges;
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, nullptr);
          SplitBackedges.insert(NewBB);
        }
        DT.recalculate(F);
        for (BasicBlock *NewBB : SplitBackedges) {
          InsertSafepointPoll(DT, NewBB->getTerminator(), ParsePoints);
          NumBackedgeSafepoints++;
        }

      } else {
        // Split the latch block itself, right before the terminator.
        InsertSafepointPoll(DT, Term, ParsePoints);
        NumBackedgeSafepoints++;
      }

      // Record the parse points for later use
      ParsePointNeeded.insert(ParsePointNeeded.end(), ParsePoints.begin(),
                              ParsePoints.end());
    }
  }

  if (EnableEntrySafepoints) {
    DT.recalculate(F);
    Instruction *term = findLocationForEntrySafepoint(F, DT);
    if (!term) {
      // Policy choice not to insert an entry safepoint; nothing to do here.
    } else {
      std::vector<CallSite> RuntimeCalls;
      InsertSafepointPoll(DT, term, RuntimeCalls);
      modified = true;
      NumEntrySafepoints++;
      ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
                              RuntimeCalls.end());
    }
  }

  if (EnableCallSafepoints) {
    DT.recalculate(F);
    std::vector<CallSite> Calls;
    findCallSafepoints(F, Calls);
    NumCallSafepoints += Calls.size();
    ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end());
  }

  // Unique the vectors since we can end up with duplicates if we scan the call
  // site for call safepoints after we add it for entry or backedge.  The
  // only reason we need tracking at all is that some functions might have
  // polls but not call safepoints and thus we might miss marking the runtime
  // calls for the polls. (This is useful in test cases!)
  unique_unsorted(ParsePointNeeded);

  // Any parse point (no matter what source) will be handled here
  DT.recalculate(F); // Needed?

  // We're about to start modifying the function
  if (!ParsePointNeeded.empty())
    modified = true;

  // Now run through and insert the safepoints, but do _NOT_ update or remove
  // any existing uses.  We have references to live variables that need to
  // survive to the last iteration of this loop.
  std::vector<Value *> Results;
  Results.reserve(ParsePointNeeded.size());
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];
    Value *GCResult = ReplaceWithStatepoint(CS, nullptr);
    Results.push_back(GCResult);
  }
  assert(Results.size() == ParsePointNeeded.size());

  // Adjust all users of the old call sites to use the new ones instead
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];
    Value *GCResult = Results[i];
    if (GCResult) {
      // The gc result may have been inserted in a different basic block than
      // the original safepoint (this can happen for invokes), so we need to be
      // sure the original result value was not used in any of the phi nodes at
      // the beginning of the block holding the gc result.  We know all such
      // blocks have a single predecessor, so every phi node there has a single
      // entry (because of normalizeBBForInvokeSafepoint); just remove them all
      // here.
      if (CS.isInvoke()) {
        FoldSingleEntryPHINodes(cast<Instruction>(GCResult)->getParent(),
                                nullptr);
        assert(
            !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin()));
      }

      // Replace all uses with the new call
      CS.getInstruction()->replaceAllUsesWith(GCResult);
    }

    // Now that we've handled all uses, remove the original call itself
    // Note: The insert point can't be the deleted instruction!
    CS.getInstruction()->eraseFromParent();
  }
  return modified;
}
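
// unique_unsorted above deduplicates ParsePointNeeded while preserving
// first-occurrence order (sorting would lose the insertion order). A minimal
// sketch of such a helper, assuming the usual <set>/<vector> includes and an
// operator< on T:
template <typename T> static void unique_unsorted_sketch(std::vector<T> &Vec) {
  std::set<T> Seen;
  std::vector<T> Out;
  Out.reserve(Vec.size());
  for (const T &V : Vec)
    if (Seen.insert(V).second) // Keep only the first occurrence of V.
      Out.push_back(V);
  Vec = std::move(Out);
}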
Result::Sat AttemptSolutionSDP::attempt(const ApproximateSimplex::Solution& sol){
  const DenseSet& newBasis = sol.newBasis;
  const DenseMap<DeltaRational>& newValues = sol.newValues;

  DenseSet needsToBeAdded;
  for(DenseSet::const_iterator i = newBasis.begin(), i_end = newBasis.end(); i != i_end; ++i){
    ArithVar b = *i;
    if(!d_tableau.isBasic(b)){
      needsToBeAdded.add(b);
    }
  }
  DenseMap<DeltaRational>::const_iterator nvi = newValues.begin(), nvi_end = newValues.end();
  for(; nvi != nvi_end; ++nvi){
    ArithVar currentlyNb = *nvi;
    if(!d_tableau.isBasic(currentlyNb)){
      if(!matchesNewValue(newValues, currentlyNb)){
        const DeltaRational& newValue = newValues[currentlyNb];
        Trace("arith::updateMany")
          << "updateMany:" << currentlyNb << " "
          << d_variables.getAssignment(currentlyNb) << " to " << newValue << endl;
        d_linEq.update(currentlyNb, newValue);
        Assert(d_variables.assignmentIsConsistent(currentlyNb));
      }
    }
  }
  d_errorSet.reduceToSignals();
  d_errorSet.setSelectionRule(VAR_ORDER);

  static int instance = 0;
  ++instance;

  if(processSignals()){
    Debug("arith::findModel") << "attemptSolution("<< instance <<") early conflict" << endl;
    d_conflictVariables.purge();
    return Result::UNSAT;
  }else if(d_errorSet.errorEmpty()){
    Debug("arith::findModel") << "attemptSolution("<< instance <<") fixed itself" << endl;
    return Result::SAT;
  }

  while(!needsToBeAdded.empty() && !d_errorSet.errorEmpty()){
    ArithVar toRemove = ARITHVAR_SENTINEL;
    ArithVar toAdd = ARITHVAR_SENTINEL;
    DenseSet::const_iterator i = needsToBeAdded.begin(), i_end = needsToBeAdded.end();
    for(; toAdd == ARITHVAR_SENTINEL && i != i_end; ++i){
      ArithVar v = *i;

      Tableau::ColIterator colIter = d_tableau.colIterator(v);
      for(; !colIter.atEnd(); ++colIter){
        const Tableau::Entry& entry = *colIter;
        Assert(entry.getColVar() == v);
        ArithVar b = d_tableau.rowIndexToBasic(entry.getRowIndex());
        if(!newBasis.isMember(b)){
          toAdd = v;

          bool favorBOverToRemove =
            (toRemove == ARITHVAR_SENTINEL) ||
            (matchesNewValue(newValues, toRemove) && !matchesNewValue(newValues, b)) ||
            (d_tableau.basicRowLength(toRemove) > d_tableau.basicRowLength(b));

          if(favorBOverToRemove){
            toRemove = b;
          }
        }
      }
    }
    Assert(toRemove != ARITHVAR_SENTINEL);
    Assert(toAdd != ARITHVAR_SENTINEL);

    Trace("arith::forceNewBasis") << toRemove << " " << toAdd << endl;
    //Message() << toRemove << " " << toAdd << endl;

    d_linEq.pivotAndUpdate(toRemove, toAdd, newValues[toRemove]);

    Trace("arith::forceNewBasis") << needsToBeAdded.size() << "to go" << endl;
    //Message() << needsToBeAdded.size() << "to go" << endl;
    needsToBeAdded.remove(toAdd);

    bool conflict = processSignals();
    if(conflict){
      d_errorSet.reduceToSignals();
      d_conflictVariables.purge();

      return Result::UNSAT;
    }
  }
  Assert(d_conflictVariables.empty());

  if(d_errorSet.errorEmpty()){
    return Result::SAT;
  }else{
    d_errorSet.reduceToSignals();
    return Result::SAT_UNKNOWN;
  }
}
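
// The update loop above consults matchesNewValue before touching a variable.
// A sketch of what it plausibly computes, inferred from how it is used (an
// assumption, not necessarily the exact CVC4 code): the variable's current
// assignment already equals the value the approximate solution assigned.
bool AttemptSolutionSDP::matchesNewValue(const DenseMap<DeltaRational>& newValues,
                                         ArithVar v) const {
  return newValues[v] == d_variables.getAssignment(v);
}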
bool LoopDependenceAnalysis::isLoopInvariant(const SCEV *S) const {
  DenseSet<const Loop*> loops;
  getLoops(S, &loops);
  return loops.empty();
}
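
// isLoopInvariant above treats S as invariant exactly when getLoops records
// no loop. A sketch of getLoops under that reading (the members L and SE are
// assumptions following the old LoopDependenceAnalysis pass): walk the
// analyzed loop nest outward and record each level where ScalarEvolution
// says S varies.
void LoopDependenceAnalysis::getLoops(const SCEV *S,
                                      DenseSet<const Loop *> *Loops) const {
  for (const Loop *Cur = this->L; Cur; Cur = Cur->getParentLoop())
    if (!SE->isLoopInvariant(S, Cur))
      Loops->insert(Cur);
}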