Example #1
void LoopInfo::verifyAnalysis() const {
  // LoopInfo is a FunctionPass, but verifying every loop in the function
  // each time verifyAnalysis is called is very expensive. The
  // -verify-loop-info option can enable this. In order to perform some
  // checking by default, LoopPass has been taught to call verifyLoop
  // manually during loop pass sequences.

  if (!VerifyLoopInfo) return;

  DenseSet<const Loop*> Loops;
  for (iterator I = begin(), E = end(); I != E; ++I) {
    assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
    (*I)->verifyLoopNest(&Loops);
  }

  // Verify that blocks are mapped to valid loops.
  //
  // FIXME: With an up-to-date DFS (see LoopIterator.h) and DominatorTree, we
  // could also verify that the blocks are still in the correct loops.
  for (DenseMap<BasicBlock*, Loop*>::const_iterator I = LI.BBMap.begin(),
         E = LI.BBMap.end(); I != E; ++I) {
    assert(Loops.count(I->second) && "orphaned loop");
    assert(I->second->contains(I->first) && "orphaned block");
  }
}
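
The shape of this check is reusable: first collect every loop reached by the top-level walk into a set, then verify that every block-to-loop entry points into that set (anything else is orphaned). A minimal standalone sketch of that idea, using std::unordered_set/std::unordered_map as stand-ins for llvm::DenseSet/DenseMap and a toy Loop type (all names below are hypothetical):

#include <cassert>
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct Loop {
  Loop *Parent = nullptr;
  std::vector<Loop *> SubLoops;
};

// Recursively record every loop in a nest, mirroring verifyLoopNest.
static void collectNest(Loop *L, std::unordered_set<const Loop *> &Loops) {
  Loops.insert(L);
  for (Loop *Sub : L->SubLoops)
    collectNest(Sub, Loops);
}

// Verify that every block-to-loop entry refers to a loop that is still part
// of some top-level nest (i.e. it is not "orphaned"). Block IDs are ints here.
static void verifyMapping(const std::vector<Loop *> &TopLevelLoops,
                          const std::unordered_map<int, Loop *> &BlockMap) {
  std::unordered_set<const Loop *> Loops;
  for (Loop *L : TopLevelLoops) {
    assert(!L->Parent && "Top-level loop has a parent!");
    collectNest(L, Loops);
  }
  for (const auto &Entry : BlockMap)
    assert(Loops.count(Entry.second) && "orphaned loop");
}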
Example #2
void DebugTypeInfoRemoval::traverse(MDNode *N) {
  if (!N || Replacements.count(N))
    return;

  // To avoid cycles, as well as for efficiency's sake, we will sometimes prune
  // parts of the graph.
  auto prune = [](MDNode *Parent, MDNode *Child) {
    if (auto *MDS = dyn_cast<DISubprogram>(Parent))
      return Child == MDS->getVariables().get();
    return false;
  };

  SmallVector<MDNode *, 16> ToVisit;
  DenseSet<MDNode *> Opened;

  // Visit each node starting at N in post order, and map them.
  ToVisit.push_back(N);
  while (!ToVisit.empty()) {
    auto *N = ToVisit.back();
    if (!Opened.insert(N).second) {
      // Close it.
      remap(N);
      ToVisit.pop_back();
      continue;
    }
    for (auto &I : N->operands())
      if (auto *MDN = dyn_cast_or_null<MDNode>(I))
        if (!Opened.count(MDN) && !Replacements.count(MDN) && !prune(N, MDN) &&
            !isa<DICompileUnit>(MDN))
          ToVisit.push_back(MDN);
  }
}
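
The traversal is the classic two-phase stack DFS: a node is pushed once, "opened" on its first encounter at the top of the stack, and "closed" (here, remapped) when it surfaces again after all of its children have been handled. A self-contained sketch of that discipline, with std::unordered_set standing in for DenseSet and a Closed set playing the role the Replacements map plays above (hypothetical names):

#include <functional>
#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> Children;
};

// Visit every node reachable from Root in post order: a node is opened on
// its first encounter and closed (handed to Visit) on its second, once all
// of its children have been closed.
static void postOrder(Node *Root, const std::function<void(Node *)> &Visit) {
  std::vector<Node *> ToVisit{Root};
  std::unordered_set<Node *> Opened, Closed;
  while (!ToVisit.empty()) {
    Node *N = ToVisit.back();
    if (!Opened.insert(N).second) {
      // Second encounter: close it, but only once even if N was pushed
      // from several parents before being opened.
      if (Closed.insert(N).second)
        Visit(N);
      ToVisit.pop_back();
      continue;
    }
    for (Node *Child : N->Children)
      if (Child && !Opened.count(Child))
        ToVisit.push_back(Child);
  }
}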
Example #3
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
                                     MachineBasicBlock *TailBB,
                                     MachineBasicBlock *PredBB,
                                     MachineFunction &MF,
                                     DenseMap<unsigned, unsigned> &LocalVRMap,
                                     const DenseSet<unsigned> &UsedByPhi) {
  MachineInstr *NewMI = TII->duplicate(MI, MF);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MO.isDef()) {
      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
      unsigned NewReg = MRI->createVirtualRegister(RC);
      MO.setReg(NewReg);
      LocalVRMap.insert(std::make_pair(Reg, NewReg));
      if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
        AddSSAUpdateEntry(Reg, NewReg, PredBB);
    } else {
      DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
      if (VI != LocalVRMap.end()) {
        MO.setReg(VI->second);
        // Clear any kill flags from this operand.  The new register could have
        // uses after this one, so kills are not valid here.
        MO.setIsKill(false);
        MRI->constrainRegClass(VI->second, MRI->getRegClass(Reg));
      }
    }
  }
  PredBB->insert(PredBB->instr_end(), NewMI);
}
Example #4
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
  for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end();
       BBI != BBE && BBI->isPHI(); ++BBI) {
    DenseSet<const MachineBasicBlock*> seen;

    for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
      unsigned Reg = BBI->getOperand(i).getReg();
      const MachineBasicBlock *Pre = BBI->getOperand(i + 1).getMBB();
      if (!Pre->isSuccessor(MBB))
        continue;
      seen.insert(Pre);
      BBInfo &PrInfo = MBBInfoMap[Pre];
      if (PrInfo.reachable && !PrInfo.isLiveOut(Reg))
        report("PHI operand is not live-out from predecessor",
               &BBI->getOperand(i), i);
    }

    // Did we see all predecessors?
    for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
           PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
      if (!seen.count(*PrI)) {
        report("Missing PHI operand", BBI);
        *OS << "BB#" << (*PrI)->getNumber()
            << " is a predecessor according to the CFG.\n";
      }
    }
  }
}
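
The seen-set idiom generalizes to any "does this operand list cover every expected source" check. A minimal sketch with plain integers standing in for basic blocks, assuming nothing about MachineVerifier's internals:

#include <cstdio>
#include <unordered_set>
#include <utility>
#include <vector>

// Report every predecessor that a PHI-like operand list fails to mention.
// Operands is a list of (value, predecessor-id) pairs; Preds holds the
// predecessor ids the CFG says must each appear.
static void checkCoverage(const std::vector<std::pair<int, int>> &Operands,
                          const std::vector<int> &Preds) {
  std::unordered_set<int> Seen;
  for (const auto &Op : Operands)
    Seen.insert(Op.second);
  for (int Pred : Preds)
    if (!Seen.count(Pred))
      std::printf("Missing PHI operand for predecessor BB#%d\n", Pred);
}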
Example #5
/// ProcessPHI - Process PHI node in TailBB by turning it into a copy in PredBB.
/// Remember the source register that's contributed by PredBB and update SSA
/// update map.
void TailDuplicatePass::ProcessPHI(MachineInstr *MI,
                                   MachineBasicBlock *TailBB,
                                   MachineBasicBlock *PredBB,
                                   DenseMap<unsigned, unsigned> &LocalVRMap,
                           SmallVector<std::pair<unsigned,unsigned>, 4> &Copies,
                                   const DenseSet<unsigned> &RegsUsedByPhi,
                                   bool Remove) {
  unsigned DefReg = MI->getOperand(0).getReg();
  unsigned SrcOpIdx = getPHISrcRegOpIdx(MI, PredBB);
  assert(SrcOpIdx && "Unable to find matching PHI source?");
  unsigned SrcReg = MI->getOperand(SrcOpIdx).getReg();
  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  LocalVRMap.insert(std::make_pair(DefReg, SrcReg));

  // Insert a copy from source to the end of the block. The def register is the
  // available value liveout of the block.
  unsigned NewDef = MRI->createVirtualRegister(RC);
  Copies.push_back(std::make_pair(NewDef, SrcReg));
  if (isDefLiveOut(DefReg, TailBB, MRI) || RegsUsedByPhi.count(DefReg))
    AddSSAUpdateEntry(DefReg, NewDef, PredBB);

  if (!Remove)
    return;

  // Remove PredBB from the PHI node.
  MI->RemoveOperand(SrcOpIdx+1);
  MI->RemoveOperand(SrcOpIdx);
  if (MI->getNumOperands() == 1)
    MI->eraseFromParent();
}
Example #6
// Do the constant propagation in combined index.
// The goal of constant propagation is internalization of readonly
// variables. To determine which variables are readonly and which
// are not, we take the following steps:
// - During analysis we speculatively assign readonly attribute to
//   all variables which can be internalized. When computing function
//   summary we also assign readonly attribute to a reference if
//   function doesn't modify referenced variable.
//
// - After computing dead symbols in combined index we do the constant
//   propagation. During this step we clear readonly attribute from
//   all variables which:
//   a. are preserved or can't be imported
//   b. are referenced by any global variable initializer
//   c. are referenced by a function and the reference is not readonly
//
// Internalization itself happens in the backend after import is finished
// See internalizeImmutableGVs.
void ModuleSummaryIndex::propagateConstants(
    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
  for (auto &P : *this)
    for (auto &S : P.second.SummaryList) {
      if (!isGlobalValueLive(S.get()))
        // We don't examine references from dead objects
        continue;

      // Global variable can't be marked read only if it is not eligible
      // to import since we need to ensure that all external references
      // get a local (imported) copy. It also can't be marked read only
      // if it or any alias (since alias points to the same memory) are
      // preserved or notEligibleToImport, since either of those means
      // there could be writes that are not visible (because preserved
      // means it could have external to DSO writes, and notEligibleToImport
      // means it could have writes via inline assembly leading it to be
      // in the @llvm.*used).
      if (auto *GVS = dyn_cast<GlobalVarSummary>(S->getBaseObject()))
        // Here we intentionally pass S.get() not GVS, because S could be
        // an alias.
        if (!canImportGlobalVar(S.get()) || GUIDPreservedSymbols.count(P.first))
          GVS->setReadOnly(false);
      propagateConstantsToRefs(S.get());
    }
  if (llvm::AreStatisticsEnabled())
    for (auto &P : *this)
      if (P.second.SummaryList.size())
        if (auto *GVS = dyn_cast<GlobalVarSummary>(
                P.second.SummaryList[0]->getBaseObject()))
          if (isGlobalValueLive(GVS) && GVS->isReadOnly())
            ReadOnlyLiveGVars++;
}
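
Stripped of the summary-index machinery, the comment above describes an optimistic scheme: assume the strongest property (readonly) everywhere, then walk the evidence and clear the flag wherever it cannot hold. A toy sketch of that shape; the Var type and the two predicate sets are hypothetical:

#include <string>
#include <unordered_set>
#include <vector>

struct Var {
  std::string Name;
  bool ReadOnly = true; // Speculatively assumed during analysis.
};

// Clear the speculative readonly flag from every variable that is
// preserved or is written through some reference.
static void propagate(std::vector<Var> &Vars,
                      const std::unordered_set<std::string> &Preserved,
                      const std::unordered_set<std::string> &WrittenRefs) {
  for (Var &V : Vars)
    if (Preserved.count(V.Name) || WrittenRefs.count(V.Name))
      V.ReadOnly = false;
}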
Example #7
void ModuleLinker::dropReplacedComdat(
    GlobalValue &GV, const DenseSet<const Comdat *> &ReplacedDstComdats) {
  Comdat *C = GV.getComdat();
  if (!C)
    return;
  if (!ReplacedDstComdats.count(C))
    return;
  if (GV.use_empty()) {
    GV.eraseFromParent();
    return;
  }

  if (auto *F = dyn_cast<Function>(&GV)) {
    F->deleteBody();
  } else if (auto *Var = dyn_cast<GlobalVariable>(&GV)) {
    Var->setInitializer(nullptr);
  } else {
    auto &Alias = cast<GlobalAlias>(GV);
    Module &M = *Alias.getParent();
    PointerType &Ty = *cast<PointerType>(Alias.getType());
    GlobalValue *Declaration;
    if (auto *FTy = dyn_cast<FunctionType>(Alias.getValueType())) {
      Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage, "", &M);
    } else {
      Declaration =
          new GlobalVariable(M, Ty.getElementType(), /*isConstant*/ false,
                             GlobalValue::ExternalLinkage,
                             /*Initializer*/ nullptr);
    }
    Declaration->takeName(&Alias);
    Alias.replaceAllUsesWith(Declaration);
    Alias.eraseFromParent();
  }
}
Example #8
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
                                     MachineBasicBlock *TailBB,
                                     MachineBasicBlock *PredBB,
                                     MachineFunction &MF,
                                     DenseMap<unsigned, unsigned> &LocalVRMap,
                                     const DenseSet<unsigned> &UsedByPhi) {
  MachineInstr *NewMI = TII->duplicate(MI, MF);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MO.isDef()) {
      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
      unsigned NewReg = MRI->createVirtualRegister(RC);
      MO.setReg(NewReg);
      LocalVRMap.insert(std::make_pair(Reg, NewReg));
      if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
        AddSSAUpdateEntry(Reg, NewReg, PredBB);
    } else {
      DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
      if (VI != LocalVRMap.end())
        MO.setReg(VI->second);
    }
  }
  PredBB->insert(PredBB->end(), NewMI);
}
Example #9
//
// Method: findGlobalPoolNodes()
//
// Description:
//  This method finds DSNodes that are reachable from globals and that need a
//  pool.  The Automatic Pool Allocation transform will use the returned
//  information to build global pools for the DSNodes in question.
//
//  Note that this method does not assign DSNodes to pools; it merely decides
//  which DSNodes are reachable from globals and will need a pool of global
//  scope.
//
// Outputs:
//  Nodes - The DSNodes that are both reachable from globals and which should
//          have global pools will be *added* to this container.
//
void
AllHeapNodesHeuristic::findGlobalPoolNodes (DSNodeSet_t & Nodes) {
  // Get the globals graph for the program.
  DSGraph* GG = Graphs->getGlobalsGraph();

  // Get all of the nodes reachable from globals.
  DenseSet<const DSNode*> GlobalHeapNodes;
  GetNodesReachableFromGlobals (GG, GlobalHeapNodes);
  //
  // Create a global pool for each global DSNode.
  //
  for (DenseSet<const DSNode *>::iterator NI = GlobalHeapNodes.begin();
              NI != GlobalHeapNodes.end();++NI) {
    const DSNode * N = *NI;
    PoolMap[N] = OnePool(N);
  }

  //
  // Now find all DSNodes belonging to function-local DSGraphs which are
  // mirrored in the globals graph.  These DSNodes require a global pool, too.
  //
  for (Module::iterator F = M->begin(); F != M->end(); ++F) {
    if (Graphs->hasDSGraph(*F)) {
      DSGraph* G = Graphs->getDSGraph(*F);
      DSGraph::NodeMapTy NodeMap;
      G->computeGToGGMapping (NodeMap);
      //
      // Scan through all DSNodes in the local graph.  If a local DSNode has a
      // corresponding DSNode in the globals graph that is reachable from a 
      // global, then add the local DSNode to the set of DSNodes reachable from
      // a global.
      //
      DSGraph::node_iterator ni = G->node_begin();
      for (; ni != G->node_end(); ++ni) {
        DSNode * N = ni;
        DSNode * GGN = NodeMap[N].getNode();
        
        //assert (!GGN || GlobalHeapNodes.count (GGN));
        if (GGN && GlobalHeapNodes.count (GGN))
          PoolMap[GGN].NodesInPool.push_back (N);
      }
    }
  }

  //
  // Copy the values into the output container.  Note that DenseSet has no
  // iterator traits (or whatever allows us to treat DenseSet as a generic
  // container), so we have to use a loop to copy values from the DenseSet into
  // the output container.
  //
  for (DenseSet<const DSNode*>::iterator I = GlobalHeapNodes.begin(),
         E = GlobalHeapNodes.end(); I != E; ++I) {
    Nodes.insert (*I);
  }

  return;
}
Example #10
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
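
The whitelist convention here (an empty WL means "copy everything"; otherwise only kinds present in the set are copied) lifts out cleanly. A sketch using std::unordered_set for the membership test (names hypothetical):

#include <unordered_set>
#include <utility>
#include <vector>

// Copy (kind, payload) entries from Src to Dst, keeping an entry when the
// whitelist WL is empty (copy all) or contains its kind.
static void copyFiltered(const std::vector<std::pair<unsigned, int>> &Src,
                         std::vector<std::pair<unsigned, int>> &Dst,
                         const std::vector<unsigned> &WL) {
  std::unordered_set<unsigned> WLS(WL.begin(), WL.end());
  for (const auto &Entry : Src)
    if (WL.empty() || WLS.count(Entry.first))
      Dst.push_back(Entry);
}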
Example #11
// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}
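
Abstracted away from MachineInstr, the routine asks whether this instruction reads any register already recorded as defined by the candidate group; if so it joins the group and contributes its own defs. A sketch over plain register IDs (types hypothetical):

#include <unordered_set>
#include <vector>

struct Insn {
  std::vector<unsigned> Reads, Writes;
};

// If I reads a register that somebody in the candidate group defines, it
// must move with the group: append it and fold its writes into RegDefs.
static bool addIfDependent(Insn &I, std::unordered_set<unsigned> &RegDefs,
                           std::vector<Insn *> &Insts) {
  for (unsigned R : I.Reads)
    if (RegDefs.count(R)) {
      Insts.push_back(&I);
      for (unsigned W : I.Writes)
        RegDefs.insert(W);
      return true;
    }
  return false;
}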
Example #12
MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (Constants[i].isMachineConstantPoolEntry()) {
      Deleted.insert(Constants[i].Val.MachineCPVal);
      delete Constants[i].Val.MachineCPVal;
    }
  for (DenseSet<MachineConstantPoolValue*>::iterator I =
       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
       I != E; ++I) {
    if (Deleted.count(*I) == 0)
      delete *I;
  }
}
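
The destructor avoids double deletion by remembering every pointer it has already freed. The same guard, reduced to its essentials and assuming plain owned pointers rather than constant-pool entries:

#include <unordered_set>
#include <vector>

// Delete every pointer appearing in either list, deleting each object at
// most once even if it is referenced from both containers.
static void deleteAll(const std::vector<int *> &A,
                      const std::vector<int *> &B) {
  std::unordered_set<int *> Deleted;
  for (int *P : A)
    if (Deleted.insert(P).second)
      delete P;
  for (int *P : B)
    if (Deleted.insert(P).second)
      delete P;
}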
Example #13
void TDDataStructures::markReachableFunctionsExternallyAccessible(DSNode *N,
                                                   DenseSet<DSNode*> &Visited) {
  if (!N || Visited.count(N)) return;
  Visited.insert(N);

  // Handle this node
  {
    N->addFullFunctionSet(ExternallyCallable);
  }

  for (DSNode::edge_iterator ii = N->edge_begin(),
          ee = N->edge_end(); ii != ee; ++ii)
    if (!ii->second.isNull()) {
      DSNodeHandle &NH = ii->second;
      DSNode * NN = NH.getNode();
      NN->addFullFunctionSet(ExternallyCallable);
      markReachableFunctionsExternallyAccessible(NN, Visited);
    }
}
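
This is the standard visited-set recursion over a possibly cyclic graph; without the Visited check it would recurse forever on a cycle. A self-contained sketch with a hypothetical Node type:

#include <unordered_set>
#include <vector>

struct Node {
  bool Marked = false;
  std::vector<Node *> Edges;
};

// Mark N and everything reachable from it, tolerating cycles.
static void markReachable(Node *N, std::unordered_set<Node *> &Visited) {
  if (!N || Visited.count(N))
    return;
  Visited.insert(N);
  N->Marked = true;
  for (Node *Succ : N->Edges)
    markReachable(Succ, Visited);
}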
Example #14
//
// AddInsnClass - Return all combinations of resource reservation
// which are possible from this state (PossibleStates).
//
void State::AddInsnClass(unsigned InsnClass,
                            std::set<unsigned> &PossibleStates) const {
  //
  // Iterate over all resource states in currentState.
  //

  for (std::set<unsigned>::iterator SI = stateInfo.begin();
       SI != stateInfo.end(); ++SI) {
    unsigned thisState = *SI;

    //
    // Iterate over all possible resources used in InsnClass.
    // For ex: for InsnClass = 0x11, all resources = {0x01, 0x10}.
    //

    DenseSet<unsigned> VisitedResourceStates;
    for (unsigned int j = 0; j < sizeof(InsnClass) * 8; ++j) {
      if ((0x1 << j) & InsnClass) {
        //
        // For each possible resource used in InsnClass, generate the
        // resource state if that resource was used.
        //
        unsigned ResultingResourceState = thisState | (0x1 << j);
        //
        // Check if the resulting resource state can be accommodated in this
        // packet.
        // We compute ResultingResourceState OR thisState.
        // If the result of the OR is different than thisState, it implies
        // that there is at least one resource that can be used to schedule
        // InsnClass in the current packet.
        // Insert ResultingResourceState into PossibleStates only if we haven't
        // processed ResultingResourceState before.
        //
        if ((ResultingResourceState != thisState) &&
            (VisitedResourceStates.count(ResultingResourceState) == 0)) {
          VisitedResourceStates.insert(ResultingResourceState);
          PossibleStates.insert(ResultingResourceState);
        }
      }
    }
  }

}
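
Concretely, each set bit of InsnClass is one resource the instruction could occupy; OR-ing a bit into the current state models reserving that resource, and a result equal to the input state means the resource was already taken. A runnable sketch of the inner computation for a single state, keeping the original's dedup set (names hypothetical):

#include <set>
#include <unordered_set>

// For one current resource state, add every distinct new state obtained by
// reserving a single resource bit demanded by InsnClass.
static void addInsnClass(unsigned State, unsigned InsnClass,
                         std::set<unsigned> &PossibleStates) {
  std::unordered_set<unsigned> VisitedResourceStates;
  for (unsigned j = 0; j < sizeof(InsnClass) * 8; ++j)
    if (InsnClass & (1u << j)) {
      unsigned Result = State | (1u << j);
      // Result == State means this resource was already reserved; skip it.
      if (Result != State && VisitedResourceStates.insert(Result).second)
        PossibleStates.insert(Result);
    }
}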
Example #15
/// Removes redundant check_unowned calls if they check the same reference and
/// there is no instruction in between which could decrement the reference count.
static void performRedundantCheckUnownedRemoval(BasicBlock &BB) {
  DenseSet<Value *> checkedValues;
  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    // Preincrement the iterator to avoid invalidation and other trouble.
    Instruction &I = *BBI++;
    switch (classifyInstruction(I)) {
      case RT_NoMemoryAccessed:
      case RT_AllocObject:
      case RT_FixLifetime:
      case RT_Retain:
      case RT_UnknownRetain:
      case RT_BridgeRetain:
      case RT_RetainUnowned:
      case RT_ObjCRetain:
        // None of these can decrement reference counts.
        continue;

      case RT_CheckUnowned: {
        Value *Arg = cast<CallInst>(&I)->getArgOperand(0);
        if (checkedValues.count(Arg) != 0) {
          // We checked this reference already -> delete the second check.
          I.eraseFromParent();
        } else {
          // Record the check.
          checkedValues.insert(Arg);
        }
        continue;
      }
        
      case RT_Unknown:
        // Loads cannot affect the retain.
        if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I))
          continue;
        break;
        
      default:
        break;
    }
    // We found some potential reference decrementing instruction. Bail out.
    checkedValues.clear();
  }
}
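
The essential idea is to walk the block once, remember which references have already been checked, and forget everything at any instruction that might decrement a reference count. A distilled sketch over a token stream; Kind and Op are hypothetical stand-ins for the runtime-call classification above:

#include <unordered_set>
#include <vector>

enum class Kind { Check, Barrier, Other };

struct Op {
  Kind K;
  int Ref = 0;      // Reference being checked, when K == Kind::Check.
  bool Dead = false;
};

// Mark a check as dead when the same reference was already checked and no
// potential ref-count decrement (Barrier) intervened.
static void removeRedundantChecks(std::vector<Op> &Ops) {
  std::unordered_set<int> CheckedValues;
  for (Op &O : Ops) {
    switch (O.K) {
    case Kind::Check:
      if (!CheckedValues.insert(O.Ref).second)
        O.Dead = true; // Second check of the same reference.
      break;
    case Kind::Barrier:
      CheckedValues.clear(); // Ref counts may have changed; forget all.
      break;
    case Kind::Other:
      break;
    }
  }
}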
Example #16
static void thinLTOResolveWeakForLinkerGUID(
    GlobalValueSummaryList &GVSummaryList, GlobalValue::GUID GUID,
    DenseSet<GlobalValueSummary *> &GlobalInvolvedWithAlias,
    std::function<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing,
    std::function<bool(StringRef, GlobalValue::GUID)> isExported,
    std::function<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
        recordNewLinkage) {
  auto HasMultipleCopies = GVSummaryList.size() > 1;

  for (auto &S : GVSummaryList) {
    if (GlobalInvolvedWithAlias.count(S.get()))
      continue;
    GlobalValue::LinkageTypes OriginalLinkage = S->linkage();
    if (!GlobalValue::isWeakForLinker(OriginalLinkage))
      continue;
    // We need to emit only one of these; the first module will keep it
    // (turned into a weak symbol), while the others will drop it when possible.
    if (!HasMultipleCopies) {
      // Exported Linkonce needs to be promoted to not be discarded.
      if (GlobalValue::isLinkOnceLinkage(OriginalLinkage) &&
          isExported(S->modulePath(), GUID))
        S->setLinkage(GlobalValue::getWeakLinkage(
            GlobalValue::isLinkOnceODRLinkage(OriginalLinkage)));
    } else if (isPrevailing(GUID, S.get())) {
      if (GlobalValue::isLinkOnceLinkage(OriginalLinkage))
        S->setLinkage(GlobalValue::getWeakLinkage(
            GlobalValue::isLinkOnceODRLinkage(OriginalLinkage)));
    }
    // Alias can't be turned into available_externally.
    else if (!isa<AliasSummary>(S.get()) &&
             (GlobalValue::isLinkOnceODRLinkage(OriginalLinkage) ||
              GlobalValue::isWeakODRLinkage(OriginalLinkage)))
      S->setLinkage(GlobalValue::AvailableExternallyLinkage);
    if (S->linkage() != OriginalLinkage)
      recordNewLinkage(S->modulePath(), GUID, S->linkage());
  }
}
Example #17
//
// Method: findGlobalPoolNodes()
//
// Description:
//  This method finds DSNodes that are reachable from globals and that need a
//  pool.  The Automatic Pool Allocation transform will use the returned
//  information to build global pools for the DSNodes in question.
//
//  For efficiency, this method also determines which DSNodes should be in the
//  same pool.
//
// Outputs:
//  Nodes - The DSNodes that are both reachable from globals and which should
//          have global pools will be *added* to this container.
//
void
AllNodesHeuristic::findGlobalPoolNodes (DSNodeSet_t & Nodes) {
    // Get the globals graph for the program.
    DSGraph* GG = Graphs->getGlobalsGraph();

    //
    // Get all of the nodes reachable from globals.
    //
    DenseSet<const DSNode*> GlobalNodes;
    GetNodesReachableFromGlobals (GG, GlobalNodes);

    //
    // Create a global pool for each global DSNode.
    //
    for (DenseSet<const DSNode *>::iterator NI = GlobalNodes.begin();
            NI != GlobalNodes.end();
            ++NI) {
        const DSNode * N = *NI;
        PoolMap[N] = OnePool(N);
    }

    //
    // Now find all DSNodes belonging to function-local DSGraphs which are
    // mirrored in the globals graph.  These DSNodes require a global pool, too,
    // but must use the same pool as the one assigned to the corresponding global
    // DSNode.
    //
    for (Module::iterator F = M->begin(); F != M->end(); ++F) {
        //
        // Ignore functions that have no DSGraph.
        //
        if (!(Graphs->hasDSGraph(*F))) continue;

        //
        // Compute a mapping between local DSNodes and DSNodes in the globals
        // graph.
        //
        DSGraph* G = Graphs->getDSGraph(*F);
        DSGraph::NodeMapTy NodeMap;
        G->computeGToGGMapping (NodeMap);

        //
        // Scan through all DSNodes in the local graph.  If a local DSNode has a
        // corresponding DSNode in the globals graph that is reachable from a
        // global, then add the local DSNode to the set of DSNodes reachable from
        // a global.
        //
        DSGraph::node_iterator ni = G->node_begin();
        for (; ni != G->node_end(); ++ni) {
            DSNode * N = ni;
            DSNode * GGN = NodeMap[N].getNode();

            assert (!GGN || GlobalNodes.count (GGN));
            if (GGN && GlobalNodes.count (GGN))
                PoolMap[GGN].NodesInPool.push_back (N);
        }
    }

    //
    // Scan through all the local graphs looking for DSNodes which may be
    // reachable by a global.  These nodes may not end up in the globals graph
    // because of the fact that DSA doesn't actually know what is happening to
    // them.
    //
    // FIXME: I believe this code causes a condition in which a local DSNode is
    //        given a local pool in one function but not in other functions.
    //        Someone needs to investigate whether DSA is being consistent here,
    //        and if not, if that inconsistency is correct.
    //
#if 0
    for (Module::iterator F = M->begin(); F != M->end(); ++F) {
        if (F->isDeclaration()) continue;
        DSGraph* G = Graphs->getDSGraph(*F);
        for (DSGraph::node_iterator I = G->node_begin(), E = G->node_end();
                I != E;
                ++I) {
            DSNode * Node = I;
            if (Node->isExternalNode() || Node->isUnknownNode()) {
                GlobalNodes.insert (Node);
            }
        }
    }
#endif

    //
    // Copy the values into the output container.  Note that DenseSet has no
    // iterator traits (or whatever allows us to treat DenseSet as a generic
    // container), so we have to use a loop to copy values from the DenseSet into
    // the output container.
    //
    // Note that we do not copy local DSNodes into the output container; we
    // merely copy those nodes in the globals graph.
    //
    for (DenseSet<const DSNode*>::iterator I = GlobalNodes.begin(),
            E = GlobalNodes.end(); I != E; ++I) {
        Nodes.insert (*I);
    }

    return;
}
Example #18
//
// Function: GetNodesReachableFromGlobals()
//
// Description:
//  This function finds all DSNodes which are reachable from globals.  It finds
//  DSNodes both within the local DSGraph as well as in the Globals graph that
//  are reachable from globals.  It does, however, filter out those DSNodes
//  which are of no interest to automatic pool allocation.
//
// Inputs:
//  G - The DSGraph for which to find DSNodes which are reachable by globals.
//      This DSGraph can either be a DSGraph associated with a function *or*
//      it can be the globals graph itself.
//
// Outputs:
//  NodesFromGlobals - A reference to a container object in which to record
//                     DSNodes reachable from globals.  DSNodes are *added* to
//                     this container; it is not cleared by this function.
//                     DSNodes from both the local and globals graph are added.
void
AllHeapNodesHeuristic::GetNodesReachableFromGlobals (DSGraph* G,
                              DenseSet<const DSNode*> &NodesFromGlobals) {
  //
  // Get the globals graph associated with this DSGraph.  If the globals graph
  // is NULL, then the graph that was passed in *is* the globals graph.
  //
  DSGraph * GlobalsGraph = G->getGlobalsGraph();
  if (!GlobalsGraph)
    GlobalsGraph = G;

  //
  // Find all DSNodes which are reachable in the globals graph.
  //
  for (DSGraph::node_iterator NI = GlobalsGraph->node_begin();
       NI != GlobalsGraph->node_end();
       ++NI) {
    NI->markReachableNodes(NodesFromGlobals);
  }

  //
  // Remove those global nodes which we know will never be pool allocated.
  //
  
  std::vector<const DSNode *> toRemove;
  for (DenseSet<const DSNode*>::iterator I = NodesFromGlobals.begin(),
         E = NodesFromGlobals.end(); I != E; ) {
    DenseSet<const DSNode*>::iterator Last = I; ++I;

    const DSNode *tmp = *Last;
    if (!(tmp->isHeapNode())) 
      toRemove.push_back (tmp);
    // Do not poolallocate nodes that are cast to Int.
    // As we do not track through ints, these could be escaping
    if (tmp->isPtrToIntNode())
      toRemove.push_back(tmp);
  }
 
  //
  // Remove all globally reachable DSNodes which do not require pools.
  //
  for (unsigned index = 0; index < toRemove.size(); ++index) {
    NodesFromGlobals.erase(toRemove[index]);
  }

  //
  // Now the fun part.  Find DSNodes in the local graph that correspond to
  // those nodes reachable in the globals graph.  Add them to the set of
  // reachable nodes, too.
  //
  if (G->getGlobalsGraph()) {
    //
    // Compute a mapping between local DSNodes and DSNodes in the globals
    // graph.
    //
    DSGraph::NodeMapTy NodeMap;
    G->computeGToGGMapping (NodeMap);

    //
    // Scan through all DSNodes in the local graph.  If a local DSNode has a
    // corresponding DSNode in the globals graph that is reachable from a 
    // global, then add the local DSNode to the set of DSNodes reachable from a
    // global.
    //
    // FIXME: A node's existence within the global DSGraph is probably
    //        sufficient evidence that it is reachable from a global.
    //

    DSGraph::node_iterator ni = G->node_begin();
    for (; ni != G->node_end(); ++ni) {
      DSNode * N = ni;
      if (NodesFromGlobals.count (NodeMap[N].getNode()))
        NodesFromGlobals.insert (N);
    }
  }
}
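
Note the filtering idiom: removal candidates are first collected into a vector and erased afterwards, so the set is never mutated while the main loop is still walking it. The same two-pass pattern in miniature (std::unordered_set standing in for DenseSet):

#include <unordered_set>
#include <vector>

// Erase every element failing Keep() from S, without erasing while the
// main iteration is still walking the container.
static void filterSet(std::unordered_set<int> &S, bool (*Keep)(int)) {
  std::vector<int> ToRemove;
  for (int V : S)
    if (!Keep(V))
      ToRemove.push_back(V);
  for (int V : ToRemove)
    S.erase(V);
}

(With C++20 one would reach for std::erase_if instead.)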
Example #19
// Automatically import functions in Module \p DestModule based on the summaries
// index.
//
Expected<bool> FunctionImporter::importFunctions(
    Module &DestModule, const FunctionImporter::ImportMapTy &ImportList,
    bool ForceImportReferencedDiscardableSymbols) {
  DEBUG(dbgs() << "Starting import for Module "
               << DestModule.getModuleIdentifier() << "\n");
  unsigned ImportedCount = 0;

  // Linker that will be used for importing function
  Linker TheLinker(DestModule);
  // Do the actual import of functions now, one Module at a time
  std::set<StringRef> ModuleNameOrderedList;
  for (auto &FunctionsToImportPerModule : ImportList) {
    ModuleNameOrderedList.insert(FunctionsToImportPerModule.first());
  }
  for (auto &Name : ModuleNameOrderedList) {
    // Get the module for the import
    const auto &FunctionsToImportPerModule = ImportList.find(Name);
    assert(FunctionsToImportPerModule != ImportList.end());
    Expected<std::unique_ptr<Module>> SrcModuleOrErr = ModuleLoader(Name);
    if (!SrcModuleOrErr)
      return SrcModuleOrErr.takeError();
    std::unique_ptr<Module> SrcModule = std::move(*SrcModuleOrErr);
    assert(&DestModule.getContext() == &SrcModule->getContext() &&
           "Context mismatch");

    // If modules were created with lazy metadata loading, materialize it
    // now, before linking it (otherwise this will be a noop).
    if (Error Err = SrcModule->materializeMetadata())
      return std::move(Err);
    UpgradeDebugInfo(*SrcModule);

    auto &ImportGUIDs = FunctionsToImportPerModule->second;
    // Find the globals to import
    DenseSet<const GlobalValue *> GlobalsToImport;
    for (Function &F : *SrcModule) {
      if (!F.hasName())
        continue;
      auto GUID = F.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function " << GUID
                   << " " << F.getName() << " from "
                   << SrcModule->getSourceFileName() << "\n");
      if (Import) {
        if (Error Err = F.materialize())
          return std::move(Err);
        if (EnableImportMetadata) {
          // Add 'thinlto_src_module' metadata for statistics and debugging.
          F.setMetadata(
              "thinlto_src_module",
              llvm::MDNode::get(
                  DestModule.getContext(),
                  {llvm::MDString::get(DestModule.getContext(),
                                       SrcModule->getSourceFileName())}));
        }
        GlobalsToImport.insert(&F);
      }
    }
    for (GlobalVariable &GV : SrcModule->globals()) {
      if (!GV.hasName())
        continue;
      auto GUID = GV.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global " << GUID
                   << " " << GV.getName() << " from "
                   << SrcModule->getSourceFileName() << "\n");
      if (Import) {
        if (Error Err = GV.materialize())
          return std::move(Err);
        GlobalsToImport.insert(&GV);
      }
    }
    for (GlobalAlias &GA : SrcModule->aliases()) {
      if (!GA.hasName())
        continue;
      auto GUID = GA.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias " << GUID
                   << " " << GA.getName() << " from "
                   << SrcModule->getSourceFileName() << "\n");
      if (Import) {
        // Alias can't point to "available_externally". However when we import
        // linkOnceODR the linkage does not change. So we import the alias
        // and aliasee only in this case. This has been handled by
        // computeImportForFunction()
        GlobalObject *GO = GA.getBaseObject();
        assert(GO->hasLinkOnceODRLinkage() &&
               "Unexpected alias to a non-linkonceODR in import list");
#ifndef NDEBUG
        if (!GlobalsToImport.count(GO))
          DEBUG(dbgs() << " alias triggers importing aliasee " << GO->getGUID()
                       << " " << GO->getName() << " from "
                       << SrcModule->getSourceFileName() << "\n");
#endif
        if (Error Err = GO->materialize())
          return std::move(Err);
        GlobalsToImport.insert(GO);
        if (Error Err = GA.materialize())
          return std::move(Err);
        GlobalsToImport.insert(&GA);
      }
    }

    // Link in the specified functions.
    if (renameModuleForThinLTO(*SrcModule, Index, &GlobalsToImport))
      return true;

    if (PrintImports) {
      for (const auto *GV : GlobalsToImport)
        dbgs() << DestModule.getSourceFileName() << ": Import " << GV->getName()
               << " from " << SrcModule->getSourceFileName() << "\n";
    }

    // Instruct the linker that the client will take care of linkonce resolution
    unsigned Flags = Linker::Flags::None;
    if (!ForceImportReferencedDiscardableSymbols)
      Flags |= Linker::Flags::DontForceLinkLinkonceODR;

    if (TheLinker.linkInModule(std::move(SrcModule), Flags, &GlobalsToImport))
      report_fatal_error("Function Import: link error");

    ImportedCount += GlobalsToImport.size();
  }

  NumImported += ImportedCount;

  DEBUG(dbgs() << "Imported " << ImportedCount << " functions for Module "
               << DestModule.getModuleIdentifier() << "\n");
  return ImportedCount;
}
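
One detail worth copying from importFunctions: ImportList is a hash map, so its iteration order is unspecified; collecting the module names into a std::set first makes the import order deterministic across runs. The trick in isolation, with a hypothetical map type:

#include <set>
#include <string>
#include <unordered_map>
#include <vector>

// Visit the values of an unordered map in a stable, sorted-by-key order.
static std::vector<int>
inKeyOrder(const std::unordered_map<std::string, int> &M) {
  std::set<std::string> OrderedKeys;
  for (const auto &Entry : M)
    OrderedKeys.insert(Entry.first);
  std::vector<int> Out;
  for (const auto &Key : OrderedKeys)
    Out.push_back(M.at(Key));
  return Out;
}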
Example #20
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
    const Module &M,
    std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
    ProfileSummaryInfo *PSI) {
  ModuleSummaryIndex Index;

  // Identify the local values in the llvm.used and llvm.compiler.used sets,
  // which should not be exported as they would then require renaming and
  // promotion, but we may have opaque uses e.g. in inline asm. We collect them
  // here because we use this information to mark functions containing inline
  // assembly calls as not importable.
  SmallPtrSet<GlobalValue *, 8> LocalsUsed;
  SmallPtrSet<GlobalValue *, 8> Used;
  // First collect those in the llvm.used set.
  collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
  // Next collect those in the llvm.compiler.used set.
  collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
  DenseSet<GlobalValue::GUID> CantBePromoted;
  for (auto *V : Used) {
    if (V->hasLocalLinkage()) {
      LocalsUsed.insert(V);
      CantBePromoted.insert(V->getGUID());
    }
  }

  // Compute summaries for all functions defined in module, and save in the
  // index.
  for (auto &F : M) {
    if (F.isDeclaration())
      continue;

    BlockFrequencyInfo *BFI = nullptr;
    std::unique_ptr<BlockFrequencyInfo> BFIPtr;
    if (GetBFICallback)
      BFI = GetBFICallback(F);
    else if (F.getEntryCount().hasValue()) {
      LoopInfo LI{DominatorTree(const_cast<Function &>(F))};
      BranchProbabilityInfo BPI{F, LI};
      BFIPtr = llvm::make_unique<BlockFrequencyInfo>(F, BPI, LI);
      BFI = BFIPtr.get();
    }

    computeFunctionSummary(Index, M, F, BFI, PSI, !LocalsUsed.empty(),
                           CantBePromoted);
  }

  // Compute summaries for all variables defined in module, and save in the
  // index.
  for (const GlobalVariable &G : M.globals()) {
    if (G.isDeclaration())
      continue;
    computeVariableSummary(Index, G, CantBePromoted);
  }

  // Compute summaries for all aliases defined in module, and save in the
  // index.
  for (const GlobalAlias &A : M.aliases())
    computeAliasSummary(Index, A, CantBePromoted);

  for (auto *V : LocalsUsed) {
    auto *Summary = Index.getGlobalValueSummary(*V);
    assert(Summary && "Missing summary for global value");
    Summary->setNotEligibleToImport();
  }

  // The linker doesn't know about these LLVM produced values, so we need
  // to flag them as live in the index to ensure index-based dead value
  // analysis treats them as live roots of the analysis.
  setLiveRoot(Index, "llvm.used");
  setLiveRoot(Index, "llvm.compiler.used");
  setLiveRoot(Index, "llvm.global_ctors");
  setLiveRoot(Index, "llvm.global_dtors");
  setLiveRoot(Index, "llvm.global.annotations");

  if (!M.getModuleInlineAsm().empty()) {
    // Collect the local values defined by module level asm, and set up
    // summaries for these symbols so that they can be marked as NoRename,
    // to prevent export of any use of them in regular IR that would require
    // renaming within the module level asm. Note we don't need to create a
    // summary for weak or global defs, as they don't need to be flagged as
    // NoRename, and defs in module level asm can't be imported anyway.
    // Also, any values used but not defined within module level asm should
    // be listed on the llvm.used or llvm.compiler.used global and marked as
    // referenced from there.
    ModuleSymbolTable::CollectAsmSymbols(
        Triple(M.getTargetTriple()), M.getModuleInlineAsm(),
        [&M, &Index, &CantBePromoted](StringRef Name,
                                      object::BasicSymbolRef::Flags Flags) {
          // Symbols not marked as Weak or Global are local definitions.
          if (Flags & (object::BasicSymbolRef::SF_Weak |
                       object::BasicSymbolRef::SF_Global))
            return;
          GlobalValue *GV = M.getNamedValue(Name);
          if (!GV)
            return;
          assert(GV->isDeclaration() && "Def in module asm already has definition");
          GlobalValueSummary::GVFlags GVFlags(GlobalValue::InternalLinkage,
                                              /* NotEligibleToImport */ true,
                                              /* LiveRoot */ true);
          CantBePromoted.insert(GlobalValue::getGUID(Name));
          // Create the appropriate summary type.
          if (isa<Function>(GV)) {
            std::unique_ptr<FunctionSummary> Summary =
                llvm::make_unique<FunctionSummary>(
                    GVFlags, 0, ArrayRef<ValueInfo>{},
                    ArrayRef<FunctionSummary::EdgeTy>{},
                    ArrayRef<GlobalValue::GUID>{});
            Index.addGlobalValueSummary(Name, std::move(Summary));
          } else {
            std::unique_ptr<GlobalVarSummary> Summary =
                llvm::make_unique<GlobalVarSummary>(GVFlags,
                                                    ArrayRef<ValueInfo>{});
            Index.addGlobalValueSummary(Name, std::move(Summary));
          }
        });
  }

  for (auto &GlobalList : Index) {
    assert(GlobalList.second.size() == 1 &&
           "Expected module's index to have one summary per GUID");
    auto &Summary = GlobalList.second[0];
    bool AllRefsCanBeExternallyReferenced =
        llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
          return !CantBePromoted.count(VI.getValue()->getGUID());
        });
    if (!AllRefsCanBeExternallyReferenced) {
      Summary->setNotEligibleToImport();
      continue;
    }

    if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
      bool AllCallsCanBeExternallyReferenced = llvm::all_of(
          FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
            auto GUID = Edge.first.isGUID() ? Edge.first.getGUID()
                                            : Edge.first.getValue()->getGUID();
            return !CantBePromoted.count(GUID);
          });
      if (!AllCallsCanBeExternallyReferenced)
        Summary->setNotEligibleToImport();
    }
  }

  return Index;
}
Example #21
// Reroll the provided loop with respect to the provided induction variable.
// Generally, we're looking for a loop like this:
//
// %iv = phi [ (preheader, ...), (body, %iv.next) ]
// f(%iv)
// %iv.1 = add %iv, 1                <-- a root increment
// f(%iv.1)
// %iv.2 = add %iv, 2                <-- a root increment
// f(%iv.2)
// %iv.scale_m_1 = add %iv, scale-1  <-- a root increment
// f(%iv.scale_m_1)
// ...
// %iv.next = add %iv, scale
// %cmp = icmp(%iv, ...)
// br %cmp, header, exit
//
// Notably, we do not require that f(%iv), f(%iv.1), etc. be isolated groups of
// instructions. In other words, the instructions in f(%iv), f(%iv.1), etc. can
// be intermixed with each other. The restriction imposed by this algorithm is
// that the relative order of the isomorphic instructions in f(%iv), f(%iv.1),
// etc. be the same.
//
// First, we collect the use set of %iv, excluding the other increment roots.
// This gives us f(%iv). Then we iterate over the loop instructions (scale-1)
// times, having collected the use set of f(%iv.(i+1)), during which we:
//   - Ensure that the next unmatched instruction in f(%iv) is isomorphic to
//     the next unmatched instruction in f(%iv.(i+1)).
//   - Ensure that both matched instructions don't have any external users
//     (with the exception of last-in-chain reduction instructions).
//   - Track the (aliasing) write set, and other side effects, of all
//     instructions that belong to future iterations that come before the matched
//     instructions. If the matched instructions read from that write set, then
//     f(%iv) or f(%iv.(i+1)) has some dependency on instructions in
//     f(%iv.(j+1)) for some j > i, and we cannot reroll the loop. Similarly,
//     if any of these future instructions had side effects (could not be
//     speculatively executed), and so do the matched instructions, then we
//     cannot reorder those side-effect-producing instructions, and rerolling
//     fails.
//
// Finally, we make sure that all loop instructions are either loop increment
// roots, belong to simple latch code, parts of validated reductions, part of
// f(%iv) or part of some f(%iv.i). If all of that is true (and all reductions
// have been validated), then we reroll the loop.
bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
                        const SCEV *IterCount,
                        ReductionTracker &Reductions) {
  const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(IV));
  uint64_t Inc = cast<SCEVConstant>(RealIVSCEV->getOperand(1))->
                   getValue()->getZExtValue();
  // The collection of loop increment instructions.
  SmallInstructionVector LoopIncs;
  uint64_t Scale = Inc;

  // The effective induction variable, IV, is normally also the real induction
  // variable. When we're dealing with a loop like:
  //   for (int i = 0; i < 500; ++i)
  //     x[3*i] = ...;
  //     x[3*i+1] = ...;
  //     x[3*i+2] = ...;
  // then the real IV is still i, but the effective IV is (3*i).
  Instruction *RealIV = IV;
  if (Inc == 1 && !findScaleFromMul(RealIV, Scale, IV, LoopIncs))
    return false;

  assert(Scale <= MaxInc && "Scale is too large");
  assert(Scale > 1 && "Scale must be at least 2");

  // The set of increment instructions for each increment value.
  SmallVector<SmallInstructionVector, 32> Roots(Scale-1);
  SmallInstructionSet AllRoots;
  if (!collectAllRoots(L, Inc, Scale, IV, Roots, AllRoots, LoopIncs))
    return false;

  DEBUG(dbgs() << "LRR: Found all root induction increments for: " <<
                  *RealIV << "\n");

  // An array of just the possible reductions for this scale factor. When we
  // collect the set of all users of some root instructions, these reduction
  // instructions are treated as 'final' (their uses are not considered).
  // This is important because we don't want the root use set to search down
  // the reduction chain.
  SmallInstructionSet PossibleRedSet;
  SmallInstructionSet PossibleRedLastSet, PossibleRedPHISet;
  Reductions.restrictToScale(Scale, PossibleRedSet, PossibleRedPHISet,
                             PossibleRedLastSet);

  // We now need to check for equivalence of the use graph of each root with
  // that of the primary induction variable (excluding the roots). Our goal
  // here is not to solve the full graph isomorphism problem, but rather to
  // catch common cases without a lot of work. As a result, we will assume
  // that the relative order of the instructions in each unrolled iteration
  // is the same (although we will not make an assumption about how the
  // different iterations are intermixed). Note that while the order must be
  // the same, the instructions may not be in the same basic block.
  SmallInstructionSet Exclude(AllRoots);
  Exclude.insert(LoopIncs.begin(), LoopIncs.end());

  DenseSet<Instruction *> BaseUseSet;
  collectInLoopUserSet(L, IV, Exclude, PossibleRedSet, BaseUseSet);

  DenseSet<Instruction *> AllRootUses;
  std::vector<DenseSet<Instruction *> > RootUseSets(Scale-1);

  bool MatchFailed = false;
  for (unsigned i = 0; i < Scale-1 && !MatchFailed; ++i) {
    DenseSet<Instruction *> &RootUseSet = RootUseSets[i];
    collectInLoopUserSet(L, Roots[i], SmallInstructionSet(),
                         PossibleRedSet, RootUseSet);

    DEBUG(dbgs() << "LRR: base use set size: " << BaseUseSet.size() <<
                    " vs. iteration increment " << (i+1) <<
                    " use set size: " << RootUseSet.size() << "\n");

    if (BaseUseSet.size() != RootUseSet.size()) {
      MatchFailed = true;
      break;
    }

    // In addition to regular aliasing information, we need to look for
    // instructions from later (future) iterations that have side effects
    // preventing us from reordering them past other instructions with side
    // effects.
    bool FutureSideEffects = false;
    AliasSetTracker AST(*AA);

    // The map between instructions in f(%iv.(i+1)) and f(%iv).
    DenseMap<Value *, Value *> BaseMap;

    assert(L->getNumBlocks() == 1 && "Cannot handle multi-block loops");
    for (BasicBlock::iterator J1 = Header->begin(), J2 = Header->begin(),
         JE = Header->end(); J1 != JE && !MatchFailed; ++J1) {
      if (cast<Instruction>(J1) == RealIV)
        continue;
      if (cast<Instruction>(J1) == IV)
        continue;
      if (!BaseUseSet.count(J1))
        continue;
      if (PossibleRedPHISet.count(J1)) // Skip reduction PHIs.
        continue;

      while (J2 != JE && (!RootUseSet.count(J2) ||
             std::find(Roots[i].begin(), Roots[i].end(), J2) !=
               Roots[i].end())) {
        // As we iterate through the instructions, instructions that don't
        // belong to previous iterations (or the base case), must belong to
        // future iterations. We want to track the alias set of writes from
        // previous iterations.
        if (!isa<PHINode>(J2) && !BaseUseSet.count(J2) &&
            !AllRootUses.count(J2)) {
          if (J2->mayWriteToMemory())
            AST.add(J2);

          // Note: This is specifically guarded by a check on isa<PHINode>,
          // which while a valid (somewhat arbitrary) micro-optimization, is
          // needed because otherwise isSafeToSpeculativelyExecute returns
          // false on PHI nodes.
          if (!isSimpleLoadStore(J2) && !isSafeToSpeculativelyExecute(J2, DL))
            FutureSideEffects = true; 
        }

        ++J2;
      }

      if (!J1->isSameOperationAs(J2)) {
        DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                        " vs. " << *J2 << "\n");
        MatchFailed = true;
        break;
      }

      // Make sure that this instruction, which is in the use set of this
      // root instruction, does not also belong to the base set or the set of
      // some previous root instruction.
      if (BaseUseSet.count(J2) || AllRootUses.count(J2)) {
        DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                        " vs. " << *J2 << " (prev. case overlap)\n");
        MatchFailed = true;
        break;
      }

      // Make sure that we don't alias with any instruction in the alias set
      // tracker. If we do, then we depend on a future iteration, and we
      // can't reroll.
      if (J2->mayReadFromMemory()) {
        for (AliasSetTracker::iterator K = AST.begin(), KE = AST.end();
             K != KE && !MatchFailed; ++K) {
          if (K->aliasesUnknownInst(J2, *AA)) {
            DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                            " vs. " << *J2 << " (depends on future store)\n");
            MatchFailed = true;
            break;
          }
        }
      }

      // If we've passed an instruction from a future iteration that may have
      // side effects, and this instruction might also, then we can't reorder
      // them, and this matching fails. As an exception, we allow the alias
      // set tracker to handle regular (simple) load/store dependencies.
      if (FutureSideEffects &&
            ((!isSimpleLoadStore(J1) && !isSafeToSpeculativelyExecute(J1)) ||
             (!isSimpleLoadStore(J2) && !isSafeToSpeculativelyExecute(J2)))) {
        DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                        " vs. " << *J2 <<
                        " (side effects prevent reordering)\n");
        MatchFailed = true;
        break;
      }

      // For instructions that are part of a reduction, if the operation is
      // associative, then don't bother matching the operands (because we
      // already know that the instructions are isomorphic, and the order
      // within the iteration does not matter). For non-associative reductions,
      // we do need to match the operands, because we need to reject
      // out-of-order instructions within an iteration!
      // For example (assume floating-point addition), we need to reject this:
      //   x += a[i]; x += b[i];
      //   x += a[i+1]; x += b[i+1];
      //   x += b[i+2]; x += a[i+2];
      bool InReduction = Reductions.isPairInSame(J1, J2);

      if (!(InReduction && J1->isAssociative())) {
        bool Swapped = false, SomeOpMatched = false;
        for (unsigned j = 0; j < J1->getNumOperands() && !MatchFailed; ++j) {
          Value *Op2 = J2->getOperand(j);

          // If this is part of a reduction (and the operation is not
          // associative), then we match all operands, but not those that are
          // part of the reduction.
          if (InReduction)
            if (Instruction *Op2I = dyn_cast<Instruction>(Op2))
              if (Reductions.isPairInSame(J2, Op2I))
                continue;

          DenseMap<Value *, Value *>::iterator BMI = BaseMap.find(Op2);
          if (BMI != BaseMap.end())
            Op2 = BMI->second;
          else if (std::find(Roots[i].begin(), Roots[i].end(),
                             (Instruction*) Op2) != Roots[i].end())
            Op2 = IV;

          if (J1->getOperand(Swapped ? unsigned(!j) : j) != Op2) {
            // If we've not already decided to swap the matched operands, and
            // we've not already matched our first operand (note that we could
            // have skipped matching the first operand because it is part of a
            // reduction above), and the instruction is commutative, then try
            // the swapped match.
            if (!Swapped && J1->isCommutative() && !SomeOpMatched &&
                J1->getOperand(!j) == Op2) {
              Swapped = true;
            } else {
              DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                              " vs. " << *J2 << " (operand " << j << ")\n");
              MatchFailed = true;
              break;
            }
          }

          SomeOpMatched = true;
        }
      }

      if ((!PossibleRedLastSet.count(J1) && hasUsesOutsideLoop(J1, L)) ||
          (!PossibleRedLastSet.count(J2) && hasUsesOutsideLoop(J2, L))) {
        DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
                        " vs. " << *J2 << " (uses outside loop)\n");
        MatchFailed = true;
        break;
      }

      if (!MatchFailed)
        BaseMap.insert(std::pair<Value *, Value *>(J2, J1));

      AllRootUses.insert(J2);
      Reductions.recordPair(J1, J2, i+1);

      ++J2;
    }
  }

  if (MatchFailed)
    return false;

  DEBUG(dbgs() << "LRR: Matched all iteration increments for " <<
                  *RealIV << "\n");

  DenseSet<Instruction *> LoopIncUseSet;
  collectInLoopUserSet(L, LoopIncs, SmallInstructionSet(),
                       SmallInstructionSet(), LoopIncUseSet);
  DEBUG(dbgs() << "LRR: Loop increment set size: " <<
                  LoopIncUseSet.size() << "\n");

  // Make sure that all instructions in the loop have been included in some
  // use set.
  for (BasicBlock::iterator J = Header->begin(), JE = Header->end();
       J != JE; ++J) {
    if (isa<DbgInfoIntrinsic>(J))
      continue;
    if (cast<Instruction>(J) == RealIV)
      continue;
    if (cast<Instruction>(J) == IV)
      continue;
    if (BaseUseSet.count(J) || AllRootUses.count(J) ||
        (LoopIncUseSet.count(J) && (J->isTerminator() ||
                                    isSafeToSpeculativelyExecute(J, DL))))
      continue;

    if (AllRoots.count(J))
      continue;

    if (Reductions.isSelectedPHI(J))
      continue;

    DEBUG(dbgs() << "LRR: aborting reroll based on " << *RealIV <<
                    " unprocessed instruction found: " << *J << "\n");
    MatchFailed = true;
    break;
  }

  if (MatchFailed)
    return false;

  DEBUG(dbgs() << "LRR: all instructions processed from " <<
                  *RealIV << "\n");

  if (!Reductions.validateSelected())
    return false;

  // At this point, we've validated the rerolling, and we're committed to
  // making changes!

  Reductions.replaceSelected();

  // Remove instructions associated with non-base iterations.
  for (BasicBlock::reverse_iterator J = Header->rbegin();
       J != Header->rend();) {
    if (AllRootUses.count(&*J)) {
      Instruction *D = &*J;
      DEBUG(dbgs() << "LRR: removing: " << *D << "\n");
      D->eraseFromParent();
      continue;
    }

    ++J; 
  }

  // Insert the new induction variable.
  const SCEV *Start = RealIVSCEV->getStart();
  if (Inc == 1)
    Start = SE->getMulExpr(Start,
                           SE->getConstant(Start->getType(), Scale));
  const SCEVAddRecExpr *H =
    cast<SCEVAddRecExpr>(SE->getAddRecExpr(Start,
                           SE->getConstant(RealIVSCEV->getType(), 1),
                           L, SCEV::FlagAnyWrap));
  { // Limit the lifetime of SCEVExpander.
    SCEVExpander Expander(*SE, "reroll");
    Value *NewIV = Expander.expandCodeFor(H, IV->getType(), Header->begin());

    for (DenseSet<Instruction *>::iterator J = BaseUseSet.begin(),
         JE = BaseUseSet.end(); J != JE; ++J)
      (*J)->replaceUsesOfWith(IV, NewIV);

    if (BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator())) {
      if (LoopIncUseSet.count(BI)) {
        const SCEV *ICSCEV = RealIVSCEV->evaluateAtIteration(IterCount, *SE);
        if (Inc == 1)
          ICSCEV =
            SE->getMulExpr(ICSCEV, SE->getConstant(ICSCEV->getType(), Scale));
        // Iteration count SCEV minus 1
        const SCEV *ICMinus1SCEV =
          SE->getMinusSCEV(ICSCEV, SE->getConstant(ICSCEV->getType(), 1));

        Value *ICMinus1; // Iteration count minus 1
        if (isa<SCEVConstant>(ICMinus1SCEV)) {
          ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(), BI);
        } else {
          BasicBlock *Preheader = L->getLoopPreheader();
          if (!Preheader)
            Preheader = InsertPreheaderForLoop(L, this);

          ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(),
                                            Preheader->getTerminator());
        }
 
        Value *Cond = new ICmpInst(BI, CmpInst::ICMP_EQ, NewIV, ICMinus1,
                                   "exitcond");
        BI->setCondition(Cond);

        if (BI->getSuccessor(1) != Header)
          BI->swapSuccessors();
      }
    }
  }

  SimplifyInstructionsInBlock(Header, DL, TLI);
  DeleteDeadPHIs(Header, TLI);
  ++NumRerolledLoops;
  return true;
}
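The effect of a successful reroll is easiest to see at the source level. Below is a minimal before/after sketch (hypothetical arrays, not code from the pass) of what rerolling with a scale of 2 accomplishes: the duplicated body collapses to a single copy, and the induction variable becomes unit-stride with a scaled trip count, which is exactly the new add-rec and the Scale-multiplied iteration count built above.

#include <cstddef>

// Before rerolling: the body contains two copies of the same computation
// and the induction variable steps by the scale (2).
void unrolled(int *a, const int *b, size_t n) {
  for (size_t i = 0; i < n; i += 2) {
    a[i]     += b[i];
    a[i + 1] += b[i + 1];
  }
}

// After rerolling: one copy of the body and a unit-stride induction
// variable; the trip count is the scale times larger.
void rerolled(int *a, const int *b, size_t n) {
  for (size_t i = 0; i < n; ++i)
    a[i] += b[i];
}
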
Exemplo n.º 22
0
// Automatically import functions in Module \p DestModule based on the summaries
// index.
//
bool FunctionImporter::importFunctions(
    Module &DestModule, const FunctionImporter::ImportMapTy &ImportList) {
  DEBUG(dbgs() << "Starting import for Module "
               << DestModule.getModuleIdentifier() << "\n");
  unsigned ImportedCount = 0;

  // Linker that will be used for importing functions
  Linker TheLinker(DestModule);
  // Do the actual import of functions now, one Module at a time
  std::set<StringRef> ModuleNameOrderedList;
  for (auto &FunctionsToImportPerModule : ImportList) {
    ModuleNameOrderedList.insert(FunctionsToImportPerModule.first());
  }
  for (auto &Name : ModuleNameOrderedList) {
    // Get the module for the import
    const auto &FunctionsToImportPerModule = ImportList.find(Name);
    assert(FunctionsToImportPerModule != ImportList.end());
    std::unique_ptr<Module> SrcModule = ModuleLoader(Name);
    assert(&DestModule.getContext() == &SrcModule->getContext() &&
           "Context mismatch");

    // If modules were created with lazy metadata loading, materialize it
    // now, before linking it (otherwise this will be a noop).
    SrcModule->materializeMetadata();
    UpgradeDebugInfo(*SrcModule);

    auto &ImportGUIDs = FunctionsToImportPerModule->second;
    // Find the globals to import
    DenseSet<const GlobalValue *> GlobalsToImport;
    for (auto &GV : *SrcModule) {
      if (!GV.hasName())
        continue;
      auto GUID = GV.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing " << GUID << " "
                   << GV.getName() << " from " << SrcModule->getSourceFileName()
                   << "\n");
      if (Import) {
        GV.materialize();
        GlobalsToImport.insert(&GV);
      }
    }
    for (auto &GV : SrcModule->globals()) {
      if (!GV.hasName())
        continue;
      auto GUID = GV.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing " << GUID << " "
                   << GV.getName() << " from " << SrcModule->getSourceFileName()
                   << "\n");
      if (Import) {
        GV.materialize();
        GlobalsToImport.insert(&GV);
      }
    }
    for (auto &GV : SrcModule->aliases()) {
      if (!GV.hasName())
        continue;
      auto GUID = GV.getGUID();
      auto Import = ImportGUIDs.count(GUID);
      DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing " << GUID << " "
                   << GV.getName() << " from " << SrcModule->getSourceFileName()
                   << "\n");
      if (Import) {
        // An alias can't point to "available_externally". However, when we
        // import a linkonce_odr aliasee the linkage does not change, so we
        // import the alias and aliasee only in that case.
        GlobalObject *GO = GV.getBaseObject();
        if (!GO->hasLinkOnceODRLinkage())
          continue;
#ifndef NDEBUG
        if (!GlobalsToImport.count(GO))
          DEBUG(dbgs() << " alias triggers importing aliasee " << GO->getGUID()
                       << " " << GO->getName() << " from "
                       << SrcModule->getSourceFileName() << "\n");
#endif
        GO->materialize();
        GlobalsToImport.insert(GO);
        GV.materialize();
        GlobalsToImport.insert(&GV);
      }
    }

    // Link in the specified functions.
    if (renameModuleForThinLTO(*SrcModule, Index, &GlobalsToImport))
      return true;

    if (PrintImports) {
      for (const auto *GV : GlobalsToImport)
        dbgs() << DestModule.getSourceFileName() << ": Import " << GV->getName()
               << " from " << SrcModule->getSourceFileName() << "\n";
    }

    if (TheLinker.linkInModule(std::move(SrcModule), Linker::Flags::None,
                               &GlobalsToImport))
      report_fatal_error("Function Import: link error");

    ImportedCount += GlobalsToImport.size();
  }

  NumImported += ImportedCount;

  DEBUG(dbgs() << "Imported " << ImportedCount << " functions for Module "
               << DestModule.getModuleIdentifier() << "\n");
  return ImportedCount;
}
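One detail worth calling out above: ImportMapTy is a hash map, so its iteration order is unspecified, and the module names are first copied into a std::set so the per-module imports run in a deterministic (sorted) order. A standalone sketch of the same pattern with standard containers (the module names are hypothetical):

#include <iostream>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  // Stand-in for ImportMapTy: hash-map iteration order is unspecified.
  std::unordered_map<std::string, std::vector<std::string>> ImportList = {
      {"libB.bc", {"bar"}}, {"libA.bc", {"foo", "baz"}}};

  // Collect the keys into an ordered set first, as importFunctions does,
  // so the per-module work happens in a stable, sorted order.
  std::set<std::string> ModuleNameOrderedList;
  for (const auto &KV : ImportList)
    ModuleNameOrderedList.insert(KV.first);

  for (const auto &Name : ModuleNameOrderedList)
    std::cout << "importing from " << Name << "\n"; // libA.bc, then libB.bc
  return 0;
}
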
Exemplo n.º 23
0
void CodeViewDebug::collectVariableInfo(const DISubprogram *SP) {
  DenseSet<InlinedVariable> Processed;
  // Grab the variable info that was squirreled away in the MMI side-table.
  collectVariableInfoFromMMITable(Processed);

  const TargetRegisterInfo *TRI = Asm->MF->getSubtarget().getRegisterInfo();

  for (const auto &I : DbgValues) {
    InlinedVariable IV = I.first;
    if (Processed.count(IV))
      continue;
    const DILocalVariable *DIVar = IV.first;
    const DILocation *InlinedAt = IV.second;

    // Instruction ranges, specifying where IV is accessible.
    const auto &Ranges = I.second;

    LexicalScope *Scope = nullptr;
    if (InlinedAt)
      Scope = LScopes.findInlinedScope(DIVar->getScope(), InlinedAt);
    else
      Scope = LScopes.findLexicalScope(DIVar->getScope());
    // If variable scope is not found then skip this variable.
    if (!Scope)
      continue;

    LocalVariable Var;
    Var.DIVar = DIVar;

    // Calculate the definition ranges.
    for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
      const InsnRange &Range = *I;
      const MachineInstr *DVInst = Range.first;
      assert(DVInst->isDebugValue() && "Invalid History entry");
      const DIExpression *DIExpr = DVInst->getDebugExpression();

      // Bail if there is a complex DWARF expression for now.
      if (DIExpr && DIExpr->getNumElements() > 0)
        continue;

      // Bail if operand 0 is not a valid register. This means the variable is a
      // simple constant, or is described by a complex expression.
      // FIXME: Find a way to represent constant variables, since they are
      // relatively common.
      unsigned Reg =
          DVInst->getOperand(0).isReg() ? DVInst->getOperand(0).getReg() : 0;
      if (Reg == 0)
        continue;

      // Handle the two cases we can handle: indirect in memory and in register.
      bool IsIndirect = DVInst->getOperand(1).isImm();
      unsigned CVReg = TRI->getCodeViewRegNum(DVInst->getOperand(0).getReg());
      {
        LocalVarDefRange DefRange;
        if (IsIndirect) {
          int64_t Offset = DVInst->getOperand(1).getImm();
          DefRange = createDefRangeMem(CVReg, Offset);
        } else {
          DefRange = createDefRangeReg(CVReg);
        }
        if (Var.DefRanges.empty() ||
            Var.DefRanges.back().isDifferentLocation(DefRange)) {
          Var.DefRanges.emplace_back(std::move(DefRange));
        }
      }

      // Compute the label range.
      const MCSymbol *Begin = getLabelBeforeInsn(Range.first);
      const MCSymbol *End = getLabelAfterInsn(Range.second);
      if (!End) {
        if (std::next(I) != E)
          End = getLabelBeforeInsn(std::next(I)->first);
        else
          End = Asm->getFunctionEnd();
      }

      // If the last range end is our begin, just extend the last range.
      // Otherwise make a new range.
      SmallVectorImpl<std::pair<const MCSymbol *, const MCSymbol *>> &Ranges =
          Var.DefRanges.back().Ranges;
      if (!Ranges.empty() && Ranges.back().second == Begin)
        Ranges.back().second = End;
      else
        Ranges.emplace_back(Begin, End);

      // FIXME: Do more range combining.
    }

    recordLocalVariable(std::move(Var), InlinedAt);
  }
}
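The label-range bookkeeping at the bottom of the loop follows one rule: open a new range only when it is not contiguous with the previous one. A small sketch of that merge rule, with an opaque pointer standing in for const MCSymbol * (the names here are illustrative, not CodeView API):

#include <utility>
#include <vector>

using Symbol = const void *; // stand-in for const MCSymbol *
using LabelRange = std::pair<Symbol, Symbol>;

// Append [Begin, End]; if the last range ends exactly where the new one
// begins, extend it in place instead of opening a new entry, mirroring
// the logic in collectVariableInfo above.
void appendRange(std::vector<LabelRange> &Ranges, Symbol Begin, Symbol End) {
  if (!Ranges.empty() && Ranges.back().second == Begin)
    Ranges.back().second = End;
  else
    Ranges.emplace_back(Begin, End);
}
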
Exemplo n.º 24
0
bool OptimalEdgeProfiler::runOnModule(Module &M) {
  Function *Main = M.getFunction("main");
  if (Main == 0) {
    errs() << "WARNING: cannot insert edge profiling into a module"
           << " with no main function!\n";
    return false;  // No main, no instrumentation!
  }

  // NumEdges counts all the edges that may be instrumented. Later it is
  // decided which edges to actually instrument, to achieve optimal profiling.
  // For the entry block a virtual edge (0,entry) is reserved, for each block
  // with no successors an edge (BB,0) is reserved. These edges are necessary
  // to calculate a truly optimal maximum spanning tree and thus an optimal
  // instrumentation.
  unsigned NumEdges = 0;

  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    if (F->isDeclaration()) continue;
    // Reserve space for (0,entry) edge.
    ++NumEdges;
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
      // Keep track of which blocks need to be instrumented.  We don't want to
      // instrument blocks that are added as the result of breaking critical
      // edges!
      if (BB->getTerminator()->getNumSuccessors() == 0) {
        // Reserve space for (BB,0) edge.
        ++NumEdges;
      } else {
        NumEdges += BB->getTerminator()->getNumSuccessors();
      }
    }
  }

  // In the profiling output a counter for each edge is reserved, but only a
  // few are used. This is done to be able to read the profile back in without
  // calculating the maximum spanning tree again; instead, each edge counter
  // that is not used is initialised with -1 to signal that it has to be
  // calculated from other edge counters when the profile info is read back
  // in.

  const Type *Int32 = Type::getInt32Ty(M.getContext());
  const ArrayType *ATy = ArrayType::get(Int32, NumEdges);
  GlobalVariable *Counters =
    new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
                       Constant::getNullValue(ATy), "OptEdgeProfCounters");
  NumEdgesInserted = 0;

  std::vector<Constant*> Initializer(NumEdges);
  Constant* Zero = ConstantInt::get(Int32, 0);
  Constant* Uncounted = ConstantInt::get(Int32, ProfileInfoLoader::Uncounted);

  // Instrument all of the edges not in MST...
  unsigned i = 0;
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    if (F->isDeclaration()) continue;
    DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n");

    // Calculate a Maximum Spanning Tree with the edge weights determined by
    // ProfileEstimator. ProfileEstimator also assigns weights to the virtual
    // edges (0,entry) and (BB,0) (for blocks with no successors), and these
    // edges also participate in the maximum spanning tree calculation.
    // Edges that end up in the MST are left uninstrumented; their counts are
    // later reconstructed from the instrumented non-MST edges.

    ProfileInfo::EdgeWeights ECs =
      getAnalysis<ProfileInfo>(*F).getEdgeWeights(F);
    std::vector<ProfileInfo::EdgeWeight> EdgeVector(ECs.begin(), ECs.end());
    MaximumSpanningTree<BasicBlock> MST (EdgeVector);
    std::stable_sort(MST.begin(),MST.end());

    // Check if (0,entry) not in the MST. If not, instrument edge
    // (IncrementCounterInBlock()) and set the counter initially to zero, if
    // the edge is in the MST the counter is initialised to -1.

    BasicBlock *entry = &(F->getEntryBlock());
    ProfileInfo::Edge edge = ProfileInfo::getEdge(0,entry);
    if (!std::binary_search(MST.begin(), MST.end(), edge)) {
      printEdgeCounter(edge,entry,i);
      IncrementCounterInBlock(entry, i, Counters); ++NumEdgesInserted;
      Initializer[i++] = (Zero);
    } else{
      Initializer[i++] = (Uncounted);
    }

    // InsertedBlocks contains all blocks that were inserted for splitting an
    // edge; these blocks do not have to be instrumented.
    DenseSet<BasicBlock*> InsertedBlocks;
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
      // Check if block was not inserted and thus does not have to be
      // instrumented.
      if (InsertedBlocks.count(BB)) continue;

      // Okay, we have to add a counter of each outgoing edge not in MST. If
      // the outgoing edge is not critical don't split it, just insert the
      // counter in the source or destination of the edge. Also, if the block
      // has no successors, the virtual edge (BB,0) is processed.
      TerminatorInst *TI = BB->getTerminator();
      if (TI->getNumSuccessors() == 0) {
        ProfileInfo::Edge edge = ProfileInfo::getEdge(BB,0);
        if (!std::binary_search(MST.begin(), MST.end(), edge)) {
          printEdgeCounter(edge,BB,i);
          IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
          Initializer[i++] = (Zero);
        } else{
          Initializer[i++] = (Uncounted);
        }
      }
      for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
        BasicBlock *Succ = TI->getSuccessor(s);
        ProfileInfo::Edge edge = ProfileInfo::getEdge(BB,Succ);
        if (!std::binary_search(MST.begin(), MST.end(), edge)) {

          // If the edge is critical, split it.
          bool wasInserted = SplitCriticalEdge(TI, s, this);
          Succ = TI->getSuccessor(s);
          if (wasInserted)
            InsertedBlocks.insert(Succ);

          // Okay, we are guaranteed that the edge is no longer critical.  If
          // we only have a single successor, insert the counter in this block,
          // otherwise insert it in the successor block.
          if (TI->getNumSuccessors() == 1) {
            // Insert counter at the start of the block
            printEdgeCounter(edge,BB,i);
            IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
          } else {
            // Insert counter at the start of the block
            printEdgeCounter(edge,Succ,i);
            IncrementCounterInBlock(Succ, i, Counters); ++NumEdgesInserted;
          }
          Initializer[i++] = (Zero);
        } else {
          Initializer[i++] = (Uncounted);
        }
      }
    }
  }

  // Check if the number of edges counted at first was the number of edges we
  // considered for instrumentation.
  assert(i==NumEdges && "the number of edges in counting array is wrong");

  // Assign the now completely defined initialiser to the array.
  Constant *init = ConstantArray::get(ATy, Initializer);
  Counters->setInitializer(init);

  // Add the initialization call to main.
  InsertProfilingInitCall(Main, "llvm_start_opt_edge_profiling", Counters);
  return true;
}
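The reason only non-MST edges receive counters is flow conservation: once the instrumented edge counts are known, every spanning-tree edge count can be derived from the counts entering and leaving its endpoints. A self-contained sketch of finding the edges left out of a maximum spanning tree with Kruskal's algorithm (the edge weights are hypothetical stand-ins for ProfileEstimator's output):

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

struct DSU {
  std::vector<int> P;
  explicit DSU(int N) : P(N) { std::iota(P.begin(), P.end(), 0); }
  int find(int X) { return P[X] == X ? X : P[X] = find(P[X]); }
  bool unite(int A, int B) {
    A = find(A); B = find(B);
    if (A == B) return false;
    P[A] = B;
    return true;
  }
};

struct Edge { int From, To; double Weight; };

// Kruskal on descending weights yields a *maximum* spanning tree; the
// leftover edges (returned here) are the ones a profiler instruments,
// since the tree-edge counts follow from flow conservation.
std::vector<Edge> edgesNotInMaxSpanningTree(int NumNodes,
                                            std::vector<Edge> Edges) {
  std::sort(Edges.begin(), Edges.end(),
            [](const Edge &L, const Edge &R) { return L.Weight > R.Weight; });
  DSU D(NumNodes);
  std::vector<Edge> NotInMST;
  for (const Edge &E : Edges)
    if (!D.unite(E.From, E.To))
      NotInMST.push_back(E);
  return NotInMST;
}

int main() {
  // Hypothetical CFG edges weighted by estimated execution frequency.
  std::vector<Edge> Edges = {{0, 1, 10}, {1, 2, 7}, {1, 3, 3}, {2, 3, 7}};
  for (const Edge &E : edgesNotInMaxSpanningTree(4, Edges))
    std::printf("instrument edge %d->%d\n", E.From, E.To);
  return 0;
}
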
Exemplo n.º 25
0
/// Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicator::duplicateInstruction(
    MachineInstr *MI, MachineBasicBlock *TailBB, MachineBasicBlock *PredBB,
    DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
    const DenseSet<unsigned> &UsedByPhi) {
  MachineInstr &NewMI = TII->duplicate(*PredBB, PredBB->end(), *MI);
  if (PreRegAlloc) {
    for (unsigned i = 0, e = NewMI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = NewMI.getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;
      if (MO.isDef()) {
        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
        unsigned NewReg = MRI->createVirtualRegister(RC);
        MO.setReg(NewReg);
        LocalVRMap.insert(std::make_pair(Reg, RegSubRegPair(NewReg, 0)));
        if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
          addSSAUpdateEntry(Reg, NewReg, PredBB);
      } else {
        auto VI = LocalVRMap.find(Reg);
        if (VI != LocalVRMap.end()) {
          // Need to make sure that the register class of the mapped register
          // will satisfy the constraints of the class of the register being
          // replaced.
          auto *OrigRC = MRI->getRegClass(Reg);
          auto *MappedRC = MRI->getRegClass(VI->second.Reg);
          const TargetRegisterClass *ConstrRC;
          if (VI->second.SubReg != 0) {
            ConstrRC = TRI->getMatchingSuperRegClass(MappedRC, OrigRC,
                                                     VI->second.SubReg);
            if (ConstrRC) {
              // The actual constraining (as in "find appropriate new class")
              // is done by getMatchingSuperRegClass, so now we only need to
              // change the class of the mapped register.
              MRI->setRegClass(VI->second.Reg, ConstrRC);
            }
          } else {
            // For mapped registers that do not have sub-registers, simply
            // restrict their class to match the original one.
            ConstrRC = MRI->constrainRegClass(VI->second.Reg, OrigRC);
          }

          if (ConstrRC) {
            // If the class constraining succeeded, we can simply replace
            // the old register with the mapped one.
            MO.setReg(VI->second.Reg);
            // We have Reg -> VI.Reg:VI.SubReg, so if Reg is used with a
            // sub-register, we need to compose the sub-register indices.
            MO.setSubReg(TRI->composeSubRegIndices(MO.getSubReg(),
                                                   VI->second.SubReg));
          } else {
            // The direct replacement is not possible due to failing register
            // class constraints. An explicit COPY is necessary; create one
            // that can be reused.
            auto *NewRC = MI->getRegClassConstraint(i, TII, TRI);
            if (NewRC == nullptr)
              NewRC = OrigRC;
            unsigned NewReg = MRI->createVirtualRegister(NewRC);
            BuildMI(*PredBB, MI, MI->getDebugLoc(),
                    TII->get(TargetOpcode::COPY), NewReg)
                .addReg(VI->second.Reg, 0, VI->second.SubReg);
            LocalVRMap.erase(VI);
            LocalVRMap.insert(std::make_pair(Reg, RegSubRegPair(NewReg, 0)));
            MO.setReg(NewReg);
            // The composed VI.Reg:VI.SubReg is replaced with NewReg, which
            // is equivalent to the whole register Reg. Hence, Reg:subreg
            // is same as NewReg:subreg, so keep the sub-register index
            // unchanged.
          }
          // Clear any kill flags from this operand.  The new register could
          // have uses after this one, so kills are not valid here.
          MO.setIsKill(false);
        }
      }
    }
  }
}
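The use-operand branch above implements a constrain-or-copy policy: reuse the mapped register directly when its class can be constrained to satisfy the operand, otherwise materialize a COPY into a fresh register and remap to that. A toy sketch of the policy; the types and the string-based "register class" check are illustrative only, not the LLVM API:

#include <string>
#include <unordered_map>

struct VReg { unsigned Id; std::string Class; };

struct RenameMap {
  std::unordered_map<unsigned, VReg> Map;
  unsigned NextId = 100;

  // Remap a use of OrigReg. If the mapped register's class satisfies the
  // constraint, reuse it directly; otherwise pretend to insert a COPY
  // into a fresh register of the required class and remap to that.
  VReg remapUse(const VReg &OrigReg) {
    auto It = Map.find(OrigReg.Id);
    if (It == Map.end())
      return OrigReg;                     // no duplicated local definition
    if (It->second.Class == OrigReg.Class)
      return It->second;                  // "constrainRegClass" succeeded
    VReg Copy{NextId++, OrigReg.Class};   // stands in for BuildMI(COPY)
    It->second = Copy;                    // later uses see the copied register
    return Copy;
  }
};
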
Exemplo n.º 26
0
std::unique_ptr<FunctionOutliningInfo>
PartialInlinerImpl::computeOutliningInfo(Function *F) {
  BasicBlock *EntryBlock = &F->front();
  BranchInst *BR = dyn_cast<BranchInst>(EntryBlock->getTerminator());
  if (!BR || BR->isUnconditional())
    return std::unique_ptr<FunctionOutliningInfo>();

  // Returns true if Succ is BB's successor
  auto IsSuccessor = [](BasicBlock *Succ, BasicBlock *BB) {
    return is_contained(successors(BB), Succ);
  };

  auto SuccSize = [](BasicBlock *BB) {
    return std::distance(succ_begin(BB), succ_end(BB));
  };

  auto IsReturnBlock = [](BasicBlock *BB) {
    TerminatorInst *TI = BB->getTerminator();
    return isa<ReturnInst>(TI);
  };

  auto GetReturnBlock = [&](BasicBlock *Succ1, BasicBlock *Succ2) {
    if (IsReturnBlock(Succ1))
      return std::make_tuple(Succ1, Succ2);
    if (IsReturnBlock(Succ2))
      return std::make_tuple(Succ2, Succ1);

    return std::make_tuple<BasicBlock *, BasicBlock *>(nullptr, nullptr);
  };

  // Detect a triangular shape:
  auto GetCommonSucc = [&](BasicBlock *Succ1, BasicBlock *Succ2) {
    if (IsSuccessor(Succ1, Succ2))
      return std::make_tuple(Succ1, Succ2);
    if (IsSuccessor(Succ2, Succ1))
      return std::make_tuple(Succ2, Succ1);

    return std::make_tuple<BasicBlock *, BasicBlock *>(nullptr, nullptr);
  };

  std::unique_ptr<FunctionOutliningInfo> OutliningInfo =
      llvm::make_unique<FunctionOutliningInfo>();

  BasicBlock *CurrEntry = EntryBlock;
  bool CandidateFound = false;
  do {
    // The number of blocks to be inlined has already reached
    // the limit. When MaxNumInlineBlocks is set to 0 or 1, this
    // disables partial inlining for the function.
    if (OutliningInfo->GetNumInlinedBlocks() >= MaxNumInlineBlocks)
      break;

    if (SuccSize(CurrEntry) != 2)
      break;

    BasicBlock *Succ1 = *succ_begin(CurrEntry);
    BasicBlock *Succ2 = *(succ_begin(CurrEntry) + 1);

    BasicBlock *ReturnBlock, *NonReturnBlock;
    std::tie(ReturnBlock, NonReturnBlock) = GetReturnBlock(Succ1, Succ2);

    if (ReturnBlock) {
      OutliningInfo->Entries.push_back(CurrEntry);
      OutliningInfo->ReturnBlock = ReturnBlock;
      OutliningInfo->NonReturnBlock = NonReturnBlock;
      CandidateFound = true;
      break;
    }

    BasicBlock *CommSucc;
    BasicBlock *OtherSucc;
    std::tie(CommSucc, OtherSucc) = GetCommonSucc(Succ1, Succ2);

    if (!CommSucc)
      break;

    OutliningInfo->Entries.push_back(CurrEntry);
    CurrEntry = OtherSucc;

  } while (true);

  if (!CandidateFound)
    return std::unique_ptr<FunctionOutliningInfo>();

  // Sanity-check the entries: there should not be any successors
  // (outside the entry set) other than {ReturnBlock, NonReturnBlock}.
  assert(OutliningInfo->Entries[0] == &F->front() &&
         "Function Entry must be the first in Entries vector");
  DenseSet<BasicBlock *> Entries;
  for (BasicBlock *E : OutliningInfo->Entries)
    Entries.insert(E);

  // Returns true if BB has a predecessor that is not in the Entries set.
  auto HasNonEntryPred = [Entries](BasicBlock *BB) {
    for (auto Pred : predecessors(BB)) {
      if (!Entries.count(Pred))
        return true;
    }
    return false;
  };
  auto CheckAndNormalizeCandidate =
      [Entries, HasNonEntryPred](FunctionOutliningInfo *OutliningInfo) {
        for (BasicBlock *E : OutliningInfo->Entries) {
          for (auto Succ : successors(E)) {
            if (Entries.count(Succ))
              continue;
            if (Succ == OutliningInfo->ReturnBlock)
              OutliningInfo->ReturnBlockPreds.push_back(E);
            else if (Succ != OutliningInfo->NonReturnBlock)
              return false;
          }
          // There should not be any outside incoming edges either:
          if (HasNonEntryPred(E))
            return false;
        }
        return true;
      };

  if (!CheckAndNormalizeCandidate(OutliningInfo.get()))
    return std::unique_ptr<FunctionOutliningInfo>();

  // Now grow the candidate's inlining region further by peeling off
  // dominating blocks from the outlining region:
  while (OutliningInfo->GetNumInlinedBlocks() < MaxNumInlineBlocks) {
    BasicBlock *Cand = OutliningInfo->NonReturnBlock;
    if (SuccSize(Cand) != 2)
      break;

    if (HasNonEntryPred(Cand))
      break;

    BasicBlock *Succ1 = *succ_begin(Cand);
    BasicBlock *Succ2 = *(succ_begin(Cand) + 1);

    BasicBlock *ReturnBlock, *NonReturnBlock;
    std::tie(ReturnBlock, NonReturnBlock) = GetReturnBlock(Succ1, Succ2);
    if (!ReturnBlock || ReturnBlock != OutliningInfo->ReturnBlock)
      break;

    if (NonReturnBlock->getSinglePredecessor() != Cand)
      break;

    // Now grow and update OutliningInfo:
    OutliningInfo->Entries.push_back(Cand);
    OutliningInfo->NonReturnBlock = NonReturnBlock;
    OutliningInfo->ReturnBlockPreds.push_back(Cand);
    Entries.insert(Cand);
  }

  return OutliningInfo;
}
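The entry-detection loop reduces to a small structural test on the CFG: a candidate entry must end in a two-way branch with exactly one arm returning. A sketch of that test on a toy block graph (the Block type is a stand-in for BasicBlock, not an LLVM type):

#include <vector>

// Toy CFG node: successor list plus a flag marking a return block,
// enough to mirror GetReturnBlock above.
struct Block {
  std::vector<Block *> Succs;
  bool IsReturn = false;
};

// Mirrors one step of the entry-growing loop: two successors, one of
// which returns; the other becomes the next candidate entry.
bool isOutliningEntry(const Block *BB, const Block *&Ret,
                      const Block *&NonRet) {
  if (BB->Succs.size() != 2)
    return false;
  const Block *S1 = BB->Succs[0], *S2 = BB->Succs[1];
  if (S1->IsReturn) { Ret = S1; NonRet = S2; return true; }
  if (S2->IsReturn) { Ret = S2; NonRet = S1; return true; }
  return false;
}
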
Exemplo n.º 27
0
bool DelinkPass::runOnModule(Module& M) {

  if(DelinkExcept.size()) {

    std::ifstream RFI(DelinkExcept.c_str());

    DenseSet<GlobalValue*> Keep;
  
    while(!RFI.eof()) {

      std::string line;
      std::getline(RFI, line);

      trim(line);

      if(line.empty())
	continue;

      GlobalValue* GV = M.getNamedValue(line);
      if(!GV) {
	
	errs() << "Warning: Skipped " << line << "\n";
	continue;

      }

      Keep.insert(GV);
  
    }

    for(Module::iterator it = M.begin(), itend = M.end(); it != itend; ++it) {

      if(!Keep.count(it))
	it->deleteBody();

    }

  }
  else {

    std::ifstream RFI(DelinkSymbols.c_str());
  
    while(!RFI.eof()) {

      std::string line;
      std::getline(RFI, line);

      trim(line);

      if(line.empty())
	continue;

      if(line == "__uClibc_main" || line == "__uClibc_main_spec")
	continue;
    
      GlobalValue* GV = M.getNamedValue(line);
      if(!GV) {

	errs() << "Warning: Skipped " << line << "\n";
	continue;

      }
    
      if(Function* F = dyn_cast<Function>(GV))
	F->deleteBody();

    }

  }

  return true;

}
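Both branches above lean on a trim helper that is presumably defined elsewhere in the same file. A minimal sketch of what it needs to do for the line-oriented symbol files read here: strip leading and trailing blanks, including the stray '\r' left by Windows line endings, so the names match Module::getNamedValue exactly.

#include <string>

// Minimal in-place whitespace trim, assuming the helper used above only
// needs to strip blanks and carriage returns around a symbol name.
static void trim(std::string &S) {
  const char *WS = " \t\r\n";
  std::string::size_type B = S.find_first_not_of(WS);
  if (B == std::string::npos) {
    S.clear();
    return;
  }
  std::string::size_type E = S.find_last_not_of(WS);
  S = S.substr(B, E - B + 1);
}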