Example #1
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
  // Splitting the critical edge to a landing pad block is non-trivial. Don't do
  // it in this generic function.
  if (Succ->isLandingPad())
    return nullptr;

  MachineFunction *MF = getParent();
  DebugLoc dl;  // FIXME: this is nowhere

  // Performance might be harmed on HW that implements branching using an exec
  // mask, where both sides of a branch are always executed.
  if (MF->getTarget().requiresStructuredCFG())
    return nullptr;

  // We may need to update this's terminator, but we can't do that if
  // AnalyzeBranch fails. If this uses a jump table, we won't touch it.
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
    return nullptr;

  // Avoid bugpoint weirdness: A block may end with a conditional branch that
  // jumps to the same MBB in either case. We have duplicate CFG edges in that
  // case that we can't handle. Since this never happens in properly optimized
  // code, just skip those edges.
  if (TBB && TBB == FBB) {
    DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
                 << getNumber() << '\n');
    return nullptr;
  }

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
  DEBUG(dbgs() << "Splitting critical edge:"
        " BB#" << getNumber()
        << " -- BB#" << NMBB->getNumber()
        << " -- BB#" << Succ->getNumber() << '\n');

  LiveIntervals *LIS = P->getAnalysisIfAvailable<LiveIntervals>();
  SlotIndexes *Indexes = P->getAnalysisIfAvailable<SlotIndexes>();
  if (LIS)
    LIS->insertMBBInMaps(NMBB);
  else if (Indexes)
    Indexes->insertMBBInMaps(NMBB);

  // On some targets like Mips, branches may kill virtual registers. Make sure
  // that LiveVariables is properly updated after updateTerminator replaces the
  // terminators.
  LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();

  // Collect a list of virtual registers killed by the terminators.
  SmallVector<unsigned, 4> KilledRegs;
  if (LV)
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;
      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0 ||
            !OI->isUse() || !OI->isKill() || OI->isUndef())
          continue;
        unsigned Reg = OI->getReg();
        if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
            LV->getVarInfo(Reg).removeKill(MI)) {
          KilledRegs.push_back(Reg);
          DEBUG(dbgs() << "Removing terminator kill: " << *MI);
          OI->setIsKill(false);
        }
      }
    }

  SmallVector<unsigned, 4> UsedRegs;
  if (LIS) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;

      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0)
          continue;

        unsigned Reg = OI->getReg();
        if (std::find(UsedRegs.begin(), UsedRegs.end(), Reg) == UsedRegs.end())
          UsedRegs.push_back(Reg);
      }
    }
  }

  ReplaceUsesOfBlockWith(Succ, NMBB);

  // If updateTerminator() removes instructions, we need to remove them from
  // SlotIndexes.
  SmallVector<MachineInstr*, 4> Terminators;
  if (Indexes) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      Terminators.push_back(I);
  }

  updateTerminator();

  if (Indexes) {
    SmallVector<MachineInstr*, 4> NewTerminators;
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      NewTerminators.push_back(I);

    for (SmallVectorImpl<MachineInstr*>::iterator I = Terminators.begin(),
        E = Terminators.end(); I != E; ++I) {
      if (std::find(NewTerminators.begin(), NewTerminators.end(), *I) ==
          NewTerminators.end())
       Indexes->removeMachineInstrFromMaps(*I);
    }
  }

  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
  NMBB->addSuccessor(Succ);
  if (!NMBB->isLayoutSuccessor(Succ)) {
    Cond.clear();
    MF->getSubtarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond,
                                                    dl);

    if (Indexes) {
      for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
           I != E; ++I) {
        // Some instructions may have been moved to NMBB by updateTerminator(),
        // so we first remove any instruction that already has an index.
        if (Indexes->hasIndex(I))
          Indexes->removeMachineInstrFromMaps(I);
        Indexes->insertMachineInstrInMaps(I);
      }
    }
  }

  // Fix PHI nodes in Succ so they refer to NMBB instead of this
  for (MachineBasicBlock::instr_iterator
         i = Succ->instr_begin(),e = Succ->instr_end();
       i != e && i->isPHI(); ++i)
    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
      if (i->getOperand(ni+1).getMBB() == this)
        i->getOperand(ni+1).setMBB(NMBB);

  // Inherit live-ins from the successor
  for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
         E = Succ->livein_end(); I != E; ++I)
    NMBB->addLiveIn(*I);

  // Update LiveVariables.
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  if (LV) {
    // Restore kills of virtual registers that were killed by the terminators.
    while (!KilledRegs.empty()) {
      unsigned Reg = KilledRegs.pop_back_val();
      for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
        if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
          continue;
        if (TargetRegisterInfo::isVirtualRegister(Reg))
          LV->getVarInfo(Reg).Kills.push_back(I);
        DEBUG(dbgs() << "Restored terminator kill: " << *I);
        break;
      }
    }
    // Update relevant live-through information.
    LV->addNewBlock(NMBB, this, Succ);
  }

  if (LIS) {
    // After splitting the edge and updating SlotIndexes, live intervals may be
    // in one of two situations, depending on whether this block was the last in
    // the function. If the original block was the last in the function, all live
    // intervals will end prior to the beginning of the new split block. If the
    // original block was not at the end of the function, all live intervals will
    // extend to the end of the new split block.

    bool isLastMBB =
      std::next(MachineFunction::iterator(NMBB)) == getParent()->end();

    SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
    SlotIndex PrevIndex = StartIndex.getPrevSlot();
    SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);

    // Find the registers used from NMBB in PHIs in Succ.
    SmallSet<unsigned, 8> PHISrcRegs;
    for (MachineBasicBlock::instr_iterator
         I = Succ->instr_begin(), E = Succ->instr_end();
         I != E && I->isPHI(); ++I) {
      for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
        if (I->getOperand(ni+1).getMBB() == NMBB) {
          MachineOperand &MO = I->getOperand(ni);
          unsigned Reg = MO.getReg();
          PHISrcRegs.insert(Reg);
          if (MO.isUndef())
            continue;

          LiveInterval &LI = LIS->getInterval(Reg);
          VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
          assert(VNI && "PHI sources should be live out of their predecessors.");
          LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
        }
      }
    }

    MachineRegisterInfo *MRI = &getParent()->getRegInfo();
    for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
      unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
      if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
        continue;

      LiveInterval &LI = LIS->getInterval(Reg);
      if (!LI.liveAt(PrevIndex))
        continue;

      bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
      if (isLiveOut && isLastMBB) {
        VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
        assert(VNI && "LiveInterval should have VNInfo where it is live.");
        LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
      } else if (!isLiveOut && !isLastMBB) {
        LI.removeSegment(StartIndex, EndIndex);
      }
    }

    // Update all intervals for registers whose uses may have been modified by
    // updateTerminator().
    LIS->repairIntervalsInRange(this, getFirstTerminator(), end(), UsedRegs);
  }

  if (MachineDominatorTree *MDT =
      P->getAnalysisIfAvailable<MachineDominatorTree>())
    MDT->recordSplitCriticalEdge(this, Succ, NMBB);

  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
        if (TIL == DestLoop) {
          // Both in the same loop, the NMBB joins loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == Succ &&
                 "Should not create irreducible loops!");
          if (MachineLoop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NMBB, MLI->getBase());
        }
      }
    }

  return NMBB;
}
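
A machine pass would drive the routine above once per critical edge; a minimal
driver might look like the following sketch (the helper name and the loop are
assumptions for illustration, not code from the function above, and the calling
pass is assumed to keep alive whatever analyses it wants updated):

static bool splitAllCriticalEdges(MachineBasicBlock &MBB, Pass &P) {
  bool Changed = false;
  // Copy the successor list first: a successful split edits it in place.
  SmallVector<MachineBasicBlock *, 4> Succs(MBB.succ_begin(), MBB.succ_end());
  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
    MachineBasicBlock *Succ = Succs[i];
    // An edge is critical when the source has multiple successors and the
    // destination has multiple predecessors.
    if (MBB.succ_size() > 1 && Succ->pred_size() > 1)
      Changed |= MBB.SplitCriticalEdge(Succ, &P) != nullptr;
  }
  return Changed;
}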
Example #2
void cl::ParseCommandLineOptions(int argc, char **argv,
                                 const char *Overview, bool ReadResponseFiles) {
  // Process all registered options.
  SmallVector<Option*, 4> PositionalOpts;
  SmallVector<Option*, 4> SinkOpts;
  StringMap<Option*> Opts;
  GetOptionInfo(PositionalOpts, SinkOpts, Opts);

  assert((!Opts.empty() || !PositionalOpts.empty()) &&
         "No options specified!");

  // Expand response files.
  std::vector<char*> newArgv;
  if (ReadResponseFiles) {
    newArgv.push_back(strdup(argv[0]));
    ExpandResponseFiles(argc, argv, newArgv);
    argv = &newArgv[0];
    argc = static_cast<int>(newArgv.size());
  }

  // Copy the program name into ProgName, making sure not to overflow it.
  std::string ProgName = sys::path::filename(argv[0]);
  size_t Len = std::min(ProgName.size(), size_t(79));
  memcpy(ProgramName, ProgName.data(), Len);
  ProgramName[Len] = '\0';

  ProgramOverview = Overview;
  bool ErrorParsing = false;

  // Check out the positional arguments to collect information about them.
  unsigned NumPositionalRequired = 0;

  // Determine whether or not there is an unlimited number of positionals.
  bool HasUnlimitedPositionals = false;

  Option *ConsumeAfterOpt = 0;
  if (!PositionalOpts.empty()) {
    if (PositionalOpts[0]->getNumOccurrencesFlag() == cl::ConsumeAfter) {
      assert(PositionalOpts.size() > 1 &&
             "Cannot specify cl::ConsumeAfter without a positional argument!");
      ConsumeAfterOpt = PositionalOpts[0];
    }

    // Calculate how many positional values are _required_.
    bool UnboundedFound = false;
    // Start the scan at index 1 if slot 0 holds the ConsumeAfter option.
    for (size_t i = ConsumeAfterOpt != 0, e = PositionalOpts.size();
         i != e; ++i) {
      Option *Opt = PositionalOpts[i];
      if (RequiresValue(Opt))
        ++NumPositionalRequired;
      else if (ConsumeAfterOpt) {
        // ConsumeAfter cannot be combined with "optional" positional options
        // unless there is only one positional argument...
        if (PositionalOpts.size() > 2)
          ErrorParsing |=
            Opt->error("error - this positional option will never be matched, "
                       "because it does not Require a value, and a "
                       "cl::ConsumeAfter option is active!");
      } else if (UnboundedFound && !Opt->ArgStr[0]) {
        // This option does not "require" a value...  Make sure this option is
        // not specified after an option that eats all extra arguments, or this
        // one will never get any!
        //
        ErrorParsing |= Opt->error("error - option can never match, because "
                                   "another positional argument will match an "
                                   "unbounded number of values, and this option"
                                   " does not require a value!");
      }
      UnboundedFound |= EatsUnboundedNumberOfValues(Opt);
    }
    HasUnlimitedPositionals = UnboundedFound || ConsumeAfterOpt;
  }

  // PositionalVals - A vector of "positional" arguments we accumulate into
  // the process at the end.
  //
  SmallVector<std::pair<StringRef,unsigned>, 4> PositionalVals;

  // If the program has named positional arguments, and one of those names has
  // been encountered, keep track of which positional argument was named.
  // Otherwise put the positional args into the PositionalVals list...
  Option *ActivePositionalArg = 0;

  // Loop over all of the arguments... processing them.
  bool DashDashFound = false;  // Have we read '--'?
  for (int i = 1; i < argc; ++i) {
    Option *Handler = 0;
    Option *NearestHandler = 0;
    std::string NearestHandlerString;
    StringRef Value;
    StringRef ArgName = "";

    // If the option list changed, this means that some command line
    // option has just been registered or deregistered.  This can occur in
    // response to things like -load, etc.  If this happens, rescan the options.
    if (OptionListChanged) {
      PositionalOpts.clear();
      SinkOpts.clear();
      Opts.clear();
      GetOptionInfo(PositionalOpts, SinkOpts, Opts);
      OptionListChanged = false;
    }

    // Check to see if this is a positional argument.  This argument is
    // considered to be positional if it doesn't start with '-', if it is "-"
    // itself, or if we have seen "--" already.
    //
    if (argv[i][0] != '-' || argv[i][1] == 0 || DashDashFound) {
      // Positional argument!
      if (ActivePositionalArg) {
        ProvidePositionalOption(ActivePositionalArg, argv[i], i);
        continue;  // We are done!
      }

      if (!PositionalOpts.empty()) {
        PositionalVals.push_back(std::make_pair(argv[i],i));

        // All of the positional arguments have been fulfilled, give the rest to
        // the consume after option... if it's specified...
        //
        if (PositionalVals.size() >= NumPositionalRequired &&
            ConsumeAfterOpt != 0) {
          for (++i; i < argc; ++i)
            PositionalVals.push_back(std::make_pair(argv[i],i));
          break;   // Handle outside of the argument processing loop...
        }

        // Delay processing positional arguments until the end...
        continue;
      }
    } else if (argv[i][0] == '-' && argv[i][1] == '-' && argv[i][2] == 0 &&
               !DashDashFound) {
      DashDashFound = true;  // This is the mythical "--"?
      continue;              // Don't try to process it as an argument itself.
    } else if (ActivePositionalArg &&
               (ActivePositionalArg->getMiscFlags() & PositionalEatsArgs)) {
      // If there is a positional argument eating options, check to see if this
      // option is another positional argument.  If so, treat it as an argument,
      // otherwise feed it to the eating positional.
      ArgName = argv[i]+1;
      // Eat leading dashes.
      while (!ArgName.empty() && ArgName[0] == '-')
        ArgName = ArgName.substr(1);

      Handler = LookupOption(ArgName, Value, Opts);
      if (!Handler || Handler->getFormattingFlag() != cl::Positional) {
        ProvidePositionalOption(ActivePositionalArg, argv[i], i);
        continue;  // We are done!
      }

    } else {     // We start with a '-', must be an argument.
      ArgName = argv[i]+1;
      // Eat leading dashes.
      while (!ArgName.empty() && ArgName[0] == '-')
        ArgName = ArgName.substr(1);

      Handler = LookupOption(ArgName, Value, Opts);

      // Check to see if this "option" is really a prefixed or grouped argument.
      if (Handler == 0)
        Handler = HandlePrefixedOrGroupedOption(ArgName, Value,
                                                ErrorParsing, Opts);

      // Otherwise, look for the closest available option to report to the user
      // in the upcoming error.
      if (Handler == 0 && SinkOpts.empty())
        NearestHandler = LookupNearestOption(ArgName, Opts,
                                             NearestHandlerString);
    }

    if (Handler == 0) {
      if (SinkOpts.empty()) {
        errs() << ProgramName << ": Unknown command line argument '"
             << argv[i] << "'.  Try: '" << argv[0] << " -help'\n";

        if (NearestHandler) {
          // If we know a near match, report it as well.
          errs() << ProgramName << ": Did you mean '-"
                 << NearestHandlerString << "'?\n";
        }

        ErrorParsing = true;
      } else {
        for (SmallVectorImpl<Option*>::iterator I = SinkOpts.begin(),
               E = SinkOpts.end(); I != E ; ++I)
          (*I)->addOccurrence(i, "", argv[i]);
      }
      continue;
    }

    // If this is a named positional argument, just remember that it is the
    // active one...
    if (Handler->getFormattingFlag() == cl::Positional)
      ActivePositionalArg = Handler;
    else
      ErrorParsing |= ProvideOption(Handler, ArgName, Value, argc, argv, i);
  }

  // Check and handle positional arguments now...
  if (NumPositionalRequired > PositionalVals.size()) {
    errs() << ProgramName
           << ": Not enough positional command line arguments specified!\n"
           << "Must specify at least " << NumPositionalRequired
           << " positional arguments: See: " << argv[0] << " -help\n";

    ErrorParsing = true;
  } else if (!HasUnlimitedPositionals &&
             PositionalVals.size() > PositionalOpts.size()) {
    errs() << ProgramName
           << ": Too many positional arguments specified!\n"
           << "Can specify at most " << PositionalOpts.size()
           << " positional arguments: See: " << argv[0] << " -help\n";
    ErrorParsing = true;

  } else if (ConsumeAfterOpt == 0) {
    // Positional args have already been handled if ConsumeAfter is specified.
    unsigned ValNo = 0, NumVals = static_cast<unsigned>(PositionalVals.size());
    for (size_t i = 0, e = PositionalOpts.size(); i != e; ++i) {
      if (RequiresValue(PositionalOpts[i])) {
        ProvidePositionalOption(PositionalOpts[i], PositionalVals[ValNo].first,
                                PositionalVals[ValNo].second);
        ValNo++;
        --NumPositionalRequired;  // We fulfilled our duty...
      }

      // If we _can_ give this option more arguments, do so now, as long as we
      // do not give it values that others need.  'Done' controls whether the
      // option even _WANTS_ any more.
      //
      bool Done = PositionalOpts[i]->getNumOccurrencesFlag() == cl::Required;
      while (NumVals-ValNo > NumPositionalRequired && !Done) {
        switch (PositionalOpts[i]->getNumOccurrencesFlag()) {
        case cl::Optional:
          Done = true;          // Optional arguments want _at most_ one value
          // FALL THROUGH
        case cl::ZeroOrMore:    // Zero or more will take all they can get...
        case cl::OneOrMore:     // One or more will take all they can get...
          ProvidePositionalOption(PositionalOpts[i],
                                  PositionalVals[ValNo].first,
                                  PositionalVals[ValNo].second);
          ValNo++;
          break;
        default:
          llvm_unreachable("Internal error, unexpected NumOccurrences flag in "
                 "positional argument processing!");
        }
      }
    }
  } else {
    assert(ConsumeAfterOpt && NumPositionalRequired <= PositionalVals.size());
    unsigned ValNo = 0;
    for (size_t j = 1, e = PositionalOpts.size(); j != e; ++j)
      if (RequiresValue(PositionalOpts[j])) {
        ErrorParsing |= ProvidePositionalOption(PositionalOpts[j],
                                                PositionalVals[ValNo].first,
                                                PositionalVals[ValNo].second);
        ValNo++;
      }

    // Handle the case where there is just one positional option, and it's
    // optional.  In this case, we want to give JUST THE FIRST option to the
    // positional option and keep the rest for the consume after.  The above
    // loop would have assigned no values to positional options in this case.
    //
    if (PositionalOpts.size() == 2 && ValNo == 0 && !PositionalVals.empty()) {
      ErrorParsing |= ProvidePositionalOption(PositionalOpts[1],
                                              PositionalVals[ValNo].first,
                                              PositionalVals[ValNo].second);
      ValNo++;
    }

    // Hand over all of the rest of the arguments to the
    // cl::ConsumeAfter command line option...
    for (; ValNo != PositionalVals.size(); ++ValNo)
      ErrorParsing |= ProvidePositionalOption(ConsumeAfterOpt,
                                              PositionalVals[ValNo].first,
                                              PositionalVals[ValNo].second);
  }

  // Loop over args and make sure all required args are specified!
  for (StringMap<Option*>::iterator I = Opts.begin(),
         E = Opts.end(); I != E; ++I) {
    switch (I->second->getNumOccurrencesFlag()) {
    case Required:
    case OneOrMore:
      if (I->second->getNumOccurrences() == 0) {
        I->second->error("must be specified at least once!");
        ErrorParsing = true;
      }
      // Fall through
    default:
      break;
    }
  }

  // Now that we know if -debug is specified, we can use it.
  // Note that if ReadResponseFiles == true, this must be done before the
  // memory allocated for the expanded command line is free()d below.
  DEBUG(dbgs() << "Args: ";
        for (int i = 0; i < argc; ++i)
          dbgs() << argv[i] << ' ';
        dbgs() << '\n';
       );

  // Free the memory allocated by ExpandResponseFiles, if we expanded any
  // response files above. This is the free() the comment on the DEBUG block
  // refers to.
  if (ReadResponseFiles)
    for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
         i != e; ++i)
      free(*i);

  // If we had an error processing our arguments, don't let the program run.
  if (ErrorParsing) exit(1);
}
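
For reference, a minimal client of this parser might look like the sketch below
(the option names InputFilename and Quiet are illustrative, not part of the
code above):

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Each static cl::opt registers itself globally; GetOptionInfo() above is
// what collects these registered options before parsing begins.
static cl::opt<std::string> InputFilename(cl::Positional,
                                          cl::desc("<input file>"),
                                          cl::init("-"));
static cl::opt<bool> Quiet("q", cl::desc("suppress output"));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "example tool\n");
  if (!Quiet)
    outs() << "input: " << InputFilename.getValue() << '\n';
  return 0;
}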
Example #3
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void llvm::finalizeBundle(MachineBasicBlock &MBB,
                          MachineBasicBlock::instr_iterator FirstMI,
                          MachineBasicBlock::instr_iterator LastMI) {
  assert(FirstMI != LastMI && "Empty bundle?");
  MIBundleBuilder Bundle(MBB, FirstMI, LastMI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineInstrBuilder MIB =
      BuildMI(MF, FirstMI->getDebugLoc(), TII->get(TargetOpcode::BUNDLE));
  Bundle.prepend(MIB);

  SmallVector<unsigned, 32> LocalDefs;
  SmallSet<unsigned, 32> LocalDefSet;
  SmallSet<unsigned, 8> DeadDefSet;
  SmallSet<unsigned, 16> KilledDefSet;
  SmallVector<unsigned, 8> ExternUses;
  SmallSet<unsigned, 8> ExternUseSet;
  SmallSet<unsigned, 8> KilledUseSet;
  SmallSet<unsigned, 8> UndefUseSet;
  SmallVector<MachineOperand*, 4> Defs;
  for (; FirstMI != LastMI; ++FirstMI) {
    for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = FirstMI->getOperand(i);
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        Defs.push_back(&MO);
        continue;
      }

      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      assert(TargetRegisterInfo::isPhysicalRegister(Reg));
      if (LocalDefSet.count(Reg)) {
        MO.setIsInternalRead();
        if (MO.isKill())
          // Internal def is now killed.
          KilledDefSet.insert(Reg);
      } else {
        if (ExternUseSet.insert(Reg).second) {
          ExternUses.push_back(Reg);
          if (MO.isUndef())
            UndefUseSet.insert(Reg);
        }
        if (MO.isKill())
          // External def is now killed.
          KilledUseSet.insert(Reg);
      }
    }

    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
      MachineOperand &MO = *Defs[i];
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;

      if (LocalDefSet.insert(Reg).second) {
        LocalDefs.push_back(Reg);
        if (MO.isDead()) {
          DeadDefSet.insert(Reg);
        }
      } else {
        // Re-defined inside the bundle, it's no longer killed.
        KilledDefSet.erase(Reg);
        if (!MO.isDead())
          // Previously defined but dead.
          DeadDefSet.erase(Reg);
      }

      if (!MO.isDead()) {
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          unsigned SubReg = *SubRegs;
          if (LocalDefSet.insert(SubReg).second)
            LocalDefs.push_back(SubReg);
        }
      }
    }

    Defs.clear();
  }

  SmallSet<unsigned, 32> Added;
  for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
    unsigned Reg = LocalDefs[i];
    if (Added.insert(Reg).second) {
      // If it's not live beyond end of the bundle, mark it dead.
      bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
      MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
                 getImplRegState(true));
    }
  }

  for (unsigned i = 0, e = ExternUses.size(); i != e; ++i) {
    unsigned Reg = ExternUses[i];
    bool isKill = KilledUseSet.count(Reg);
    bool isUndef = UndefUseSet.count(Reg);
    MIB.addReg(Reg, getKillRegState(isKill) | getUndefRegState(isUndef) |
               getImplRegState(true));
  }
}
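
A caller that has just emitted a group of instructions can bundle them like
this (a sketch; it assumes MBB already holds at least two instructions that
are legal to issue together):

  // Bundle the first two instructions of MBB. LastMI is exclusive, matching
  // the [FirstMI, LastMI) convention documented above.
  MachineBasicBlock::instr_iterator FirstMI = MBB.instr_begin();
  MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI, 2);
  llvm::finalizeBundle(MBB, FirstMI, LastMI);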
Example #4
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function.  At this point, we know that it's
/// safe to do so.
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                             SmallPtrSetImpl<Argument*> &ArgsToPromote,
                             SmallPtrSetImpl<Argument*> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  typedef std::set<std::pair<Type *, IndicesVector>> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument*, IndicesVector>, LoadInst*> OriginalLoads;

  // Attribute - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the parameter
  // attributes are lost.
  SmallVector<AttributeSet, 8> AttributesVec;
  const AttributeSet &PAL = F->getAttributes();

  // Add any return attributes.
  if (PAL.hasAttributes(AttributeSet::ReturnIndex))
    AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                              PAL.getRetAttributes()));

  // First, determine the new argument list
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(&*I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      Params.insert(Params.end(), STy->element_begin(), STy->element_end());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(&*I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      AttributeSet attrs = PAL.getParamAttributes(ArgIndex);
      if (attrs.hasAttributes(ArgIndex)) {
        AttrBuilder B(attrs, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Params.size(), B));
      }
    } else if (I->use_empty()) {
      // Dead argument (these are always marked as promotable).
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads.

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
      for (User *U : I->users()) {
        Instruction *UI = cast<Instruction>(U);
        Type *SrcTy;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          SrcTy = L->getType();
        else
          SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(std::make_pair(SrcTy, Indices));
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load; we will use it only to update Alias Analysis.
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(
            cast<PointerType>(I->getType()->getScalarType())->getElementType(),
            SI->second));
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.
  if (PAL.hasAttributes(AttributeSet::FunctionIndex))
    AttributesVec.push_back(AttributeSet::get(FTy->getContext(),
                                              PAL.getFnAttributes()));

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);

  // Patch the pointer to LLVM function in debug info descriptor.
  NF->setSubprogram(F->getSubprogram());
  F->setSubprogram(nullptr);

  DEBUG(dbgs() << "ARG PROMOTION:  Promoting to:" << *NF << "\n"
        << "From: " << *F);
  
  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeSet::get(F->getContext(), AttributesVec));
  AttributesVec.clear();

  F->getParent()->getFunctionList().insert(F->getIterator(), NF);
  NF->takeName(F);

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS(F->user_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttributeSet &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (CallPAL.hasAttributes(AttributeSet::ReturnIndex))
      AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                                CallPAL.getRetAttributes()));

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
        Args.push_back(*AI);          // Unmodified argument

        if (CallPAL.hasAttributes(ArgIndex)) {
          AttrBuilder B(CallPAL, ArgIndex);
          AttributesVec.
            push_back(AttributeSet::get(F->getContext(), Args.size(), B));
        }
      } else if (ByValArgsToTransform.count(&*I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
              ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(
              STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[std::make_pair(&*I, SI->second)];
          if (!SI->second.empty()) {
            Ops.reserve(SI->second.size());
            Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->second.begin(),
                                               IE = SI->second.end();
                 II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) : 
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(SI->first, V, Ops,
                                          V->getName() + ".idx", Call);
            Ops.clear();
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          // Transfer the AA info too.
          AAMDNodes AAInfo;
          OrigLoad->getAAMetadata(AAInfo);
          newLoad->setAAMetadata(AAInfo);

          Args.push_back(newLoad);
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (CallPAL.hasAttributes(ArgIndex)) {
        AttrBuilder B(CallPAL, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Args.size(), B));
      }
    }

    // Add any function attributes.
    if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
      AttributesVec.push_back(AttributeSet::get(Call->getContext(),
                                                CallPAL.getFnAttributes()));

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttributeSet::get(II->getContext(),
                                                            AttributesVec));
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttributeSet::get(New->getContext(),
                                                          AttributesVec));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    New->setDebugLoc(Call->getDebugLoc());
    Args.clear();
    AttributesVec.clear();

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(CS, CallSite(New), NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(&*I2);
      I2->takeName(&*I);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(&*I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = &NF->begin()->front();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = GetElementPtrInst::Create(
            AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
            InsertPt);
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(&*I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(&*I);

      // If the alloca is used in a call, we must clear the tail flag since
      // the callee now uses an alloca from the caller.
      for (User *U : TheAlloca->users()) {
        CallInst *Call = dyn_cast<CallInst>(U);
        if (!Call)
          continue;
        Call->setTailCall(false);
      }
      continue;
    }

    if (I->use_empty())
      continue;

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[&*I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->second.empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(&*I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             It->second != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i)
          NewName += "." + utostr(Operands[i]);
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions.  Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(&*TheArg);
          L->eraseFromParent();
        }
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  NF_CGN->stealCalledFunctionsFrom(CG[F]);
  
  // Now that the old function is dead, delete it.  If there is a dangling
  // reference to the CallgraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);
  
  return NF_CGN;
}
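
At the source level, the rewrite DoPromotion performs corresponds roughly to
the following C++ analogy (illustrative only; the pass itself operates on LLVM
IR, not C++):

// Before promotion: the callee receives a pointer and only ever loads from it.
static int callee(const int *P) { return *P + 1; }
int caller(int X) { return callee(&X); }

// After promotion: the load is hoisted into the caller and the callee takes
// the loaded value directly, so X no longer needs to be addressable.
static int calleePromoted(int PVal) { return PVal + 1; }
int callerPromoted(int X) { return calleePromoted(X); }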
Example #5
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
  MachineFunction *MF = getParent();
  DebugLoc dl;  // FIXME: this is nowhere

  // We may need to update this's terminator, but we can't do that if
  // AnalyzeBranch fails. If this uses a jump table, we won't touch it.
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
    return NULL;

  // Avoid bugpoint weirdness: A block may end with a conditional branch that
  // jumps to the same MBB in either case. We have duplicate CFG edges in that
  // case that we can't handle. Since this never happens in properly optimized
  // code, just skip those edges.
  if (TBB && TBB == FBB) {
    DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
                 << getNumber() << '\n');
    return NULL;
  }

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
  DEBUG(dbgs() << "Splitting critical edge:"
        " BB#" << getNumber()
        << " -- BB#" << NMBB->getNumber()
        << " -- BB#" << Succ->getNumber() << '\n');

  // On some targets like Mips, branches may kill virtual registers. Make sure
  // that LiveVariables is properly updated after updateTerminator replaces the
  // terminators.
  LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();

  // Collect a list of virtual registers killed by the terminators.
  SmallVector<unsigned, 4> KilledRegs;
  if (LV)
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;
      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0 ||
            !OI->isUse() || !OI->isKill() || OI->isUndef())
          continue;
        unsigned Reg = OI->getReg();
        if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
            LV->getVarInfo(Reg).removeKill(MI)) {
          KilledRegs.push_back(Reg);
          DEBUG(dbgs() << "Removing terminator kill: " << *MI);
          OI->setIsKill(false);
        }
      }
    }

  ReplaceUsesOfBlockWith(Succ, NMBB);
  updateTerminator();

  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
  NMBB->addSuccessor(Succ);
  if (!NMBB->isLayoutSuccessor(Succ)) {
    Cond.clear();
    MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
  }

  // Fix PHI nodes in Succ so they refer to NMBB instead of this
  for (MachineBasicBlock::instr_iterator
         i = Succ->instr_begin(),e = Succ->instr_end();
       i != e && i->isPHI(); ++i)
    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
      if (i->getOperand(ni+1).getMBB() == this)
        i->getOperand(ni+1).setMBB(NMBB);

  // Inherit live-ins from the successor
  for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
         E = Succ->livein_end(); I != E; ++I)
    NMBB->addLiveIn(*I);

  // Update LiveVariables.
  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
  if (LV) {
    // Restore kills of virtual registers that were killed by the terminators.
    while (!KilledRegs.empty()) {
      unsigned Reg = KilledRegs.pop_back_val();
      for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
        if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
          continue;
        if (TargetRegisterInfo::isVirtualRegister(Reg))
          LV->getVarInfo(Reg).Kills.push_back(I);
        DEBUG(dbgs() << "Restored terminator kill: " << *I);
        break;
      }
    }
    // Update relevant live-through information.
    LV->addNewBlock(NMBB, this, Succ);
  }

  if (MachineDominatorTree *MDT =
      P->getAnalysisIfAvailable<MachineDominatorTree>()) {
    // Update dominator information.
    MachineDomTreeNode *SuccDTNode = MDT->getNode(Succ);

    bool IsNewIDom = true;
    for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
         PI != E; ++PI) {
      MachineBasicBlock *PredBB = *PI;
      if (PredBB == NMBB)
        continue;
      if (!MDT->dominates(SuccDTNode, MDT->getNode(PredBB))) {
        IsNewIDom = false;
        break;
      }
    }

    // We know "this" dominates the newly created basic block.
    MachineDomTreeNode *NewDTNode = MDT->addNewBlock(NMBB, this);

    // If all the other predecessors of "Succ" are dominated by "Succ" itself
    // then the new block is the new immediate dominator of "Succ". Otherwise,
    // the new block doesn't dominate anything.
    if (IsNewIDom)
      MDT->changeImmediateDominator(SuccDTNode, NewDTNode);
  }

  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
        if (TIL == DestLoop) {
          // Both in the same loop, the NMBB joins loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == Succ &&
                 "Should not create irreducible loops!");
          if (MachineLoop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NMBB, MLI->getBase());
        }
      }
    }

  return NMBB;
}
Example #6
void Module::getExportedModules(SmallVectorImpl<Module *> &Exported) const {
  // All non-explicit submodules are exported.
  for (std::vector<Module *>::const_iterator I = SubModules.begin(),
                                             E = SubModules.end();
       I != E; ++I) {
    Module *Mod = *I;
    if (!Mod->IsExplicit)
      Exported.push_back(Mod);
  }

  // Find re-exported modules by filtering the list of imported modules.
  bool AnyWildcard = false;
  bool UnrestrictedWildcard = false;
  SmallVector<Module *, 4> WildcardRestrictions;
  for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
    Module *Mod = Exports[I].getPointer();
    if (!Exports[I].getInt()) {
      // Export a named module directly; no wildcards involved.
      Exported.push_back(Mod);

      continue;
    }

    // Wildcard export: export all of the imported modules that match
    // the given pattern.
    AnyWildcard = true;
    if (UnrestrictedWildcard)
      continue;

    if (Module *Restriction = Exports[I].getPointer())
      WildcardRestrictions.push_back(Restriction);
    else {
      WildcardRestrictions.clear();
      UnrestrictedWildcard = true;
    }
  }

  // If there were any wildcards, push any imported modules that were
  // re-exported by the wildcard restriction.
  if (!AnyWildcard)
    return;

  for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
    Module *Mod = Imports[I];
    bool Acceptable = UnrestrictedWildcard;
    if (!Acceptable) {
      // Check whether this module meets one of the restrictions.
      for (unsigned R = 0, NR = WildcardRestrictions.size(); R != NR; ++R) {
        Module *Restriction = WildcardRestrictions[R];
        if (Mod == Restriction || Mod->isSubModuleOf(Restriction)) {
          Acceptable = true;
          break;
        }
      }
    }

    if (!Acceptable)
      continue;

    Exported.push_back(Mod);
  }
}
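
A sketch of a caller (the diagnostic loop is illustrative; Mod is assumed to
be a clang::Module*):

  // Collect everything Mod exports, directly or through wildcard re-exports.
  SmallVector<Module *, 8> Exported;
  Mod->getExportedModules(Exported);
  for (unsigned I = 0, N = Exported.size(); I != N; ++I)
    llvm::errs() << Exported[I]->getFullModuleName() << '\n';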
Example #7
int main(int argc_, const char **argv_) {
  llvm::sys::PrintStackTraceOnErrorSignal();
  llvm::PrettyStackTraceProgram X(argc_, argv_);

  if (llvm::sys::Process::FixupStandardFileDescriptors())
    return 1;

  SmallVector<const char *, 256> argv;
  llvm::SpecificBumpPtrAllocator<char> ArgAllocator;
  std::error_code EC = llvm::sys::Process::GetArgumentVector(
      argv, llvm::makeArrayRef(argv_, argc_), ArgAllocator);
  if (EC) {
    llvm::errs() << "error: couldn't get arguments: " << EC.message() << '\n';
    return 1;
  }

  std::string ProgName = normalizeProgramName(argv[0]);
  const DriverSuffix *DS = parseDriverSuffix(ProgName);

  llvm::BumpPtrAllocator A;
  llvm::StringSaver Saver(A);

  // Parse response files using the GNU syntax, unless we're in CL mode. There
  // are two ways to put clang in CL compatibility mode: argv[0] is either
  // clang-cl or cl, or --driver-mode=cl is on the command line. The normal
  // command line parsing can't happen until after response file parsing, so we
  // have to manually search for a --driver-mode=cl argument the hard way.
  // Finally, our -cc1 tools don't care which tokenization mode we use because
  // response files written by clang will tokenize the same way in either mode.
  llvm::cl::TokenizerCallback Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
  if ((DS && DS->ModeFlag && strcmp(DS->ModeFlag, "--driver-mode=cl") == 0) ||
      std::find_if(argv.begin(), argv.end(), [](const char *F) {
        return F && strcmp(F, "--driver-mode=cl") == 0;
      }) != argv.end()) {
    Tokenizer = &llvm::cl::TokenizeWindowsCommandLine;
  }

  // Determine whether we want nullptr markers in argv to indicate end-of-lines
  // from response files. We only use this for the /LINK driver argument.
  bool MarkEOLs = true;
  if (argv.size() > 1 && StringRef(argv[1]).startswith("-cc1"))
    MarkEOLs = false;
  llvm::cl::ExpandResponseFiles(Saver, Tokenizer, argv, MarkEOLs);

  // Handle -cc1 integrated tools, even if -cc1 was expanded from a response
  // file.
  auto FirstArg = std::find_if(argv.begin() + 1, argv.end(),
                               [](const char *A) { return A != nullptr; });
  if (FirstArg != argv.end() && StringRef(*FirstArg).startswith("-cc1")) {
    // If -cc1 came from a response file, remove the EOL sentinels.
    if (MarkEOLs) {
      auto newEnd = std::remove(argv.begin(), argv.end(), nullptr);
      argv.resize(newEnd - argv.begin());
    }
    return ExecuteCC1Tool(argv, argv[1] + 4);
  }

  bool CanonicalPrefixes = true;
  for (int i = 1, size = argv.size(); i < size; ++i) {
    // Skip end-of-line response file markers
    if (argv[i] == nullptr)
      continue;
    if (StringRef(argv[i]) == "-no-canonical-prefixes") {
      CanonicalPrefixes = false;
      break;
    }
  }

  // Handle CL and _CL_ which permits additional command line options to be
  // prepended or appended.
  if (Tokenizer == &llvm::cl::TokenizeWindowsCommandLine) {
    // Arguments in "CL" are prepended.
    llvm::Optional<std::string> OptCL = llvm::sys::Process::GetEnv("CL");
    if (OptCL.hasValue()) {
      SmallVector<const char *, 8> PrependedOpts;
      getCLEnvVarOptions(OptCL.getValue(), Saver, PrependedOpts);

      // Insert right after the program name to prepend to the argument list.
      argv.insert(argv.begin() + 1, PrependedOpts.begin(), PrependedOpts.end());
    }
    // Arguments in "_CL_" are appended.
    llvm::Optional<std::string> Opt_CL_ = llvm::sys::Process::GetEnv("_CL_");
    if (Opt_CL_.hasValue()) {
      SmallVector<const char *, 8> AppendedOpts;
      getCLEnvVarOptions(Opt_CL_.getValue(), Saver, AppendedOpts);

      // Insert at the end of the argument list to append.
      argv.append(AppendedOpts.begin(), AppendedOpts.end());
    }
  }

  std::set<std::string> SavedStrings;
  // Handle CCC_OVERRIDE_OPTIONS, used for editing a command line behind the
  // scenes.
  if (const char *OverrideStr = ::getenv("CCC_OVERRIDE_OPTIONS")) {
    // FIXME: Driver shouldn't take extra initial argument.
    ApplyQAOverride(argv, OverrideStr, SavedStrings);
  }

  std::string Path = GetExecutablePath(argv[0], CanonicalPrefixes);

  IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
      CreateAndPopulateDiagOpts(argv);

  TextDiagnosticPrinter *DiagClient
    = new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
  FixupDiagPrefixExeName(DiagClient, Path);

  IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());

  DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);

  if (!DiagOpts->DiagnosticSerializationFile.empty()) {
    auto SerializedConsumer =
        clang::serialized_diags::create(DiagOpts->DiagnosticSerializationFile,
                                        &*DiagOpts, /*MergeChildRecords=*/true);
    Diags.setClient(new ChainedDiagnosticConsumer(
        Diags.takeClient(), std::move(SerializedConsumer)));
  }

  ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);

  Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
  SetInstallDir(argv, TheDriver, CanonicalPrefixes);

  llvm::InitializeAllTargets();
  insertArgsFromProgramName(ProgName, DS, argv, SavedStrings);

  SetBackdoorDriverOutputsFromEnvVars(TheDriver);

  std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(argv));
  int Res = 0;
  SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
  if (C.get())
    Res = TheDriver.ExecuteCompilation(*C, FailingCommands);

  // Force a crash to test the diagnostics.
  if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH")) {
    Diags.Report(diag::err_drv_force_crash) << "FORCE_CLANG_DIAGNOSTICS_CRASH";

    // Pretend that every command failed.
    FailingCommands.clear();
    for (const auto &J : C->getJobs())
      if (const Command *C = dyn_cast<Command>(&J))
        FailingCommands.push_back(std::make_pair(-1, C));
  }

  for (const auto &P : FailingCommands) {
    int CommandRes = P.first;
    const Command *FailingCommand = P.second;
    if (!Res)
      Res = CommandRes;

    // If result status is < 0, then the driver command signalled an error.
    // If result status is 70, then the driver command reported a fatal error.
    // On Windows, abort will return an exit code of 3.  In these cases,
    // generate additional diagnostic information if possible.
    bool DiagnoseCrash = CommandRes < 0 || CommandRes == 70;
#ifdef LLVM_ON_WIN32
    DiagnoseCrash |= CommandRes == 3;
#endif
    if (DiagnoseCrash) {
      TheDriver.generateCompilationDiagnostics(*C, *FailingCommand);
      break;
    }
  }

  Diags.getClient()->finish();

  // If any timers were active but haven't been destroyed yet, print their
  // results now.  This happens in -disable-free mode.
  llvm::TimerGroup::printAll(llvm::errs());

  llvm::llvm_shutdown();

#ifdef LLVM_ON_WIN32
  // Exit status should not be negative on Win32, unless abnormal termination.
  // Once abnormal termination has been caught, a negative status should not
  // be propagated.
  if (Res < 0)
    Res = 1;
#endif

  // If we have multiple failing commands, we return the result of the first
  // failing command.
  return Res;
}
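The nullptr end-of-line sentinels are the least obvious part of the function above, so here is a minimal sketch of the same ExpandResponseFiles call in isolation. It assumes the era's llvm::cl and StringSaver APIs that this driver already uses; the helper name expandWithEOLMarkers is hypothetical:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/StringSaver.h"

// Expand any @file arguments in Argv, leaving a nullptr sentinel wherever a
// response-file line ended (that is what MarkEOLs=true requests).
static void expandWithEOLMarkers(llvm::SmallVectorImpl<const char *> &Argv) {
  llvm::BumpPtrAllocator Alloc;
  llvm::StringSaver Saver(Alloc);
  llvm::cl::ExpandResponseFiles(Saver, llvm::cl::TokenizeGNUCommandLine,
                                Argv, /*MarkEOLs=*/true);
  // Consumers that don't care about line boundaries must skip the nullptr
  // entries, like the -no-canonical-prefixes scan above, or strip them with
  // the std::remove idiom used before ExecuteCC1Tool.
}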
Example #8
int Compilation::performJobsImpl() {
  // Create a TaskQueue for execution.
  std::unique_ptr<TaskQueue> TQ;
  if (SkipTaskExecution)
    TQ.reset(new DummyTaskQueue(NumberOfParallelCommands));
  else
    TQ.reset(new TaskQueue(NumberOfParallelCommands));

  PerformJobsState State;

  using DependencyGraph = DependencyGraph<const Job *>;
  DependencyGraph DepGraph;
  SmallPtrSet<const Job *, 16> DeferredCommands;
  SmallVector<const Job *, 16> InitialOutOfDateCommands;

  DependencyGraph::MarkTracer ActualIncrementalTracer;
  DependencyGraph::MarkTracer *IncrementalTracer = nullptr;
  if (ShowIncrementalBuildDecisions)
    IncrementalTracer = &ActualIncrementalTracer;

  auto noteBuilding = [&] (const Job *cmd, StringRef reason) {
    if (!ShowIncrementalBuildDecisions)
      return;
    if (State.ScheduledCommands.count(cmd))
      return;
    llvm::outs() << "Queuing "
                 << llvm::sys::path::filename(cmd->getOutput().getBaseInput(0))
                 << " " << reason << "\n";
    IncrementalTracer->printPath(llvm::outs(), cmd,
                                 [](raw_ostream &out, const Job *base) {
      out << llvm::sys::path::filename(base->getOutput().getBaseInput(0));
    });
  };

  // Set up scheduleCommandIfNecessaryAndPossible.
  // This will only schedule the given command if it has not been scheduled
  // and if all of its inputs are in FinishedCommands.
  auto scheduleCommandIfNecessaryAndPossible = [&] (const Job *Cmd) {
    if (State.ScheduledCommands.count(Cmd))
      return;

    if (auto Blocking = findUnfinishedJob(Cmd->getInputs(),
                                          State.FinishedCommands)) {
      State.BlockingCommands[Blocking].push_back(Cmd);
      return;
    }

    assert(Cmd->getExtraEnvironment().empty() &&
           "not implemented for compilations with multiple jobs");
    State.ScheduledCommands.insert(Cmd);
    TQ->addTask(Cmd->getExecutable(), Cmd->getArguments(), llvm::None,
                (void *)Cmd);
  };

  // When a task finishes, we need to reevaluate the other commands that
  // might have been blocked.
  auto markFinished = [&] (const Job *Cmd) {
    State.FinishedCommands.insert(Cmd);

    auto BlockedIter = State.BlockingCommands.find(Cmd);
    if (BlockedIter != State.BlockingCommands.end()) {
      auto AllBlocked = std::move(BlockedIter->second);
      State.BlockingCommands.erase(BlockedIter);
      for (auto *Blocked : AllBlocked)
        scheduleCommandIfNecessaryAndPossible(Blocked);
    }
  };

  // Schedule all jobs we can.
  for (const Job *Cmd : getJobs()) {
    if (!getIncrementalBuildEnabled()) {
      scheduleCommandIfNecessaryAndPossible(Cmd);
      continue;
    }

    // Try to load the dependencies file for this job. If there isn't one, we
    // always have to run the job, but it doesn't affect any other jobs. If
    // there should be one but it's not present or can't be loaded, we have to
    // run all the jobs.
    // FIXME: We can probably do better here!
    Job::Condition Condition = Job::Condition::Always;
    StringRef DependenciesFile =
      Cmd->getOutput().getAdditionalOutputForType(types::TY_SwiftDeps);
    if (!DependenciesFile.empty()) {
      if (Cmd->getCondition() == Job::Condition::NewlyAdded) {
        DepGraph.addIndependentNode(Cmd);
      } else {
        switch (DepGraph.loadFromPath(Cmd, DependenciesFile)) {
        case DependencyGraphImpl::LoadResult::HadError:
          disableIncrementalBuild();
          for (const Job *Cmd : DeferredCommands)
            scheduleCommandIfNecessaryAndPossible(Cmd);
          DeferredCommands.clear();
          break;
        case DependencyGraphImpl::LoadResult::UpToDate:
          Condition = Cmd->getCondition();
          break;
        case DependencyGraphImpl::LoadResult::AffectsDownstream:
          llvm_unreachable("we haven't marked anything in this graph yet");
        }
      }
    }

    switch (Condition) {
    case Job::Condition::Always:
      if (getIncrementalBuildEnabled() && !DependenciesFile.empty()) {
        InitialOutOfDateCommands.push_back(Cmd);
        DepGraph.markIntransitive(Cmd);
      }
      SWIFT_FALLTHROUGH;
    case Job::Condition::RunWithoutCascading:
      noteBuilding(Cmd, "(initial)");
      scheduleCommandIfNecessaryAndPossible(Cmd);
      break;
    case Job::Condition::CheckDependencies:
      DeferredCommands.insert(Cmd);
      break;
    case Job::Condition::NewlyAdded:
      llvm_unreachable("handled above");
    }
  }

  if (getIncrementalBuildEnabled()) {
    SmallVector<const Job *, 16> AdditionalOutOfDateCommands;

    // We scheduled all of the files that have actually changed. Now add the
    // files that haven't changed, so that they'll get built in parallel if
    // possible and after the first set of files if it's not.
    for (auto *Cmd : InitialOutOfDateCommands) {
      DepGraph.markTransitive(AdditionalOutOfDateCommands, Cmd,
                              IncrementalTracer);
    }

    for (auto *transitiveCmd : AdditionalOutOfDateCommands)
      noteBuilding(transitiveCmd, "because of the initial set:");
    size_t firstSize = AdditionalOutOfDateCommands.size();

    // Check all cross-module dependencies as well.
    for (StringRef dependency : DepGraph.getExternalDependencies()) {
      llvm::sys::fs::file_status depStatus;
      if (!llvm::sys::fs::status(dependency, depStatus))
        if (depStatus.getLastModificationTime() < LastBuildTime)
          continue;

      // If the dependency has been modified since the oldest built file,
      // or if we can't stat it for some reason (perhaps it's been deleted?),
      // trigger rebuilds through the dependency graph.
      DepGraph.markExternal(AdditionalOutOfDateCommands, dependency);
    }

    for (auto *externalCmd :
            llvm::makeArrayRef(AdditionalOutOfDateCommands).slice(firstSize)) {
      noteBuilding(externalCmd, "because of external dependencies");
    }

    for (auto *AdditionalCmd : AdditionalOutOfDateCommands) {
      if (!DeferredCommands.count(AdditionalCmd))
        continue;
      scheduleCommandIfNecessaryAndPossible(AdditionalCmd);
      DeferredCommands.erase(AdditionalCmd);
    }
  }

  int Result = EXIT_SUCCESS;

  // Set up a callback which will be called immediately after a task has
  // started. This callback may be used to provide output indicating that the
  // task began.
  auto taskBegan = [this] (ProcessId Pid, void *Context) {
    // TODO: properly handle task began.
    const Job *BeganCmd = (const Job *)Context;

    // For verbose output, print out each command as it begins execution.
    if (Level == OutputLevel::Verbose)
      BeganCmd->printCommandLine(llvm::errs());
    else if (Level == OutputLevel::Parseable)
      parseable_output::emitBeganMessage(llvm::errs(), *BeganCmd, Pid);
  };

  // Set up a callback which will be called immediately after a task has
  // finished execution. This callback should determine if execution should
  // continue (if execution should stop, this callback should return true), and
  // it should also schedule any additional commands which we now know need
  // to run.
  auto taskFinished = [&] (ProcessId Pid, int ReturnCode, StringRef Output,
                           void *Context) -> TaskFinishedResponse {
    const Job *FinishedCmd = (const Job *)Context;

    if (Level == OutputLevel::Parseable) {
      // Parseable output was requested.
      parseable_output::emitFinishedMessage(llvm::errs(), *FinishedCmd, Pid,
                                            ReturnCode, Output);
    } else {
      // Otherwise, send the buffered output to stderr, though only if we
      // support getting buffered output.
      if (TaskQueue::supportsBufferingOutput())
        llvm::errs() << Output;
    }

    if (ReturnCode != EXIT_SUCCESS) {
      // The task failed, so return true without performing any further
      // dependency analysis.

      // Store this task's ReturnCode as our Result if we haven't stored
      // anything yet.
      if (Result == EXIT_SUCCESS)
        Result = ReturnCode;

      if (!isa<CompileJobAction>(FinishedCmd->getSource()) ||
          ReturnCode != EXIT_FAILURE) {
        Diags.diagnose(SourceLoc(), diag::error_command_failed,
                       FinishedCmd->getSource().getClassName(),
                       ReturnCode);
      }

      return ContinueBuildingAfterErrors ?
          TaskFinishedResponse::ContinueExecution :
          TaskFinishedResponse::StopExecution;
    }

    // When a task finishes, we need to reevaluate the other commands that
    // might have been blocked.
    markFinished(FinishedCmd);

    // In order to handle both old dependencies that have disappeared and new
    // dependencies that have arisen, we need to reload the dependency file.
    if (getIncrementalBuildEnabled()) {
      const CommandOutput &Output = FinishedCmd->getOutput();
      StringRef DependenciesFile =
        Output.getAdditionalOutputForType(types::TY_SwiftDeps);
      if (!DependenciesFile.empty()) {
        SmallVector<const Job *, 16> Dependents;
        bool wasCascading = DepGraph.isMarked(FinishedCmd);

        switch (DepGraph.loadFromPath(FinishedCmd, DependenciesFile)) {
        case DependencyGraphImpl::LoadResult::HadError:
          disableIncrementalBuild();
          for (const Job *Cmd : DeferredCommands)
            scheduleCommandIfNecessaryAndPossible(Cmd);
          DeferredCommands.clear();
          Dependents.clear();
          break;
        case DependencyGraphImpl::LoadResult::UpToDate:
          if (!wasCascading)
            break;
          SWIFT_FALLTHROUGH;
        case DependencyGraphImpl::LoadResult::AffectsDownstream:
          DepGraph.markTransitive(Dependents, FinishedCmd);
          break;
        }

        for (const Job *Cmd : Dependents) {
          DeferredCommands.erase(Cmd);
          noteBuilding(Cmd, "because of dependencies discovered later");
          scheduleCommandIfNecessaryAndPossible(Cmd);
        }
      }
    }

    return TaskFinishedResponse::ContinueExecution;
  };

  auto taskSignalled = [&] (ProcessId Pid, StringRef ErrorMsg, StringRef Output,
                            void *Context) -> TaskFinishedResponse {
    const Job *SignalledCmd = (const Job *)Context;

    if (Level == OutputLevel::Parseable) {
      // Parseable output was requested.
      parseable_output::emitSignalledMessage(llvm::errs(), *SignalledCmd, Pid,
                                             ErrorMsg, Output);
    } else {
      // Otherwise, send the buffered output to stderr, though only if we
      // support getting buffered output.
      if (TaskQueue::supportsBufferingOutput())
        llvm::errs() << Output;
    }

    if (!ErrorMsg.empty())
      Diags.diagnose(SourceLoc(), diag::error_unable_to_execute_command,
                     ErrorMsg);

    Diags.diagnose(SourceLoc(), diag::error_command_signalled,
                   SignalledCmd->getSource().getClassName());

    // Since the task signalled, unconditionally set result to -2.
    Result = -2;

    return TaskFinishedResponse::StopExecution;
  };

  do {
    // Ask the TaskQueue to execute.
    TQ->execute(taskBegan, taskFinished, taskSignalled);

    // Mark all remaining deferred commands as skipped.
    for (const Job *Cmd : DeferredCommands) {
      if (Level == OutputLevel::Parseable) {
        // Provide output indicating this command was skipped if parseable output
        // was requested.
        parseable_output::emitSkippedMessage(llvm::errs(), *Cmd);
      }

      State.ScheduledCommands.insert(Cmd);
      markFinished(Cmd);
    }

    // ...which may allow us to go on and do later tasks.
  } while (Result == 0 && TQ->hasRemainingTasks());

  if (Result == 0) {
    assert(State.BlockingCommands.empty() &&
           "some blocking commands never finished properly");
  } else {
    // Make sure we record any files that still need to be rebuilt.
    for (const Job *Cmd : getJobs()) {
      // Skip files that don't use dependency analysis.
      StringRef DependenciesFile =
          Cmd->getOutput().getAdditionalOutputForType(types::TY_SwiftDeps);
      if (DependenciesFile.empty())
        continue;

      // Don't worry about commands that finished or weren't going to run.
      if (State.FinishedCommands.count(Cmd))
        continue;
      if (!State.ScheduledCommands.count(Cmd))
        continue;

      bool isCascading = true;
      if (getIncrementalBuildEnabled())
        isCascading = DepGraph.isMarked(Cmd);
      State.UnfinishedCommands.insert({Cmd, isCascading});
    }
  }

  if (!CompilationRecordPath.empty() && !SkipTaskExecution) {
    InputInfoMap InputInfo;
    populateInputInfoMap(InputInfo, State);
    checkForOutOfDateInputs(Diags, InputInfo);
    writeCompilationRecord(CompilationRecordPath, ArgsHash, BuildStartTime,
                           InputInfo);
  }

  if (Result == 0)
    Result = Diags.hadAnyError();
  return Result;
}
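The ScheduledCommands/BlockingCommands/FinishedCommands bookkeeping above is the whole scheduler; stripped of the TaskQueue it reduces to the following minimal, std-only sketch (Job, run(), and the Scheduler type are hypothetical stand-ins, not the Swift driver's types):

#include <map>
#include <set>
#include <utility>
#include <vector>

struct Job {
  std::vector<const Job *> Inputs;
  void run() const {}              // stands in for TQ->addTask(...)
};

struct Scheduler {
  std::set<const Job *> Scheduled, Finished;
  std::map<const Job *, std::vector<const Job *>> Blocking;

  void scheduleIfPossible(const Job *J) {
    if (Scheduled.count(J))
      return;                      // a job is only ever scheduled once
    for (const Job *In : J->Inputs)
      if (!Finished.count(In)) {
        Blocking[In].push_back(J); // park it behind one unfinished input
        return;
      }
    Scheduled.insert(J);
    J->run();
    markFinished(J);
  }

  void markFinished(const Job *J) {
    Finished.insert(J);
    auto It = Blocking.find(J);
    if (It == Blocking.end())
      return;
    auto Blocked = std::move(It->second);
    Blocking.erase(It);            // wake everything this job was blocking
    for (const Job *B : Blocked)
      scheduleIfPossible(B);
  }
};

int main() {
  Job A, B{{&A}};                  // B depends on A
  Scheduler S;
  S.scheduleIfPossible(&B);        // parked: A hasn't finished yet
  S.scheduleIfPossible(&A);        // runs A, which wakes and runs B
  return 0;
}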
Example #9
/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;
  
  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
  
  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!
  
  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;
    
    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();
    
    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;
    
    MatchedAddrModeInsts.clear();
  }
  
  return true;
}
Example #10
/// updateCallSites - Update all sites that call F to use NF.
CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
  CallGraph &CG = getAnalysis<CallGraph>();
  SmallVector<Value*, 16> Args;

  // Attributes - Keep track of the parameter attributes for the arguments.
  SmallVector<AttributeWithIndex, 8> ArgAttrsVec;

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  while (!F->use_empty()) {
    CallSite CS = CallSite::get(*F->use_begin());
    Instruction *Call = CS.getInstruction();

    const AttrListPtr &PAL = F->getAttributes();
    // Add any return attributes.
    if (Attributes attrs = PAL.getRetAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(0, attrs));

    // Copy arguments, however skip first one.
    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
    Value *FirstCArg = *AI;
    ++AI;
    // The 0th parameter attribute is reserved for the return type.
    // The 1st parameter attribute is for the first (sret) argument.
    unsigned ParamIndex = 2; 
    while (AI != AE) {
      Args.push_back(*AI); 
      if (Attributes Attrs = PAL.getParamAttributes(ParamIndex))
        ArgAttrsVec.push_back(AttributeWithIndex::get(ParamIndex - 1, Attrs));
      ++ParamIndex;
      ++AI;
    }

    // Add any function attributes.
    if (Attributes attrs = PAL.getFnAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(~0, attrs));
    
    AttrListPtr NewPAL = AttrListPtr::get(ArgAttrsVec.begin(), ArgAttrsVec.end());
    
    // Build new call instruction.
    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(NewPAL);
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(NewPAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    ArgAttrsVec.clear();
    New->takeName(Call);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->removeCallEdgeFor(Call);
    CalleeNode->addCalledFunction(New, NF_CGN);
    
    // Update all users of sret parameter to extract value using extractvalue.
    for (Value::use_iterator UI = FirstCArg->use_begin(), 
           UE = FirstCArg->use_end(); UI != UE; ) {
      User *U2 = *UI++;
      CallInst *C2 = dyn_cast<CallInst>(U2);
      if (C2 && (C2 == Call))
        continue;
      
      GetElementPtrInst *UGEP = cast<GetElementPtrInst>(U2);
      ConstantInt *Idx = cast<ConstantInt>(UGEP->getOperand(2));
      Value *GR = ExtractValueInst::Create(New, Idx->getZExtValue(),
                                           "evi", UGEP);
      while(!UGEP->use_empty()) {
        // isSafeToUpdateAllCallers has checked that all GEP uses are
        // LoadInsts
        LoadInst *L = cast<LoadInst>(*UGEP->use_begin());
        L->replaceAllUsesWith(GR);
        L->eraseFromParent();
      }
      UGEP->eraseFromParent();
      continue;
    }
    Call->eraseFromParent();
  }
  
  return NF_CGN;
}
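Both the UI++ use-list walk and the while(!use_empty()) drain above depend on advancing (or re-fetching) the iterator before the current use is destroyed. A self-contained, std-only sketch of that increment-before-erase idiom (plain ints stand in for uses):

#include <iostream>
#include <list>

int main() {
  std::list<int> Uses = {1, 2, 3, 4};
  for (auto UI = Uses.begin(), UE = Uses.end(); UI != UE;) {
    auto Cur = UI++;     // advance first: erasing *Cur leaves UI valid
    if (*Cur % 2 == 0)
      Uses.erase(Cur);   // only Cur's node is invalidated in a std::list
  }
  for (int U : Uses)
    std::cout << U << ' ';   // prints: 1 3
  std::cout << '\n';
  return 0;
}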
Example #11
static bool simplifyLoopInst(Loop &L, DominatorTree &DT, LoopInfo &LI,
                             AssumptionCache &AC, const TargetLibraryInfo &TLI,
                             MemorySSAUpdater *MSSAU) {
  const DataLayout &DL = L.getHeader()->getModule()->getDataLayout();
  SimplifyQuery SQ(DL, &TLI, &DT, &AC);

  // On the first pass over the loop body we try to simplify every instruction.
  // On subsequent passes, we can restrict this to only simplifying instructions
  // where the inputs have been updated. We end up needing two sets: one
  // containing the instructions we are simplifying in *this* pass, and one for
  // the instructions we will want to simplify in the *next* pass. We use
  // pointers so we can swap between two stably allocated sets.
  SmallPtrSet<const Instruction *, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;

  // Track the PHI nodes that have already been visited during each iteration so
  // that we can identify when it is necessary to iterate.
  SmallPtrSet<PHINode *, 4> VisitedPHIs;

  // While simplifying we may discover dead code or cause code to become dead.
  // Keep track of all such instructions and we will delete them at the end.
  SmallVector<Instruction *, 8> DeadInsts;

  // First we want to create an RPO traversal of the loop body. By processing in
  // RPO we can ensure that definitions are processed prior to uses (for non PHI
  // uses) in all cases. This ensures we maximize the simplifications in each
  // iteration over the loop and minimizes the possible causes for continuing to
  // iterate.
  LoopBlocksRPO RPOT(&L);
  RPOT.perform(&LI);
  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;

  bool Changed = false;
  for (;;) {
    if (MSSAU && VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    for (BasicBlock *BB : RPOT) {
      for (Instruction &I : *BB) {
        if (auto *PI = dyn_cast<PHINode>(&I))
          VisitedPHIs.insert(PI);

        if (I.use_empty()) {
          if (isInstructionTriviallyDead(&I, &TLI))
            DeadInsts.push_back(&I);
          continue;
        }

        // We special case the first iteration which we can detect due to the
        // empty `ToSimplify` set.
        bool IsFirstIteration = ToSimplify->empty();

        if (!IsFirstIteration && !ToSimplify->count(&I))
          continue;

        Value *V = SimplifyInstruction(&I, SQ.getWithInstruction(&I));
        if (!V || !LI.replacementPreservesLCSSAForm(&I, V))
          continue;

        for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
             UI != UE;) {
          Use &U = *UI++;
          auto *UserI = cast<Instruction>(U.getUser());
          U.set(V);

          // If the instruction is used by a PHI node we have already processed
          // we'll need to iterate on the loop body to converge, so add it to
          // the next set.
          if (auto *UserPI = dyn_cast<PHINode>(UserI))
            if (VisitedPHIs.count(UserPI)) {
              Next->insert(UserPI);
              continue;
            }

          // If we are only simplifying targeted instructions and the user is an
          // instruction in the loop body, add it to our set of targeted
          // instructions. Because we process defs before uses (outside of PHIs)
          // we won't have visited it yet.
          //
          // We also skip any uses outside of the loop being simplified. Those
          // should always be PHI nodes due to LCSSA form, and we don't want to
          // try to simplify those away.
          assert((L.contains(UserI) || isa<PHINode>(UserI)) &&
                 "Uses outside the loop should be PHI nodes due to LCSSA!");
          if (!IsFirstIteration && L.contains(UserI))
            ToSimplify->insert(UserI);
        }

        if (MSSAU)
          if (Instruction *SimpleI = dyn_cast_or_null<Instruction>(V))
            if (MemoryAccess *MA = MSSA->getMemoryAccess(&I))
              if (MemoryAccess *ReplacementMA = MSSA->getMemoryAccess(SimpleI))
                MA->replaceAllUsesWith(ReplacementMA);

        assert(I.use_empty() && "Should always have replaced all uses!");
        if (isInstructionTriviallyDead(&I, &TLI))
          DeadInsts.push_back(&I);
        ++NumSimplified;
        Changed = true;
      }
    }

    // Delete any dead instructions found thus far now that we've finished an
    // iteration over all instructions in all the loop blocks.
    if (!DeadInsts.empty()) {
      Changed = true;
      RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, &TLI, MSSAU);
    }

    if (MSSAU && VerifyMemorySSA)
      MSSA->verifyMemorySSA();

    // If we never found a PHI that needs to be simplified in the next
    // iteration, we're done.
    if (Next->empty())
      break;

    // Otherwise, put the next set in place for the next iteration and reset it
    // and the visited PHIs for that iteration.
    std::swap(Next, ToSimplify);
    Next->clear();
    VisitedPHIs.clear();
    DeadInsts.clear();
  }

  return Changed;
}
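The ToSimplify/Next pointer swap in this pass is a reusable worklist pattern: two stably allocated sets, one consumed this round while the other collects work for the next. A minimal, std-only sketch of the idiom (the name iterateToFixpoint and the toy payload are hypothetical):

#include <iostream>
#include <unordered_set>
#include <utility>

// Two stably allocated sets; consume one while filling the other, then swap.
template <typename T, typename Seed, typename Step>
void iterateToFixpoint(const Seed &SeedRound, const Step &OneItem) {
  std::unordered_set<T> S1, S2, *Cur = &S1, *Next = &S2;
  SeedRound(*Next);                // round zero: seed the initial worklist
  while (!Next->empty()) {
    std::swap(Cur, Next);          // what the last round produced...
    Next->clear();                 // ...is what this round consumes
    for (const T &Item : *Cur)
      OneItem(Item, *Next);        // handlers enqueue follow-up work
  }
}

int main() {
  // Toy fixpoint: start from 0 and keep generating successors until 3.
  iterateToFixpoint<int>(
      [](std::unordered_set<int> &Next) { Next.insert(0); },
      [](int I, std::unordered_set<int> &Next) {
        std::cout << I << '\n';
        if (I < 3)
          Next.insert(I + 1);
      });
  return 0;
}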
Example #12
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {

  DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
               << "********** Function: "
               << ((Value*)fn.getFunction())->getName() << '\n');

  bool Changed = false;

  TII = fn.getTarget().getInstrInfo();
  TRI = fn.getTarget().getRegisterInfo();
  MRI = &fn.getRegInfo();
  LV = &getAnalysis<LiveVariables>();

  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  SmallVector<MachineInstr*, 4> RUses;
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  SmallPtrSet<MachineInstr*, 8> ModInsts;

  MachineBasicBlock *Entry = fn.begin();
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->isImplicitDef()) {
        ImpDefMIs.push_back(MI);
        // Is this a sub-register read-modify-write?
        if (MI->getOperand(0).readsReg())
          continue;
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
          for (const unsigned *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
            ImpDefRegs.insert(*SS);
        }
        continue;
      }

      // Eliminate %reg1032:sub<def> = COPY undef.
      if (MI->isCopy() && MI->getOperand(0).readsReg()) {
        MachineOperand &MO = MI->getOperand(1);
        if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
          if (MO.isKill()) {
            LiveVariables::VarInfo& vi = LV->getVarInfo(MO.getReg());
            vi.removeKill(MI);
          }
          unsigned Reg = MI->getOperand(0).getReg();
          MI->eraseFromParent();
          Changed = true;

          // A REG_SEQUENCE may have been expanded into partial definitions.
          // If this was the last one, mark Reg as implicitly defined.
          if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI->def_empty(Reg))
            ImpDefRegs.insert(Reg);
          continue;
        }
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.readsReg())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, i, ImpDefRegs)) {
          bool isKill = MO.isKill();
          MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill) {
            ImpDefRegs.erase(Reg);
            LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
            vi.removeKill(MI);
          }
          ChangedToImpDef = true;
          Changed = true;
          break;
        }

        Changed = true;
        MO.setIsUndef();
        // This is a partial register redef of an implicit def.
        // Make sure the whole register is defined by the instruction.
        if (MO.isDef()) {
          MI->addRegisterDefined(Reg);
          continue;
        }
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other reads of Reg are also marked <undef>.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.getReg() == Reg && MOJ.readsReg())
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_defs, including those which define
        // physical registers, since they cannot be live out.
        MI->eraseFromParent();
        Changed = true;
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      SmallVector<MachineInstr*, 4> DeadImpDefs;
      for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
             DE = MRI->def_end(); DI != DE; ++DI) {
        MachineInstr *DeadImpDef = &*DI;
        if (!DeadImpDef->isImplicitDef()) {
          Skip = true;
          break;
        }
        DeadImpDefs.push_back(DeadImpDef);
      }
      if (Skip)
        continue;

      // The only implicit_defs we want to keep are those that are live out of
      // their block.
      for (unsigned j = 0, ee = DeadImpDefs.size(); j != ee; ++j)
        DeadImpDefs[j]->eraseFromParent();
      Changed = true;

      // Process each use instruction once.
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
             UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI.getOperand().isUndef())
          continue;
        MachineInstr *RMI = &*UI;
        if (ModInsts.insert(RMI))
          RUses.push_back(RMI);
      }

      for (unsigned i = 0, e = RUses.size(); i != e; ++i) {
        MachineInstr *RMI = RUses[i];

        // Turn a copy use into an implicit_def.
        if (isUndefCopy(RMI, Reg, ImpDefRegs)) {
          RMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));

          bool isKill = false;
          SmallVector<unsigned, 4> Ops;
          for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
            MachineOperand &RRMO = RMI->getOperand(j);
            if (RRMO.isReg() && RRMO.getReg() == Reg) {
              Ops.push_back(j);
              if (RRMO.isKill())
                isKill = true;
            }
          }
          // Leave the other operands alone.
          for (unsigned j = 0, ee = Ops.size(); j != ee; ++j) {
            unsigned OpIdx = Ops[j];
            RMI->RemoveOperand(OpIdx-j);
          }

          // Update LiveVariables varinfo if the instruction is a kill.
          if (isKill) {
            LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
            vi.removeKill(RMI);
          }
          continue;
        }

        // Replace Reg with a new vreg that's marked implicit.
        const TargetRegisterClass* RC = MRI->getRegClass(Reg);
        unsigned NewVReg = MRI->createVirtualRegister(RC);
        bool isKill = true;
        for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
          MachineOperand &RRMO = RMI->getOperand(j);
          if (RRMO.isReg() && RRMO.getReg() == Reg) {
            RRMO.setReg(NewVReg);
            RRMO.setIsUndef();
            if (isKill) {
              // Only the first operand of NewVReg is marked kill.
              RRMO.setIsKill();
              isKill = false;
            }
          }
        }
      }
      RUses.clear();
      ModInsts.clear();
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }

  return Changed;
}
Example #13
/// ValueEnumerator - Enumerate module-level information.
ValueEnumerator::ValueEnumerator(const Module *M) {
  // Enumerate the global variables.
  for (Module::const_global_iterator I = M->global_begin(),
         E = M->global_end(); I != E; ++I)
    EnumerateValue(I);

  // Enumerate the functions.
  for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
    EnumerateValue(I);
    EnumerateAttributes(cast<Function>(I)->getAttributes());
  }

  // Enumerate the aliases.
  for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
       I != E; ++I)
    EnumerateValue(I);

  // Remember the cutoff between global values and other constants.
  unsigned FirstConstant = Values.size();

  // Enumerate the global variable initializers.
  for (Module::const_global_iterator I = M->global_begin(),
         E = M->global_end(); I != E; ++I)
    if (I->hasInitializer())
      EnumerateValue(I->getInitializer());

  // Enumerate the aliasees.
  for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
       I != E; ++I)
    EnumerateValue(I->getAliasee());

  // Insert constants and metadata that are named at module level into the slot 
  // pool so that the module symbol table can refer to them...
  EnumerateValueSymbolTable(M->getValueSymbolTable());
  EnumerateNamedMetadata(M);

  SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;

  // Enumerate types used by function bodies and argument lists.
  for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {

    for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I)
      EnumerateType(I->getType());

    for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){
        for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
             OI != E; ++OI) {
          if (MDNode *MD = dyn_cast<MDNode>(*OI))
            if (MD->isFunctionLocal() && MD->getFunction())
              // These will get enumerated during function-incorporation.
              continue;
          EnumerateOperandType(*OI);
        }
        EnumerateType(I->getType());
        if (const CallInst *CI = dyn_cast<CallInst>(I))
          EnumerateAttributes(CI->getAttributes());
        else if (const InvokeInst *II = dyn_cast<InvokeInst>(I))
          EnumerateAttributes(II->getAttributes());

        // Enumerate metadata attached with this instruction.
        MDs.clear();
        I->getAllMetadataOtherThanDebugLoc(MDs);
        for (unsigned i = 0, e = MDs.size(); i != e; ++i)
          EnumerateMetadata(MDs[i].second);

        if (!I->getDebugLoc().isUnknown()) {
          MDNode *Scope, *IA;
          I->getDebugLoc().getScopeAndInlinedAt(Scope, IA, I->getContext());
          if (Scope) EnumerateMetadata(Scope);
          if (IA) EnumerateMetadata(IA);
        }
      }
  }

  // Optimize constant ordering.
  OptimizeConstants(FirstConstant, Values.size());
}
Example #14
void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = *RI;
    MachineInstr *MI = RI->getParent();
    ++RI;
    const VNInfo *VNI;
    if (MI->isDebugValue()) {
      // DBG_VALUE instructions don't have slot indexes, so get the index of
      // the instruction before them. The value is defined there too.
      SlotIndex Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
      VNI = LI.Query(Idx).valueOut();
    } else {
      SlotIndex Idx = LIS.getInstructionIndex(*MI);
      LiveQueryResult LRQ = LI.Query(Idx);
      VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    }
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    if (unsigned EqClass = getEqClass(VNI))
      MO.setReg(LIV[EqClass-1]->reg);
  }

  // Distribute subregister liveranges.
  if (LI.hasSubRanges()) {
    unsigned NumComponents = EqClass.getNumClasses();
    SmallVector<unsigned, 8> VNIMapping;
    SmallVector<LiveInterval::SubRange*, 8> SubRanges;
    BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
    for (LiveInterval::SubRange &SR : LI.subranges()) {
      // Create new subranges in the split intervals and construct a mapping
      // for the VNInfos in the subrange.
      unsigned NumValNos = SR.valnos.size();
      VNIMapping.clear();
      VNIMapping.reserve(NumValNos);
      SubRanges.clear();
      SubRanges.resize(NumComponents-1, nullptr);
      for (unsigned I = 0; I < NumValNos; ++I) {
        const VNInfo &VNI = *SR.valnos[I];
        unsigned ComponentNum;
        if (VNI.isUnused()) {
          ComponentNum = 0;
        } else {
          const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
          assert(MainRangeVNI != nullptr
                 && "SubRange def must have corresponding main range def");
          ComponentNum = getEqClass(MainRangeVNI);
          if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
            SubRanges[ComponentNum-1]
              = LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
          }
        }
        VNIMapping.push_back(ComponentNum);
      }
      DistributeRange(SR, SubRanges.data(), VNIMapping);
    }
    LI.removeEmptySubRanges();
  }

  // Distribute main liverange.
  DistributeRange(LI, LIV, EqClass);
}
Example #15
bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo());
  TRI = MF.getSubtarget().getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  LIS = &getAnalysis<LiveIntervals>();
  MRI = &MF.getRegInfo();

  DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
                   MF.getFunction()->getParent()));

  bool Changed = false;
  std::set<unsigned> CoalUpd, PredUpd;

  SmallVector<MachineInstr*,16> Condsets;
  for (auto &B : MF)
    for (auto &I : B)
      if (isCondset(I))
        Condsets.push_back(&I);

  // Try to coalesce the target of a mux with one of its sources.
  // This could eliminate a register copy in some circumstances.
  Changed |= coalesceSegments(Condsets, CoalUpd);

  // Update kill flags on all source operands. This is done here because
  // at this moment (when expand-condsets runs), there are no kill flags
  // in the IR (they have been removed by live range analysis).
  // Updating them right before we split is the easiest, because splitting
  // adds definitions which would interfere with updating kills afterwards.
  std::set<unsigned> KillUpd;
  for (MachineInstr *MI : Condsets)
    for (MachineOperand &Op : MI->operands())
      if (Op.isReg() && Op.isUse())
        if (!CoalUpd.count(Op.getReg()))
          KillUpd.insert(Op.getReg());
  updateLiveness(KillUpd, false, true, false);
  DEBUG(LIS->print(dbgs() << "After coalescing\n",
                   MF.getFunction()->getParent()));

  // First, simply split all muxes into a pair of conditional transfers
  // and update the live intervals to reflect the new arrangement. The
  // goal is to update the kill flags, since predication will rely on
  // them.
  for (MachineInstr *MI : Condsets)
    Changed |= split(*MI, PredUpd);
  Condsets.clear(); // The contents of Condsets are invalid here anyway.

  // Do not update live ranges after splitting. Recalculation of live
  // intervals removes kill flags, which were preserved by splitting on
  // the source operands of condsets. These kill flags are needed by
  // predication, and after splitting they are difficult to recalculate
  // (because of predicated defs), so make sure they are left untouched.
  // Predication does not use live intervals.
  DEBUG(LIS->print(dbgs() << "After splitting\n",
                   MF.getFunction()->getParent()));

  // Traverse all blocks and collapse predicable instructions feeding
  // conditional transfers into predicated instructions.
  // Walk over all the instructions again, so we may catch pre-existing
  // cases that were not created in the previous step.
  for (auto &B : MF)
    Changed |= predicateInBlock(B, PredUpd);
  DEBUG(LIS->print(dbgs() << "After predicating\n",
                   MF.getFunction()->getParent()));

  PredUpd.insert(CoalUpd.begin(), CoalUpd.end());
  updateLiveness(PredUpd, true, true, true);

  DEBUG({
    if (Changed)
      LIS->print(dbgs() << "After expand-condsets\n",
                 MF.getFunction()->getParent());
  });

  return Changed;
}
Example #16
//===----------------------------------------------------------------------===//
// main for opt
//
int main(int argc, char **argv) {
  sys::PrintStackTraceOnErrorSignal(argv[0]);
  llvm::PrettyStackTraceProgram X(argc, argv);

  // Enable debug stream buffering.
  EnableDebugBuffering = true;

  llvm_shutdown_obj Y;  // Call llvm_shutdown() on exit.
  LLVMContext Context;

  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();

  // Initialize passes
  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeCore(Registry);
  initializeCoroutines(Registry);
  initializeScalarOpts(Registry);
  initializeObjCARCOpts(Registry);
  initializeVectorization(Registry);
  initializeIPO(Registry);
  initializeAnalysis(Registry);
  initializeTransformUtils(Registry);
  initializeInstCombine(Registry);
  initializeInstrumentation(Registry);
  initializeTarget(Registry);
  // For codegen passes, only passes that do IR to IR transformation are
  // supported.
  initializeScalarizeMaskedMemIntrinPass(Registry);
  initializeCodeGenPreparePass(Registry);
  initializeAtomicExpandPass(Registry);
  initializeRewriteSymbolsLegacyPassPass(Registry);
  initializeWinEHPreparePass(Registry);
  initializeDwarfEHPreparePass(Registry);
  initializeSafeStackLegacyPassPass(Registry);
  initializeSjLjEHPreparePass(Registry);
  initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
  initializeGlobalMergePass(Registry);
  initializeInterleavedAccessPass(Registry);
  initializeCountingFunctionInserterPass(Registry);
  initializeUnreachableBlockElimLegacyPassPass(Registry);
  initializeExpandReductionsPass(Registry);

#ifdef LINK_POLLY_INTO_TOOLS
  polly::initializePollyPasses(Registry);
#endif

  cl::ParseCommandLineOptions(argc, argv,
    "llvm .bc -> .bc modular optimizer and analysis printer\n");

  if (AnalyzeOnly && NoOutput) {
    errs() << argv[0] << ": analyze mode conflicts with no-output mode.\n";
    return 1;
  }

  SMDiagnostic Err;

  Context.setDiscardValueNames(DiscardValueNames);
  if (!DisableDITypeMap)
    Context.enableDebugTypeODRUniquing();

  if (PassRemarksWithHotness)
    Context.setDiagnosticsHotnessRequested(true);

  if (PassRemarksHotnessThreshold)
    Context.setDiagnosticsHotnessThreshold(PassRemarksHotnessThreshold);

  std::unique_ptr<ToolOutputFile> OptRemarkFile;
  if (RemarksFilename != "") {
    std::error_code EC;
    OptRemarkFile =
        llvm::make_unique<ToolOutputFile>(RemarksFilename, EC, sys::fs::F_None);
    if (EC) {
      errs() << EC.message() << '\n';
      return 1;
    }
    Context.setDiagnosticsOutputFile(
        llvm::make_unique<yaml::Output>(OptRemarkFile->os()));
  }

  // Load the input module...
  std::unique_ptr<Module> M =
      parseIRFile(InputFilename, Err, Context, !NoVerify);

  if (!M) {
    Err.print(argv[0], errs());
    return 1;
  }

  // Strip debug info before running the verifier.
  if (StripDebug)
    StripDebugInfo(*M);

  // Immediately run the verifier to catch any problems before starting up the
  // pass pipelines.  Otherwise we can crash on broken code during
  // doInitialization().
  if (!NoVerify && verifyModule(*M, &errs())) {
    errs() << argv[0] << ": " << InputFilename
           << ": error: input module is broken!\n";
    return 1;
  }

  // If we are supposed to override the target triple or data layout, do so now.
  if (!TargetTriple.empty())
    M->setTargetTriple(Triple::normalize(TargetTriple));
  if (!ClDataLayout.empty())
    M->setDataLayout(ClDataLayout);

  // Figure out what stream we are supposed to write to...
  std::unique_ptr<ToolOutputFile> Out;
  std::unique_ptr<ToolOutputFile> ThinLinkOut;
  if (NoOutput) {
    if (!OutputFilename.empty())
      errs() << "WARNING: The -o (output filename) option is ignored when\n"
                "the --disable-output option is used.\n";
  } else {
    // Default to standard output.
    if (OutputFilename.empty())
      OutputFilename = "-";

    std::error_code EC;
    Out.reset(new ToolOutputFile(OutputFilename, EC, sys::fs::F_None));
    if (EC) {
      errs() << EC.message() << '\n';
      return 1;
    }

    if (!ThinLinkBitcodeFile.empty()) {
      ThinLinkOut.reset(
          new ToolOutputFile(ThinLinkBitcodeFile, EC, sys::fs::F_None));
      if (EC) {
        errs() << EC.message() << '\n';
        return 1;
      }
    }
  }

  Triple ModuleTriple(M->getTargetTriple());
  std::string CPUStr, FeaturesStr;
  TargetMachine *Machine = nullptr;
  const TargetOptions Options = InitTargetOptionsFromCodeGenFlags();

  if (ModuleTriple.getArch()) {
    CPUStr = getCPUStr();
    FeaturesStr = getFeaturesStr();
    Machine = GetTargetMachine(ModuleTriple, CPUStr, FeaturesStr, Options);
  }

  std::unique_ptr<TargetMachine> TM(Machine);

  // Override function attributes based on CPUStr, FeaturesStr, and command line
  // flags.
  setFunctionAttributes(CPUStr, FeaturesStr, *M);

  // If the output is set to be emitted to standard out, and standard out is a
  // console, print out a warning message and refuse to do it.  We don't
  // impress anyone by spewing tons of binary goo to a terminal.
  if (!Force && !NoOutput && !AnalyzeOnly && !OutputAssembly)
    if (CheckBitcodeOutputToConsole(Out->os(), !Quiet))
      NoOutput = true;

  if (PassPipeline.getNumOccurrences() > 0) {
    OutputKind OK = OK_NoOutput;
    if (!NoOutput)
      OK = OutputAssembly
               ? OK_OutputAssembly
               : (OutputThinLTOBC ? OK_OutputThinLTOBitcode : OK_OutputBitcode);

    VerifierKind VK = VK_VerifyInAndOut;
    if (NoVerify)
      VK = VK_NoVerifier;
    else if (VerifyEach)
      VK = VK_VerifyEachPass;

    // The user has asked to use the new pass manager and provided a pipeline
    // string. Hand off the rest of the functionality to the new code for that
    // layer.
    return runPassPipeline(argv[0], *M, TM.get(), Out.get(), ThinLinkOut.get(),
                           OptRemarkFile.get(), PassPipeline, OK, VK,
                           PreserveAssemblyUseListOrder,
                           PreserveBitcodeUseListOrder, EmitSummaryIndex,
                           EmitModuleHash)
               ? 0
               : 1;
  }

  // Create a PassManager to hold and optimize the collection of passes we are
  // about to build.
  //
  legacy::PassManager Passes;

  // Add an appropriate TargetLibraryInfo pass for the module's triple.
  TargetLibraryInfoImpl TLII(ModuleTriple);

  // The -disable-simplify-libcalls flag actually disables all builtin optzns.
  if (DisableSimplifyLibCalls)
    TLII.disableAllFunctions();
  Passes.add(new TargetLibraryInfoWrapperPass(TLII));

  // Add internal analysis passes from the target machine.
  Passes.add(createTargetTransformInfoWrapperPass(TM ? TM->getTargetIRAnalysis()
                                                     : TargetIRAnalysis()));

  std::unique_ptr<legacy::FunctionPassManager> FPasses;
  if (OptLevelO0 || OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz ||
      OptLevelO3) {
    FPasses.reset(new legacy::FunctionPassManager(M.get()));
    FPasses->add(createTargetTransformInfoWrapperPass(
        TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis()));
  }

  if (PrintBreakpoints) {
    // Default to standard output.
    if (!Out) {
      if (OutputFilename.empty())
        OutputFilename = "-";

      std::error_code EC;
      Out = llvm::make_unique<ToolOutputFile>(OutputFilename, EC,
                                              sys::fs::F_None);
      if (EC) {
        errs() << EC.message() << '\n';
        return 1;
      }
    }
    Passes.add(createBreakpointPrinter(Out->os()));
    NoOutput = true;
  }

  if (TM) {
    // FIXME: We should dyn_cast this when supported.
    auto &LTM = static_cast<LLVMTargetMachine &>(*TM);
    Pass *TPC = LTM.createPassConfig(Passes);
    Passes.add(TPC);
  }

  // Create a new optimization pass for each one specified on the command line
  for (unsigned i = 0; i < PassList.size(); ++i) {
    if (StandardLinkOpts &&
        StandardLinkOpts.getPosition() < PassList.getPosition(i)) {
      AddStandardLinkPasses(Passes);
      StandardLinkOpts = false;
    }

    if (OptLevelO0 && OptLevelO0.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 0, 0);
      OptLevelO0 = false;
    }

    if (OptLevelO1 && OptLevelO1.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 1, 0);
      OptLevelO1 = false;
    }

    if (OptLevelO2 && OptLevelO2.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);
      OptLevelO2 = false;
    }

    if (OptLevelOs && OptLevelOs.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);
      OptLevelOs = false;
    }

    if (OptLevelOz && OptLevelOz.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);
      OptLevelOz = false;
    }

    if (OptLevelO3 && OptLevelO3.getPosition() < PassList.getPosition(i)) {
      AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);
      OptLevelO3 = false;
    }

    const PassInfo *PassInf = PassList[i];
    Pass *P = nullptr;
    if (PassInf->getNormalCtor())
      P = PassInf->getNormalCtor()();
    else
      errs() << argv[0] << ": cannot create pass: " << PassInf->getPassName()
             << "\n";
    if (P) {
      PassKind Kind = P->getPassKind();
      addPass(Passes, P);

      if (AnalyzeOnly) {
        switch (Kind) {
        case PT_BasicBlock:
          Passes.add(createBasicBlockPassPrinter(PassInf, Out->os(), Quiet));
          break;
        case PT_Region:
          Passes.add(createRegionPassPrinter(PassInf, Out->os(), Quiet));
          break;
        case PT_Loop:
          Passes.add(createLoopPassPrinter(PassInf, Out->os(), Quiet));
          break;
        case PT_Function:
          Passes.add(createFunctionPassPrinter(PassInf, Out->os(), Quiet));
          break;
        case PT_CallGraphSCC:
          Passes.add(createCallGraphPassPrinter(PassInf, Out->os(), Quiet));
          break;
        default:
          Passes.add(createModulePassPrinter(PassInf, Out->os(), Quiet));
          break;
        }
      }
    }

    if (PrintEachXForm)
      Passes.add(
          createPrintModulePass(errs(), "", PreserveAssemblyUseListOrder));
  }

  if (StandardLinkOpts) {
    AddStandardLinkPasses(Passes);
    StandardLinkOpts = false;
  }

  if (OptLevelO0)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 0, 0);

  if (OptLevelO1)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 1, 0);

  if (OptLevelO2)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);

  if (OptLevelOs)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);

  if (OptLevelOz)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);

  if (OptLevelO3)
    AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);

  if (FPasses) {
    FPasses->doInitialization();
    for (Function &F : *M)
      FPasses->run(F);
    FPasses->doFinalization();
  }

  // Check that the module is well formed on completion of optimization
  if (!NoVerify && !VerifyEach)
    Passes.add(createVerifierPass());

  // In run twice mode, we want to make sure the output is bit-by-bit
  // equivalent if we run the pass manager again, so set up two buffers and
  // a stream to write to them. Note that llc does something similar, and it
  // may be worth abstracting this out in the future.
  SmallVector<char, 0> Buffer;
  SmallVector<char, 0> CompileTwiceBuffer;
  std::unique_ptr<raw_svector_ostream> BOS;
  raw_ostream *OS = nullptr;

  // Write bitcode or assembly to the output as the last step...
  if (!NoOutput && !AnalyzeOnly) {
    assert(Out);
    OS = &Out->os();
    if (RunTwice) {
      BOS = make_unique<raw_svector_ostream>(Buffer);
      OS = BOS.get();
    }
    if (OutputAssembly) {
      if (EmitSummaryIndex)
        report_fatal_error("Text output is incompatible with -module-summary");
      if (EmitModuleHash)
        report_fatal_error("Text output is incompatible with -module-hash");
      Passes.add(createPrintModulePass(*OS, "", PreserveAssemblyUseListOrder));
    } else if (OutputThinLTOBC)
      Passes.add(createWriteThinLTOBitcodePass(
          *OS, ThinLinkOut ? &ThinLinkOut->os() : nullptr));
    else
      Passes.add(createBitcodeWriterPass(*OS, PreserveBitcodeUseListOrder,
                                         EmitSummaryIndex, EmitModuleHash));
  }

  // Before executing passes, print the final values of the LLVM options.
  cl::PrintOptionValues();

  // If requested, run all passes again with the same pass manager to catch
  // bugs caused by persistent state in the passes
  if (RunTwice) {
    std::unique_ptr<Module> M2(CloneModule(M.get()));
    Passes.run(*M2);
    CompileTwiceBuffer = Buffer;
    Buffer.clear();
  }

  // Now that we have all of the passes ready, run them.
  Passes.run(*M);

  // Compare the two outputs and make sure they're the same
  if (RunTwice) {
    assert(Out);
    if (Buffer.size() != CompileTwiceBuffer.size() ||
        (memcmp(Buffer.data(), CompileTwiceBuffer.data(), Buffer.size()) !=
         0)) {
      errs() << "Running the pass manager twice changed the output.\n"
                "Writing the result of the second run to the specified output.\n"
                "To generate the one-run comparison binary, just run without\n"
                "the compile-twice option\n";
      Out->os() << BOS->str();
      Out->keep();
      if (OptRemarkFile)
        OptRemarkFile->keep();
      return 1;
    }
    Out->os() << BOS->str();
  }

  // Declare success.
  if (!NoOutput || PrintBreakpoints)
    Out->keep();

  if (OptRemarkFile)
    OptRemarkFile->keep();

  if (ThinLinkOut)
    ThinLinkOut->keep();

  return 0;
}
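The RunTwice comparison above is the entire determinism check: capture the first run in a side buffer, rerun on the original module, and report when the bytes differ. The same pattern in miniature, as a hedged sketch where a placeholder render() stands in for the pass pipeline:

#include <cstring>
#include <string>

// Placeholder for running the pipeline and capturing its output bytes.
std::string render() { return "bitcode-bytes"; }

// Two runs must produce identical output, byte for byte.
bool runsAreDeterministic() {
  std::string First = render();   // first run (the cloned module above)
  std::string Second = render();  // second run (the original module)
  return First.size() == Second.size() &&
         std::memcmp(First.data(), Second.data(), First.size()) == 0;
}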
Example #17
bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
  DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
  LoopInfo *LI = &getAnalysis<LoopInfo>();
  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
  const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  array_pod_sort(ExitBlocks.begin(), ExitBlocks.end());

  SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;

  // The bit we are stealing from the pointer represents whether this basic
  // block is the header of a subloop, in which case we only process its phis.
  typedef PointerIntPair<BasicBlock*, 1> WorklistItem;
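  // A WorklistItem stays pointer-sized: PointerIntPair packs the flag into
  // the low bits freed by BasicBlock*'s alignment, so, for example,
  // WorklistItem(BB, true).getPointer() yields BB and getInt() yields true.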
  SmallVector<WorklistItem, 16> VisitStack;
  SmallPtrSet<BasicBlock*, 32> Visited;

  bool Changed = false;
  bool LocalChanged;
  do {
    LocalChanged = false;

    VisitStack.clear();
    Visited.clear();

    VisitStack.push_back(WorklistItem(L->getHeader(), false));

    while (!VisitStack.empty()) {
      WorklistItem Item = VisitStack.pop_back_val();
      BasicBlock *BB = Item.getPointer();
      bool IsSubloopHeader = Item.getInt();

      // Simplify instructions in the current basic block.
      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
        Instruction *I = BI++;

        // The first time through the loop ToSimplify is empty and we try to
        // simplify all instructions. On later iterations ToSimplify is not
        // empty and we only bother simplifying instructions that are in it.
        if (!ToSimplify->empty() && !ToSimplify->count(I))
          continue;

        // Don't bother simplifying unused instructions.
        if (!I->use_empty()) {
          Value *V = SimplifyInstruction(I, TD, TLI, DT);
          if (V && LI->replacementPreservesLCSSAForm(I, V)) {
            // Mark all uses for resimplification next time round the loop.
            for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
                 UI != UE; ++UI)
              Next->insert(cast<Instruction>(*UI));

            I->replaceAllUsesWith(V);
            LocalChanged = true;
            ++NumSimplified;
          }
        }
        LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

        if (IsSubloopHeader && !isa<PHINode>(I))
          break;
      }

      // Add all successors to the worklist, except for loop exit blocks and the
      // bodies of subloops. We visit the headers of loops so that we can process
      // their phis, but we contract the rest of the subloop body and only follow
      // edges leading back to the original loop.
      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        BasicBlock *SuccBB = *SI;
        if (!Visited.insert(SuccBB))
          continue;

        const Loop *SuccLoop = LI->getLoopFor(SuccBB);
        if (SuccLoop && SuccLoop->getHeader() == SuccBB
                     && L->contains(SuccLoop)) {
          VisitStack.push_back(WorklistItem(SuccBB, true));

          SmallVector<BasicBlock*, 8> SubLoopExitBlocks;
          SuccLoop->getExitBlocks(SubLoopExitBlocks);

          for (unsigned i = 0; i < SubLoopExitBlocks.size(); ++i) {
            BasicBlock *ExitBB = SubLoopExitBlocks[i];
            if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB))
              VisitStack.push_back(WorklistItem(ExitBB, false));
          }

          continue;
        }

        bool IsExitBlock = std::binary_search(ExitBlocks.begin(),
                                              ExitBlocks.end(), SuccBB);
        if (IsExitBlock)
          continue;

        VisitStack.push_back(WorklistItem(SuccBB, false));
      }
    }

    // Place the list of instructions to simplify on the next loop iteration
    // into ToSimplify.
    std::swap(ToSimplify, Next);
    Next->clear();

    Changed |= LocalChanged;
  } while (LocalChanged);

  return Changed;
}
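The loop above drives itself with a pair of swapped sets: the first sweep simplifies every instruction, and each later sweep revisits only instructions whose operands were rewritten. The fixpoint skeleton in isolation, a hedged sketch in which Step stands in for SimplifyInstruction plus the use-marking:

#include <functional>
#include <set>
#include <utility>
#include <vector>

// Iterate to a fixpoint, revisiting only items the previous sweep marked as
// affected: the ToSimplify/Next swap used by the pass above.
void runToFixpoint(const std::vector<int> &Items,
                   const std::function<bool(int, std::set<int> &)> &Step) {
  std::set<int> S1, S2, *ToSimplify = &S1, *Next = &S2;
  bool LocalChanged;
  do {
    LocalChanged = false;
    for (int I : Items) {
      // First sweep: ToSimplify is empty, so visit everything. Afterwards,
      // only bother with items marked during the previous sweep.
      if (!ToSimplify->empty() && !ToSimplify->count(I))
        continue;
      LocalChanged |= Step(I, *Next); // Step records affected items in *Next.
    }
    std::swap(ToSimplify, Next); // the next sweep works off the marks
    Next->clear();
  } while (LocalChanged);
}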
Example #18
void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
  SmallVector<SlotIndex, 16> Starts;
  SmallVector<SlotIndex, 16> Finishes;

  // For each block, find which slots are active within this block
  // and update the live intervals.
  for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end();
       MBB != MBBe; ++MBB) {
    Starts.clear();
    Starts.resize(NumSlots);
    Finishes.clear();
    Finishes.resize(NumSlots);

    // Create the interval for the basic blocks with lifetime markers in them.
    for (SmallVector<MachineInstr*, 8>::iterator it = Markers.begin(),
         e = Markers.end(); it != e; ++it) {
      MachineInstr *MI = *it;
      if (MI->getParent() != MBB)
        continue;

      assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
              MI->getOpcode() == TargetOpcode::LIFETIME_END) &&
             "Invalid Lifetime marker");

      bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START;
      MachineOperand &Mo = MI->getOperand(0);
      int Slot = Mo.getIndex();
      assert(Slot >= 0 && "Invalid slot");

      SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);

      if (IsStart) {
        if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
          Starts[Slot] = ThisIndex;
      } else {
        if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
          Finishes[Slot] = ThisIndex;
      }
    }

    // Create the interval of the blocks that we previously found to be 'alive'.
    BitVector Alive = BlockLiveness[MBB].LiveIn;
    Alive |= BlockLiveness[MBB].LiveOut;

    if (Alive.any()) {
      for (int pos = Alive.find_first(); pos != -1;
           pos = Alive.find_next(pos)) {
        if (!Starts[pos].isValid())
          Starts[pos] = Indexes->getMBBStartIdx(MBB);
        if (!Finishes[pos].isValid())
          Finishes[pos] = Indexes->getMBBEndIdx(MBB);
      }
    }

    for (unsigned i = 0; i < NumSlots; ++i) {
      assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range");
      if (!Starts[i].isValid())
        continue;

      assert(Starts[i] && Finishes[i] && "Invalid interval");
      VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
      SlotIndex S = Starts[i];
      SlotIndex F = Finishes[i];
      if (S < F) {
        // We have a single consecutive region.
        Intervals[i]->addRange(LiveRange(S, F, ValNum));
      } else {
        // We have two non-consecutive regions. This happens when the
        // LIFETIME_START marker appears after the LIFETIME_END marker.
        SlotIndex NewStart = Indexes->getMBBStartIdx(MBB);
        SlotIndex NewFin = Indexes->getMBBEndIdx(MBB);
        Intervals[i]->addRange(LiveRange(NewStart, F, ValNum));
        Intervals[i]->addRange(LiveRange(S, NewFin, ValNum));
      }
    }
  }
}
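The final else branch above handles a slot whose lifetime wraps around the block boundary (its LIFETIME_END is seen before its LIFETIME_START), emitting two ranges instead of one. The range arithmetic reduced to plain integers, as a hedged sketch:

#include <utility>
#include <vector>

// For a block spanning [BlockStart, BlockEnd) and a slot with start S and
// finish F inside it, return the live ranges: one range normally, two when
// the lifetime wraps around the block boundary (S >= F), as above.
std::vector<std::pair<int, int>>
slotRanges(int BlockStart, int BlockEnd, int S, int F) {
  if (S < F)
    return {{S, F}};                       // one consecutive region
  return {{BlockStart, F}, {S, BlockEnd}}; // live into and out of the block
}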
Example #19
// RemoveDeadStuffFromFunction - Remove any arguments and return values from F
// that are not in LiveValues. Transform the function and all of the callers of
// the function to not have these arguments and return values.
//
bool DAE::RemoveDeadStuffFromFunction(Function *F) {
  // Don't modify fully live functions
  if (LiveFunctions.count(F))
    return false;

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has fewer arguments and a different return type.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  // Keep track of if we have a live 'returned' argument
  bool HasLiveReturnedArg = false;

  // Set up to build a new list of parameter attributes.
  SmallVector<AttributeSet, 8> AttributesVec;
  const AttributeSet &PAL = F->getAttributes();

  // Remember which arguments are still alive.
  SmallVector<bool, 10> ArgAlive(FTy->getNumParams(), false);
  // Construct the new parameter list from non-dead arguments. Also construct
  // a new set of parameter attributes to correspond. Skip the first parameter
  // attribute, since that belongs to the return value.
  unsigned i = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++i) {
    RetOrArg Arg = CreateArg(F, i);
    if (LiveValues.erase(Arg)) {
      Params.push_back(I->getType());
      ArgAlive[i] = true;

      // Get the original parameter attributes (skipping the first one, which
      // belongs to the return value).
      if (PAL.hasAttributes(i + 1)) {
        AttrBuilder B(PAL, i + 1);
        if (B.contains(Attribute::Returned))
          HasLiveReturnedArg = true;
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Params.size(), B));
      }
    } else {
      ++NumArgumentsEliminated;
      DEBUG(dbgs() << "DAE - Removing argument " << i << " (" << I->getName()
            << ") from " << F->getName() << "\n");
    }
  }

  // Find out the new return value.
  Type *RetTy = FTy->getReturnType();
  Type *NRetTy = NULL;
  unsigned RetCount = NumRetVals(F);

  // -1 means unused, other numbers are the new index
  SmallVector<int, 5> NewRetIdxs(RetCount, -1);
  std::vector<Type*> RetTypes;

  // If there is a function with a live 'returned' argument but a dead return
  // value, then there are two possible actions:
  // 1) Eliminate the return value and take off the 'returned' attribute on the
  //    argument.
  // 2) Retain the 'returned' attribute and treat the return value (but not the
  //    entire function) as live so that it is not eliminated.
  // 
  // It's not clear in the general case which option is more profitable because,
  // even in the absence of explicit uses of the return value, code generation
  // is free to use the 'returned' attribute to do things like eliding
  // save/restores of registers across calls. Whether or not this happens is
  // target and ABI-specific as well as depending on the amount of register
  // pressure, so there's no good way for an IR-level pass to figure this out.
  //
  // Fortunately, the only places where 'returned' is currently generated by
  // the FE are places where 'returned' is basically free and almost always a
  // performance win, so the second option can just be used always for now.
  //
  // This should be revisited if 'returned' is ever applied more liberally.
  if (RetTy->isVoidTy() || HasLiveReturnedArg) {
    NRetTy = RetTy;
  } else {
    StructType *STy = dyn_cast<StructType>(RetTy);
    if (STy)
      // Look at each of the original return values individually.
      for (unsigned i = 0; i != RetCount; ++i) {
        RetOrArg Ret = CreateRet(F, i);
        if (LiveValues.erase(Ret)) {
          RetTypes.push_back(STy->getElementType(i));
          NewRetIdxs[i] = RetTypes.size() - 1;
        } else {
          ++NumRetValsEliminated;
          DEBUG(dbgs() << "DAE - Removing return value " << i << " from "
                << F->getName() << "\n");
        }
      }
    else
      // We used to return a single value.
      if (LiveValues.erase(CreateRet(F, 0))) {
        RetTypes.push_back(RetTy);
        NewRetIdxs[0] = 0;
      } else {
        DEBUG(dbgs() << "DAE - Removing return value from " << F->getName()
              << "\n");
        ++NumRetValsEliminated;
      }
    if (RetTypes.size() > 1)
      // More than one return type? Return a struct with them. Also, if we used
      // to return a struct and didn't change the number of return values,
      // return a struct again. This prevents changing {something} into
      // something and {} into void.
      // Make the new struct packed if we used to return a packed struct
      // already.
      NRetTy = StructType::get(STy->getContext(), RetTypes, STy->isPacked());
    else if (RetTypes.size() == 1)
      // One return type? Just a simple value then, but only if we didn't use to
      // return a struct with that simple value before.
      NRetTy = RetTypes.front();
    else if (RetTypes.size() == 0)
      // No return types? Make it void, but only if we didn't use to return {}.
      NRetTy = Type::getVoidTy(F->getContext());
  }

  assert(NRetTy && "No new return type found?");

  // The existing function return attributes.
  AttributeSet RAttrs = PAL.getRetAttributes();

  // Remove any incompatible attributes, but only if we removed all return
  // values. Otherwise, ensure that we don't have any conflicting attributes
  // here. Currently, this should not be possible, but special handling might be
  // required when new return value attributes are added.
  if (NRetTy->isVoidTy())
    RAttrs =
      AttributeSet::get(NRetTy->getContext(), AttributeSet::ReturnIndex,
                        AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
         removeAttributes(AttributeFuncs::
                          typeIncompatible(NRetTy, AttributeSet::ReturnIndex),
                          AttributeSet::ReturnIndex));
  else
    assert(!AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
             hasAttributes(AttributeFuncs::
                           typeIncompatible(NRetTy, AttributeSet::ReturnIndex),
                           AttributeSet::ReturnIndex) &&
           "Return attributes no longer compatible?");

  if (RAttrs.hasAttributes(AttributeSet::ReturnIndex))
    AttributesVec.push_back(AttributeSet::get(NRetTy->getContext(), RAttrs));

  if (PAL.hasAttributes(AttributeSet::FunctionIndex))
    AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                              PAL.getFnAttributes()));

  // Reconstruct the AttributesList based on the vector we constructed.
  AttributeSet NewPAL = AttributeSet::get(F->getContext(), AttributesVec);

  // Create the new function type based on the recomputed parameters.
  FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg());

  // No change?
  if (NFTy == FTy)
    return false;

  // Create the new function body and insert it into the module...
  Function *NF = Function::Create(NFTy, F->getLinkage());
  NF->copyAttributesFrom(F);
  NF->setAttributes(NewPAL);
  // Insert the new function before the old function, so we won't be processing
  // it again.
  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in a smaller number of arguments into the new function.
  //
  std::vector<Value*> Args;
  while (!F->use_empty()) {
    CallSite CS(F->use_back());
    Instruction *Call = CS.getInstruction();

    AttributesVec.clear();
    const AttributeSet &CallPAL = CS.getAttributes();

    // The call return attributes.
    AttributeSet RAttrs = CallPAL.getRetAttributes();

    // Adjust in case the function was changed to return void.
    RAttrs =
      AttributeSet::get(NF->getContext(), AttributeSet::ReturnIndex,
                        AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
        removeAttributes(AttributeFuncs::
                         typeIncompatible(NF->getReturnType(),
                                          AttributeSet::ReturnIndex),
                         AttributeSet::ReturnIndex));
    if (RAttrs.hasAttributes(AttributeSet::ReturnIndex))
      AttributesVec.push_back(AttributeSet::get(NF->getContext(), RAttrs));

    // Declare these outside of the loops, so we can reuse them for the second
    // loop, which loops over the varargs.
    CallSite::arg_iterator I = CS.arg_begin();
    unsigned i = 0;
    // Loop over those operands, corresponding to the normal arguments to the
    // original function, and add those that are still alive.
    for (unsigned e = FTy->getNumParams(); i != e; ++I, ++i)
      if (ArgAlive[i]) {
        Args.push_back(*I);
        // Get original parameter attributes, but skip return attributes.
        if (CallPAL.hasAttributes(i + 1)) {
          AttrBuilder B(CallPAL, i + 1);
          // If the return type has changed, then get rid of 'returned' on the
          // call site. The alternative is to make all 'returned' attributes on
          // call sites keep the return value alive just like 'returned'
          // attributes on function declarations, but that is less clearly a
          // win, and this is not an expected case anyway.
          if (NRetTy != RetTy && B.contains(Attribute::Returned))
            B.removeAttribute(Attribute::Returned);
          AttributesVec.
            push_back(AttributeSet::get(F->getContext(), Args.size(), B));
        }
      }

    // Push any varargs arguments on the list. Don't forget their attributes.
    for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) {
      Args.push_back(*I);
      if (CallPAL.hasAttributes(i + 1)) {
        AttrBuilder B(CallPAL, i + 1);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Args.size(), B));
      }
    }

    if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
      AttributesVec.push_back(AttributeSet::get(Call->getContext(),
                                                CallPAL.getFnAttributes()));

    // Reconstruct the AttributesList based on the vector we constructed.
    AttributeSet NewCallPAL = AttributeSet::get(F->getContext(), AttributesVec);

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(NewCallPAL);
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(NewCallPAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    New->setDebugLoc(Call->getDebugLoc());

    Args.clear();

    if (!Call->use_empty()) {
      if (New->getType() == Call->getType()) {
        // Return type not changed? Just replace users then.
        Call->replaceAllUsesWith(New);
        New->takeName(Call);
      } else if (New->getType()->isVoidTy()) {
        // Our return value has uses, but they will get removed later on.
        // Replace by null for now.
        if (!Call->getType()->isX86_MMXTy())
          Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
      } else {
        assert(RetTy->isStructTy() &&
               "Return type changed, but not into a void. The old return type"
               " must have been a struct!");
        Instruction *InsertPt = Call;
        if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
          BasicBlock::iterator IP = II->getNormalDest()->begin();
          while (isa<PHINode>(IP)) ++IP;
          InsertPt = IP;
        }

        // We used to return a struct. Instead of doing smart stuff with all the
        // uses of this struct, we will just rebuild it using
        // extract/insertvalue chaining and let instcombine clean that up.
        //
        // Start out building up our return value from undef
        Value *RetVal = UndefValue::get(RetTy);
        for (unsigned i = 0; i != RetCount; ++i)
          if (NewRetIdxs[i] != -1) {
            Value *V;
            if (RetTypes.size() > 1)
              // We are still returning a struct, so extract the value from our
              // return value
              V = ExtractValueInst::Create(New, NewRetIdxs[i], "newret",
                                           InsertPt);
            else
              // We are now returning a single element, so just insert that
              V = New;
            // Insert the value at the old position
            RetVal = InsertValueInst::Create(RetVal, V, i, "oldret", InsertPt);
          }
        // Now, replace all uses of the old call instruction with the return
        // struct we built
        Call->replaceAllUsesWith(RetVal);
        New->takeName(Call);
      }
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over
  // to the new arguments, and transferring the names as well.
  i = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I, ++i)
    if (ArgAlive[i]) {
      // If this is a live argument, move the name and users over to the new
      // version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      ++I2;
    } else {
      // If this argument is dead, replace any uses of it with null constants
      // (these are guaranteed to become unused later on).
      if (!I->getType()->isX86_MMXTy())
        I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
    }

  // If we change the return value of the function we must rewrite any return
  // instructions.  Check this now.
  if (F->getReturnType() != NF->getReturnType())
    for (Function::iterator BB = NF->begin(), E = NF->end(); BB != E; ++BB)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
        Value *RetVal;

        if (NFTy->getReturnType()->isVoidTy()) {
          RetVal = 0;
        } else {
          assert (RetTy->isStructTy());
          // The original return value was a struct, insert
          // extractvalue/insertvalue chains to extract only the values we need
          // to return and insert them into our new result.
          // This does generate messy code, but we'll leave it to instcombine
          // to clean up.
          Value *OldRet = RI->getOperand(0);
          // Start out building up our return value from undef
          RetVal = UndefValue::get(NRetTy);
          for (unsigned i = 0; i != RetCount; ++i)
            if (NewRetIdxs[i] != -1) {
              ExtractValueInst *EV = ExtractValueInst::Create(OldRet, i,
                                                              "oldret", RI);
              if (RetTypes.size() > 1) {
                // We're still returning a struct, so reinsert the value into
                // our new return value at the new index

                RetVal = InsertValueInst::Create(RetVal, EV, NewRetIdxs[i],
                                                 "newret", RI);
              } else {
                // We are now only returning a simple value, so just return the
                // extracted value.
                RetVal = EV;
              }
            }
        }
        // Replace the return instruction with one returning the new return
        // value (possibly 0 if we became void).
        ReturnInst::Create(F->getContext(), RetVal, RI);
        BB->getInstList().erase(RI);
      }

  // Patch the pointer to LLVM function in debug info descriptor.
  FunctionDIMap::iterator DI = FunctionDIs.find(F);
  if (DI != FunctionDIs.end())
    DI->second.replaceFunction(NF);

  // Now that the old function is dead, delete it.
  F->eraseFromParent();

  return true;
}
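Both places above that rebuild a value of the old struct type use the same extractvalue/insertvalue chain. Roughly the same helper written against IRBuilder, as a hedged sketch: NewVal is assumed to be the shrunken aggregate, and Map the old-index-to-new-index table with -1 marking dropped slots.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/IRBuilder.h"

// Rebuild a value of the old struct type from the shrunken one, leaving
// dropped slots undef; instcombine is expected to clean up the chains.
llvm::Value *rebuildOldStruct(llvm::IRBuilder<> &B, llvm::Value *NewVal,
                              llvm::StructType *OldTy,
                              llvm::ArrayRef<int> Map) {
  llvm::Value *RetVal = llvm::UndefValue::get(OldTy);
  for (unsigned i = 0, e = OldTy->getNumElements(); i != e; ++i)
    if (Map[i] != -1) {
      unsigned NewIdx = static_cast<unsigned>(Map[i]);
      llvm::Value *V = B.CreateExtractValue(NewVal, NewIdx, "newret");
      RetVal = B.CreateInsertValue(RetVal, V, i, "oldret");
    }
  return RetVal;
}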
Example #20
bool ReduceCrashingBlocks::TestBlocks(std::vector<const BasicBlock *> &BBs) {
  // Clone the program to try hacking it apart...
  ValueToValueMapTy VMap;
  Module *M = CloneModule(BD.getProgram(), VMap).release();

  // Convert list to set for fast lookup...
  SmallPtrSet<BasicBlock *, 8> Blocks;
  for (unsigned i = 0, e = BBs.size(); i != e; ++i)
    Blocks.insert(cast<BasicBlock>(VMap[BBs[i]]));

  outs() << "Checking for crash with only these blocks:";
  unsigned NumPrint = Blocks.size();
  if (NumPrint > 10)
    NumPrint = 10;
  for (unsigned i = 0, e = NumPrint; i != e; ++i)
    outs() << " " << BBs[i]->getName();
  if (NumPrint < Blocks.size())
    outs() << "... <" << Blocks.size() << " total>";
  outs() << ": ";

  // Loop over the blocks, hacking up any that are not listed...
  for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
    for (Function::iterator BB = I->begin(), E = I->end(); BB != E; ++BB)
      if (!Blocks.count(&*BB) && BB->getTerminator()->getNumSuccessors()) {
        // Loop over all of the successors of this block, deleting any PHI nodes
        // that might include it.
        for (succ_iterator SI = succ_begin(&*BB), E = succ_end(&*BB); SI != E;
             ++SI)
          (*SI)->removePredecessor(&*BB);

        TerminatorInst *BBTerm = BB->getTerminator();
        if (BBTerm->isEHPad() || BBTerm->getType()->isTokenTy())
          continue;
        if (!BBTerm->getType()->isVoidTy())
          BBTerm->replaceAllUsesWith(Constant::getNullValue(BBTerm->getType()));

        // Replace the old terminator instruction.
        BB->getInstList().pop_back();
        new UnreachableInst(BB->getContext(), &*BB);
      }

  // The CFG Simplifier pass may delete one of the basic blocks we are
  // interested in.  If it does we need to take the block out of the list.  Make
  // a "persistent mapping" by turning basic blocks into <function, name> pairs.
  // This won't work well if blocks are unnamed, but that is just the risk we
  // have to take.
  std::vector<std::pair<std::string, std::string>> BlockInfo;

  for (BasicBlock *BB : Blocks)
    BlockInfo.emplace_back(BB->getParent()->getName(), BB->getName());

  SmallVector<BasicBlock *, 16> ToProcess;
  for (auto &F : *M) {
    for (auto &BB : F)
      if (!Blocks.count(&BB))
        ToProcess.push_back(&BB);
    simpleSimplifyCfg(F, ToProcess);
    ToProcess.clear();
  }
  // Verify we didn't break anything
  std::vector<std::string> Passes;
  Passes.push_back("verify");
  std::unique_ptr<Module> New = BD.runPassesOn(M, Passes);
  delete M;
  if (!New) {
    errs() << "verify failed!\n";
    exit(1);
  }
  M = New.release();

  // Try running on the hacked up program...
  if (TestFn(BD, M)) {
    BD.setNewProgram(M); // It crashed, keep the trimmed version...

    // Make sure to use basic block pointers that point into the now-current
    // module, and that they don't include any deleted blocks.
    BBs.clear();
    const ValueSymbolTable &GST = M->getValueSymbolTable();
    for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
      Function *F = cast<Function>(GST.lookup(BlockInfo[i].first));
      Value *V = F->getValueSymbolTable()->lookup(BlockInfo[i].second);
      if (V && V->getType() == Type::getLabelTy(V->getContext()))
        BBs.push_back(cast<BasicBlock>(V));
    }
    return true;
  }
  delete M; // It didn't crash, try something else.
  return false;
}
Example #21
void TypeFinder::run(const Module &M, bool onlyNamed) {
  OnlyNamed = onlyNamed;

  // Get types from global variables.
  for (Module::const_global_iterator I = M.global_begin(),
         E = M.global_end(); I != E; ++I) {
    incorporateType(I->getType());
    if (I->hasInitializer())
      incorporateValue(I->getInitializer());
  }

  // Get types from aliases.
  for (Module::const_alias_iterator I = M.alias_begin(),
         E = M.alias_end(); I != E; ++I) {
    incorporateType(I->getType());
    if (const Value *Aliasee = I->getAliasee())
      incorporateValue(Aliasee);
  }

  // Get types from functions.
  SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst;
  for (Module::const_iterator FI = M.begin(), E = M.end(); FI != E; ++FI) {
    incorporateType(FI->getType());

    if (FI->hasPrefixData())
      incorporateValue(FI->getPrefixData());

    // First incorporate the arguments.
    for (Function::const_arg_iterator AI = FI->arg_begin(),
           AE = FI->arg_end(); AI != AE; ++AI)
      incorporateValue(AI);

    for (Function::const_iterator BB = FI->begin(), E = FI->end();
         BB != E;++BB)
      for (BasicBlock::const_iterator II = BB->begin(),
             E = BB->end(); II != E; ++II) {
        const Instruction &I = *II;

        // Incorporate the type of the instruction.
        incorporateType(I.getType());

        // Incorporate non-instruction operand types. (We are incorporating all
        // instructions with this loop.)
        for (User::const_op_iterator OI = I.op_begin(), OE = I.op_end();
             OI != OE; ++OI)
          if (!isa<Instruction>(OI))
            incorporateValue(*OI);

        // Incorporate types hiding in metadata.
        I.getAllMetadataOtherThanDebugLoc(MDForInst);
        for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
          incorporateMDNode(MDForInst[i].second);

        MDForInst.clear();
      }
  }

  for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
         E = M.named_metadata_end(); I != E; ++I) {
    const NamedMDNode *NMD = I;
    for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
      incorporateMDNode(NMD->getOperand(i));
  }
}
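Typical usage of this walker, assuming the usual llvm::TypeFinder interface that exposes iteration over the collected struct types:

#include "llvm/IR/Module.h"
#include "llvm/IR/TypeFinder.h"
#include "llvm/Support/raw_ostream.h"

// Print the name of every named struct type reachable from a module.
void printNamedStructs(const llvm::Module &M) {
  llvm::TypeFinder Finder;
  Finder.run(M, /*onlyNamed=*/true);
  for (llvm::StructType *ST : Finder)
    llvm::outs() << ST->getName() << "\n";
}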
Example #22
bool ReduceCrashingConditionals::TestBlocks(
    std::vector<const BasicBlock *> &BBs) {
  // Clone the program to try hacking it apart...
  ValueToValueMapTy VMap;
  Module *M = CloneModule(BD.getProgram(), VMap).release();

  // Convert list to set for fast lookup...
  SmallPtrSet<const BasicBlock *, 8> Blocks;
  for (const auto *BB : BBs)
    Blocks.insert(cast<BasicBlock>(VMap[BB]));

  outs() << "Checking for crash with changing conditionals to always jump to "
         << (Direction ? "true" : "false") << ":";
  unsigned NumPrint = Blocks.size();
  if (NumPrint > 10)
    NumPrint = 10;
  for (unsigned i = 0, e = NumPrint; i != e; ++i)
    outs() << " " << BBs[i]->getName();
  if (NumPrint < Blocks.size())
    outs() << "... <" << Blocks.size() << " total>";
  outs() << ": ";

  // Loop over the blocks, rewriting the conditional branches in any that are
  // not listed...
  for (auto &F : *M)
    for (auto &BB : F)
      if (!Blocks.count(&BB)) {
        auto *BR = dyn_cast<BranchInst>(BB.getTerminator());
        if (!BR || !BR->isConditional())
          continue;
        if (Direction)
          BR->setCondition(ConstantInt::getTrue(BR->getContext()));
        else
          BR->setCondition(ConstantInt::getFalse(BR->getContext()));
      }

  // The following may destroy some blocks, so we save them first
  std::vector<std::pair<std::string, std::string>> BlockInfo;

  for (const BasicBlock *BB : Blocks)
    BlockInfo.emplace_back(BB->getParent()->getName(), BB->getName());

  SmallVector<BasicBlock *, 16> ToProcess;
  for (auto &F : *M) {
    for (auto &BB : F)
      if (!Blocks.count(&BB))
        ToProcess.push_back(&BB);
    simpleSimplifyCfg(F, ToProcess);
    ToProcess.clear();
  }
  // Verify we didn't break anything
  std::vector<std::string> Passes;
  Passes.push_back("verify");
  std::unique_ptr<Module> New = BD.runPassesOn(M, Passes);
  delete M;
  if (!New) {
    errs() << "verify failed!\n";
    exit(1);
  }
  M = New.release();

  // Try running on the hacked up program...
  if (TestFn(BD, M)) {
    BD.setNewProgram(M); // It crashed, keep the trimmed version...

    // Make sure to use basic block pointers that point into the now-current
    // module, and that they don't include any deleted blocks.
    BBs.clear();
    const ValueSymbolTable &GST = M->getValueSymbolTable();
    for (auto &BI : BlockInfo) {
      auto *F = cast<Function>(GST.lookup(BI.first));
      Value *V = F->getValueSymbolTable()->lookup(BI.second);
      if (V && V->getType() == Type::getLabelTy(V->getContext()))
        BBs.push_back(cast<BasicBlock>(V));
    }
    return true;
  }
  delete M; // It didn't crash, try something else.
  return false;
}
Example #23
void MachineBasicBlock::updateTerminator() {
  const TargetInstrInfo *TII = getParent()->getTarget().getInstrInfo();
  // A block with no successors has no concerns with fall-through edges.
  if (this->succ_empty()) return;

  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  DebugLoc dl;  // FIXME: this is nowhere
  bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
  (void) B;
  assert(!B && "UpdateTerminators requires analyzable predecessors!");
  if (Cond.empty()) {
    if (TBB) {
      // The block has an unconditional branch. If its successor is now
      // its layout successor, delete the branch.
      if (isLayoutSuccessor(TBB))
        TII->RemoveBranch(*this);
    } else {
      // The block has an unconditional fallthrough. If its successor is not
      // its layout successor, insert a branch. First we have to locate the
      // only non-landing-pad successor, as that is the fallthrough block.
      for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
        if ((*SI)->isLandingPad())
          continue;
        assert(!TBB && "Found more than one non-landing-pad successor!");
        TBB = *SI;
      }

      // If there is no non-landing-pad successor, the block has no
      // fall-through edges to be concerned with.
      if (!TBB)
        return;

      // Finally update the unconditional successor to be reached via a branch
      // if it would not be reached by fallthrough.
      if (!isLayoutSuccessor(TBB))
        TII->InsertBranch(*this, TBB, 0, Cond, dl);
    }
  } else {
    if (FBB) {
      // The block has a non-fallthrough conditional branch. If one of its
      // successors is its layout successor, rewrite it to a fallthrough
      // conditional branch.
      if (isLayoutSuccessor(TBB)) {
        if (TII->ReverseBranchCondition(Cond))
          return;
        TII->RemoveBranch(*this);
        TII->InsertBranch(*this, FBB, 0, Cond, dl);
      } else if (isLayoutSuccessor(FBB)) {
        TII->RemoveBranch(*this);
        TII->InsertBranch(*this, TBB, 0, Cond, dl);
      }
    } else {
      // Walk through the successors and find the successor which is not
      // a landing pad and is not the conditional branch destination (in TBB)
      // as the fallthrough successor.
      MachineBasicBlock *FallthroughBB = 0;
      for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
        if ((*SI)->isLandingPad() || *SI == TBB)
          continue;
        assert(!FallthroughBB && "Found more than one fallthrough successor.");
        FallthroughBB = *SI;
      }
      if (!FallthroughBB && canFallThrough()) {
        // We fall through to the same basic block as the conditional jump
        // targets. Remove the conditional jump, leaving unconditional
        // fallthrough.
        // FIXME: This does not seem like a reasonable pattern to support, but it
        // has been seen in the wild coming out of degenerate ARM test cases.
        TII->RemoveBranch(*this);

        // Finally update the unconditional successor to be reached via a branch
        // if it would not be reached by fallthrough.
        if (!isLayoutSuccessor(TBB))
          TII->InsertBranch(*this, TBB, 0, Cond, dl);
        return;
      }

      // The block has a fallthrough conditional branch.
      if (isLayoutSuccessor(TBB)) {
        if (TII->ReverseBranchCondition(Cond)) {
          // We can't reverse the condition, add an unconditional branch.
          Cond.clear();
          TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
          return;
        }
        TII->RemoveBranch(*this);
        TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
      } else if (!isLayoutSuccessor(FallthroughBB)) {
        TII->RemoveBranch(*this);
        TII->InsertBranch(*this, TBB, FallthroughBB, Cond, dl);
      }
    }
  }
}
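The nesting above is easier to follow against the four shapes AnalyzeBranch can report (summarized here; TargetInstrInfo::AnalyzeBranch documents the authoritative contract):

// Cond empty, TBB null  -> pure fallthrough to the layout successor
// Cond empty, TBB set   -> unconditional branch to TBB
// Cond set,   FBB null  -> conditional branch to TBB, else fall through
// Cond set,   FBB set   -> conditional branch to TBB, else branch to FBB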
Example #24
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI <<'\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}
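The literal bookkeeping above implements a simple size heuristic: uses where the immediate stays inline are always folded, while the non-inline (literal) form is folded only when it has exactly one use, since each extra literal use grows the encoded program. The decision in isolation, as a hedged sketch:

#include <vector>

struct UseSite { bool InlineOK; }; // can this use encode the imm inline?

// Return the indices of uses worth folding an immediate into: every
// inline-capable use, plus the literal use if it is the only one,
// mirroring the NumLiteralUses bookkeeping above.
std::vector<unsigned> usesToFold(const std::vector<UseSite> &Uses) {
  std::vector<unsigned> Fold;
  unsigned NumLiteralUses = 0, LastLiteral = 0;
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    if (Uses[i].InlineOK)
      Fold.push_back(i);           // inline immediates are always free
    else if (++NumLiteralUses == 1)
      LastLiteral = i;             // remember the sole literal candidate
  }
  if (NumLiteralUses == 1)
    Fold.push_back(LastLiteral);   // one literal use doesn't grow the program
  return Fold;
}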
Example #25
void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
    using namespace llvm;

    // Emit the file header.
    Stream.Emit((unsigned)'B', 8);
    Stream.Emit((unsigned)'C', 8);
    Stream.Emit((unsigned)'G', 8);
    Stream.Emit((unsigned)'I', 8);

    // Write the block-info block, which describes the records in this bitcode
    // file.
    emitBlockInfoBlock(Stream);

    Stream.EnterSubblock(GLOBAL_INDEX_BLOCK_ID, 3);

    // Write the metadata.
    SmallVector<uint64_t, 2> Record;
    Record.push_back(CurrentVersion);
    Stream.EmitRecord(INDEX_METADATA, Record);

    // Write the set of known module files.
    for (ModuleFilesMap::iterator M = ModuleFiles.begin(),
            MEnd = ModuleFiles.end();
            M != MEnd; ++M) {
        Record.clear();
        Record.push_back(M->second.ID);
        Record.push_back(M->first->getSize());
        Record.push_back(M->first->getModificationTime());

        // File name
        StringRef Name(M->first->getName());
        Record.push_back(Name.size());
        Record.append(Name.begin(), Name.end());

        // Dependencies
        Record.push_back(M->second.Dependencies.size());
        Record.append(M->second.Dependencies.begin(), M->second.Dependencies.end());
        Stream.EmitRecord(MODULE, Record);
    }

    // Write the identifier -> module file mapping.
    {
        OnDiskChainedHashTableGenerator<IdentifierIndexWriterTrait> Generator;
        IdentifierIndexWriterTrait Trait;

        // Populate the hash table.
        for (InterestingIdentifierMap::iterator I = InterestingIdentifiers.begin(),
                IEnd = InterestingIdentifiers.end();
                I != IEnd; ++I) {
            Generator.insert(I->first(), I->second, Trait);
        }

        // Create the on-disk hash table in a buffer.
        SmallString<4096> IdentifierTable;
        uint32_t BucketOffset;
        {
            llvm::raw_svector_ostream Out(IdentifierTable);
            // Make sure that no bucket is at offset 0
            clang::io::Emit32(Out, 0);
            BucketOffset = Generator.Emit(Out, Trait);
        }

        // Create a blob abbreviation
        BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
        Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_INDEX));
        Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
        Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
        unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);

        // Write the identifier table
        Record.clear();
        Record.push_back(IDENTIFIER_INDEX);
        Record.push_back(BucketOffset);
        Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable.str());
    }

    Stream.ExitBlock();
}
Example #26
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDFs, and the standard SSA construction algorithm.  Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock*, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in.  These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Use a priority queue keyed on dominator tree level so that inserted nodes
  // are handled from the bottom of the dominator tree upwards.
  typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
                              DomTreeNodeCompare> IDFPriorityQueue;
  IDFPriorityQueue PQ;

  for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
       E = DefBlocks.end(); I != E; ++I) {
    if (DomTreeNode *Node = DT.getNode(*I))
      PQ.push(std::make_pair(Node, DomLevels[Node]));
  }

  SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
  SmallPtrSet<DomTreeNode*, 32> Visited;
  SmallVector<DomTreeNode*, 32> Worklist;
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dominator tree children of Root, inspecting their CFG edges with
    // targets elsewhere on the dominator tree. Only targets whose level is at
    // most Root's level are added to the iterated dominance frontier of the
    // definition set.

    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      BasicBlock *BB = Node->getBlock();

      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        DomTreeNode *SuccNode = DT.getNode(*SI);

        // Quickly skip all CFG edges that are also dominator tree edges instead
        // of catching them below.
        if (SuccNode->getIDom() == Node)
          continue;

        unsigned SuccLevel = DomLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        if (!Visited.insert(SuccNode))
          continue;

        BasicBlock *SuccBB = SuccNode->getBlock();
        if (!LiveInBlocks.count(SuccBB))
          continue;

        DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
        if (!DefBlocks.count(SuccBB))
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
           ++CI) {
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
      }
    }
  }

  if (DFBlocks.size() > 1)
    std::sort(DFBlocks.begin(), DFBlocks.end());

  unsigned CurrentVersion = 0;
  for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
    QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
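Newer LLVM trees factor this exact priority-queue walk into a reusable utility; assuming a tree that ships llvm/Analysis/IteratedDominanceFrontier.h, the body above reduces to roughly:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/IR/Dominators.h"

// Compute the phi-insertion blocks for one alloca with the library IDF
// calculator instead of the hand-rolled priority queue above.
void computePhiBlocks(llvm::DominatorTree &DT,
                      const llvm::SmallPtrSetImpl<llvm::BasicBlock *> &Defs,
                      const llvm::SmallPtrSetImpl<llvm::BasicBlock *> &LiveIn,
                      llvm::SmallVectorImpl<llvm::BasicBlock *> &PhiBlocks) {
  llvm::ForwardIDFCalculator IDF(DT);
  IDF.setDefiningBlocks(Defs);
  IDF.setLiveInBlocks(LiveIn);
  IDF.calculate(PhiBlocks);
}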
Example #27
/// ParseBlock - Read a block, updating statistics, etc.
static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
    std::string Indent(IndentLevel*2, ' ');
    uint64_t BlockBitStart = Stream.GetCurrentBitNo();
    unsigned BlockID = Stream.ReadSubBlockID();

    // Get the statistics for this BlockID.
    PerBlockIDStats &BlockStats = BlockIDStats[BlockID];

    BlockStats.NumInstances++;

    // BLOCKINFO is a special part of the stream.
    if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
        if (Dump) outs() << Indent << "<BLOCKINFO_BLOCK/>\n";
        if (Stream.ReadBlockInfoBlock())
            return Error("Malformed BlockInfoBlock");
        uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
        BlockStats.NumBits += BlockBitEnd-BlockBitStart;
        return false;
    }

    unsigned NumWords = 0;
    if (Stream.EnterSubBlock(BlockID, &NumWords))
        return Error("Malformed block record");

    const char *BlockName = 0;
    if (Dump) {
        outs() << Indent << "<";
        if ((BlockName = GetBlockName(BlockID, *Stream.getBitStreamReader())))
            outs() << BlockName;
        else
            outs() << "UnknownBlock" << BlockID;

        if (NonSymbolic && BlockName)
            outs() << " BlockID=" << BlockID;

        outs() << " NumWords=" << NumWords
               << " BlockCodeSize=" << Stream.GetAbbrevIDWidth() << ">\n";
    }

    SmallVector<uint64_t, 64> Record;

    // Read all the records for this block.
    while (1) {
        if (Stream.AtEndOfStream())
            return Error("Premature end of bitstream");

        uint64_t RecordStartBit = Stream.GetCurrentBitNo();

        // Read the code for this record.
        unsigned AbbrevID = Stream.ReadCode();
        switch (AbbrevID) {
        case bitc::END_BLOCK: {
            if (Stream.ReadBlockEnd())
                return Error("Error at end of block");
            uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
            BlockStats.NumBits += BlockBitEnd-BlockBitStart;
            if (Dump) {
                outs() << Indent << "</";
                if (BlockName)
                    outs() << BlockName << ">\n";
                else
                    outs() << "UnknownBlock" << BlockID << ">\n";
            }
            return false;
        }
        case bitc::ENTER_SUBBLOCK: {
            uint64_t SubBlockBitStart = Stream.GetCurrentBitNo();
            if (ParseBlock(Stream, IndentLevel+1))
                return true;
            ++BlockStats.NumSubBlocks;
            uint64_t SubBlockBitEnd = Stream.GetCurrentBitNo();

            // Don't include subblock sizes in the size of this block.
            BlockBitStart += SubBlockBitEnd-SubBlockBitStart;
            break;
        }
        case bitc::DEFINE_ABBREV:
            Stream.ReadAbbrevRecord();
            ++BlockStats.NumAbbrevs;
            break;
        default:
            Record.clear();

            ++BlockStats.NumRecords;
            if (AbbrevID != bitc::UNABBREV_RECORD)
                ++BlockStats.NumAbbreviatedRecords;

            const char *BlobStart = 0;
            unsigned BlobLen = 0;
            unsigned Code = Stream.ReadRecord(AbbrevID, Record, BlobStart, BlobLen);

            // Increment the # occurrences of this code.
            if (BlockStats.CodeFreq.size() <= Code)
                BlockStats.CodeFreq.resize(Code+1);
            BlockStats.CodeFreq[Code].NumInstances++;
            BlockStats.CodeFreq[Code].TotalBits +=
                Stream.GetCurrentBitNo()-RecordStartBit;
            if (AbbrevID != bitc::UNABBREV_RECORD)
                BlockStats.CodeFreq[Code].NumAbbrev++;

            if (Dump) {
                outs() << Indent << "  <";
                if (const char *CodeName =
                            GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
                    outs() << CodeName;
                else
                    outs() << "UnknownCode" << Code;
                if (NonSymbolic &&
                        GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
                    outs() << " codeid=" << Code;
                if (AbbrevID != bitc::UNABBREV_RECORD)
                    outs() << " abbrevid=" << AbbrevID;

                for (unsigned i = 0, e = Record.size(); i != e; ++i)
                    outs() << " op" << i << "=" << (int64_t)Record[i];

                outs() << "/>";

                if (BlobStart) {
                    outs() << " blob data = ";
                    bool BlobIsPrintable = true;
                    for (unsigned i = 0; i != BlobLen; ++i)
                        if (!isprint(BlobStart[i])) {
                            BlobIsPrintable = false;
                            break;
                        }

                    if (BlobIsPrintable)
                        outs() << "'" << std::string(BlobStart, BlobStart+BlobLen) <<"'";
                    else
                        outs() << "unprintable, " << BlobLen << " bytes.";
                }

                outs() << "\n";
            }

            break;
        }
    }
}
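One subtlety above: subblock bits are excluded from the parent's tally not by subtracting at the end but by sliding BlockBitStart forward past each child. The arithmetic checked on plain integers, as a small sketch:

#include <cstdint>

// A parent block spans [Start, End); one child occupied [ChildStart,
// ChildEnd). Advancing Start by the child's size makes End - Start count
// only the parent's own bits, matching the ParseBlock accounting above.
uint64_t ownBits(uint64_t Start, uint64_t ChildStart, uint64_t ChildEnd,
                 uint64_t End) {
  Start += ChildEnd - ChildStart;
  return End - Start;
}
// e.g. ownBits(0, 10, 30, 50) == 30: 50 bits total, 20 of them the child's.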
Example #28
static int compileModule(char **argv, LLVMContext &Context) {
  // Load the module to be compiled...
  SMDiagnostic Err;
  std::unique_ptr<Module> M;
  std::unique_ptr<MIRParser> MIR;
  Triple TheTriple;

  bool SkipModule = MCPU == "help" ||
                    (!MAttrs.empty() && MAttrs.front() == "help");

  // If the user just wants to list available options, skip module loading.
  if (!SkipModule) {
    if (StringRef(InputFilename).endswith_lower(".mir")) {
      MIR = createMIRParserFromFile(InputFilename, Err, Context);
      if (MIR)
        M = MIR->parseLLVMModule();
    } else
      M = parseIRFile(InputFilename, Err, Context);
    if (!M) {
      Err.print(argv[0], errs());
      return 1;
    }

    // Verify module immediately to catch problems before doInitialization() is
    // called on any passes.
    if (!NoVerify && verifyModule(*M, &errs())) {
      errs() << argv[0] << ": " << InputFilename
             << ": error: input module is broken!\n";
      return 1;
    }

    // If we are supposed to override the target triple, do so now.
    if (!TargetTriple.empty())
      M->setTargetTriple(Triple::normalize(TargetTriple));
    TheTriple = Triple(M->getTargetTriple());
  } else {
    TheTriple = Triple(Triple::normalize(TargetTriple));
  }

  if (TheTriple.getTriple().empty())
    TheTriple.setTriple(sys::getDefaultTargetTriple());

  // Get the target specific parser.
  std::string Error;
  const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
                                                         Error);
  if (!TheTarget) {
    errs() << argv[0] << ": " << Error;
    return 1;
  }

  std::string CPUStr = getCPUStr(), FeaturesStr = getFeaturesStr();

  CodeGenOpt::Level OLvl = CodeGenOpt::Default;
  switch (OptLevel) {
  default:
    errs() << argv[0] << ": invalid optimization level.\n";
    return 1;
  case ' ': break;
  case '0': OLvl = CodeGenOpt::None; break;
  case '1': OLvl = CodeGenOpt::Less; break;
  case '2': OLvl = CodeGenOpt::Default; break;
  case '3': OLvl = CodeGenOpt::Aggressive; break;
  }

  TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
  Options.DisableIntegratedAS = NoIntegratedAssembler;
  Options.MCOptions.ShowMCEncoding = ShowMCEncoding;
  Options.MCOptions.MCUseDwarfDirectory = EnableDwarfDirectory;
  Options.MCOptions.AsmVerbose = AsmVerbose;
  Options.MCOptions.PreserveAsmComments = PreserveComments;
  Options.MCOptions.IASSearchPaths = IncludeDirs;
  Options.MCOptions.SplitDwarfFile = SplitDwarfFile;

  std::unique_ptr<TargetMachine> Target(
      TheTarget->createTargetMachine(TheTriple.getTriple(), CPUStr, FeaturesStr,
                                     Options, getRelocModel(), CMModel, OLvl));

  assert(Target && "Could not allocate target machine!");

  // If we don't have a module then just exit now. We do this down
  // here since the CPU/Feature help is underneath the target machine
  // creation.
  if (SkipModule)
    return 0;

  assert(M && "Should have exited if we didn't have a module!");
  if (FloatABIForCalls != FloatABI::Default)
    Options.FloatABIType = FloatABIForCalls;

  // Figure out where we are going to send the output.
  std::unique_ptr<tool_output_file> Out =
      GetOutputStream(TheTarget->getName(), TheTriple.getOS(), argv[0]);
  if (!Out) return 1;

  // Build up all of the passes that we want to do to the module.
  legacy::PassManager PM;

  // Add an appropriate TargetLibraryInfo pass for the module's triple.
  TargetLibraryInfoImpl TLII(Triple(M->getTargetTriple()));

  // The -disable-simplify-libcalls flag actually disables all builtin optzns.
  if (DisableSimplifyLibCalls)
    TLII.disableAllFunctions();
  PM.add(new TargetLibraryInfoWrapperPass(TLII));

  // Add the target data from the target machine, if it exists, or the module.
  M->setDataLayout(Target->createDataLayout());

  // Override function attributes based on CPUStr, FeaturesStr, and command line
  // flags.
  setFunctionAttributes(CPUStr, FeaturesStr, *M);

  if (RelaxAll.getNumOccurrences() > 0 &&
      FileType != TargetMachine::CGFT_ObjectFile)
    errs() << argv[0]
             << ": warning: ignoring -mc-relax-all because filetype != obj";

  {
    raw_pwrite_stream *OS = &Out->os();

    // Manually do the buffering rather than using buffer_ostream,
    // so we can memcmp the contents in CompileTwice mode.
    SmallVector<char, 0> Buffer;
    std::unique_ptr<raw_svector_ostream> BOS;
    if ((FileType != TargetMachine::CGFT_AssemblyFile &&
         !Out->os().supportsSeeking()) ||
        CompileTwice) {
      BOS = make_unique<raw_svector_ostream>(Buffer);
      OS = BOS.get();
    }

    if (!RunPassNames->empty()) {
      if (!StartAfter.empty() || !StopAfter.empty() || !StartBefore.empty() ||
          !StopBefore.empty()) {
        errs() << argv[0] << ": start-before/after and stop-before/after "
                             "passes are redundant when run-pass is "
                             "specified.\n";
        return 1;
      }
      if (!MIR) {
        errs() << argv[0] << ": run-pass needs a .mir input.\n";
        return 1;
      }
      LLVMTargetMachine &LLVMTM = static_cast<LLVMTargetMachine&>(*Target);
      TargetPassConfig &TPC = *LLVMTM.createPassConfig(PM);
      PM.add(&TPC);
      MachineModuleInfo *MMI = new MachineModuleInfo(&LLVMTM);
      MMI->setMachineFunctionInitializer(MIR.get());
      PM.add(MMI);
      TPC.printAndVerify("");

      for (const std::string &RunPassName : *RunPassNames) {
        if (addPass(PM, argv[0], RunPassName, TPC))
          return 1;
      }
      PM.add(createPrintMIRPass(*OS));
    } else {
      const char *argv0 = argv[0];
      AnalysisID StartBeforeID = getPassID(argv0, "start-before", StartBefore);
      AnalysisID StartAfterID = getPassID(argv0, "start-after", StartAfter);
      AnalysisID StopAfterID = getPassID(argv0, "stop-after", StopAfter);
      AnalysisID StopBeforeID = getPassID(argv0, "stop-before", StopBefore);

      if (StartBeforeID && StartAfterID) {
        errs() << argv[0] << ": -start-before and -start-after specified!\n";
        return 1;
      }
      if (StopBeforeID && StopAfterID) {
        errs() << argv[0] << ": -stop-before and -stop-after specified!\n";
        return 1;
      }

      // Ask the target to add backend passes as necessary.
      if (Target->addPassesToEmitFile(PM, *OS, FileType, NoVerify,
                                      StartBeforeID, StartAfterID, StopBeforeID,
                                      StopAfterID, MIR.get())) {
        errs() << argv[0] << ": target does not support generation of this"
               << " file type!\n";
        return 1;
      }
    }

    // Before executing passes, print the final values of the LLVM options.
    cl::PrintOptionValues();

    // If requested, run the pass manager over the same module again,
    // to catch any bugs due to persistent state in the passes. Note that
    // opt has the same functionality, so it may be worth abstracting this out
    // in the future.
    SmallVector<char, 0> CompileTwiceBuffer;
    if (CompileTwice) {
      std::unique_ptr<Module> M2(llvm::CloneModule(M.get()));
      PM.run(*M2);
      CompileTwiceBuffer = Buffer;
      Buffer.clear();
    }

    PM.run(*M);

    // getDiagnosticContext() returns the bool that the diagnostic handler
    // installed on the LLVMContext sets whenever an error is reported.
    auto HasError = *static_cast<bool *>(Context.getDiagnosticContext());
    if (HasError)
      return 1;

    // Compare the two outputs and make sure they're the same.
    if (CompileTwice) {
      if (Buffer.size() != CompileTwiceBuffer.size() ||
          (memcmp(Buffer.data(), CompileTwiceBuffer.data(), Buffer.size()) !=
           0)) {
        errs()
            << "Running the pass manager twice changed the output.\n"
               "Writing the result of the second run to the specified output.\n"
               "To generate the one-run comparison binary, just run without\n"
               "the compile-twice option.\n";
        Out->os() << Buffer;
        Out->keep();
        return 1;
      }
    }

    if (BOS) {
      Out->os() << Buffer;
    }
  }

  // Declare success.
  Out->keep();

  return 0;
}
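The driver above follows llc's standard emit path: look up a Target for the module's triple, build a TargetMachine, and let the target populate a legacy::PassManager through addPassesToEmitFile(). Below is a minimal sketch of just that core path, assuming the same LLVM 3.9-era APIs used in the example (signatures changed in later releases); emitObject and its error strings are illustrative, not part of llc.

// A minimal sketch of the core emit path, assuming LLVM 3.9-era APIs.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

static bool emitObject(llvm::Module &M, llvm::raw_pwrite_stream &OS,
                       std::string &Error) {
  using namespace llvm;
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();

  // Resolve the target from the module's own triple (no -march override).
  Triple TheTriple(M.getTargetTriple());
  const Target *T = TargetRegistry::lookupTarget("", TheTriple, Error);
  if (!T)
    return false;

  std::unique_ptr<TargetMachine> TM(T->createTargetMachine(
      TheTriple.getTriple(), /*CPU=*/"", /*Features=*/"", TargetOptions(),
      Reloc::PIC_));
  M.setDataLayout(TM->createDataLayout());

  // The target decides which passes are needed to produce an object file.
  legacy::PassManager PM;
  if (TM->addPassesToEmitFile(PM, OS, TargetMachine::CGFT_ObjectFile)) {
    Error = "target does not support object file emission";
    return false;
  }
  PM.run(M);
  return true;
}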
Example #29
ValueEnumerator::ValueEnumerator(const Module &M,
                                 bool ShouldPreserveUseListOrder)
    : ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {
  if (ShouldPreserveUseListOrder)
    UseListOrders = predictUseListOrder(M);

  // Enumerate the global variables.
  for (const GlobalVariable &GV : M.globals())
    EnumerateValue(&GV);

  // Enumerate the functions.
  for (const Function &F : M) {
    EnumerateValue(&F);
    EnumerateAttributes(F.getAttributes());
  }

  // Enumerate the aliases.
  for (const GlobalAlias &GA : M.aliases())
    EnumerateValue(&GA);

  // Enumerate the ifuncs.
  for (const GlobalIFunc &GIF : M.ifuncs())
    EnumerateValue(&GIF);

  // Remember the cutoff between global values and other constants.
  unsigned FirstConstant = Values.size();

  // Enumerate the global variable initializers.
  for (const GlobalVariable &GV : M.globals())
    if (GV.hasInitializer())
      EnumerateValue(GV.getInitializer());

  // Enumerate the aliasees.
  for (const GlobalAlias &GA : M.aliases())
    EnumerateValue(GA.getAliasee());

  // Enumerate the ifunc resolvers.
  for (const GlobalIFunc &GIF : M.ifuncs())
    EnumerateValue(GIF.getResolver());

  // Enumerate any optional Function data.
  for (const Function &F : M)
    for (const Use &U : F.operands())
      EnumerateValue(U.get());

  // Enumerate the metadata type.
  //
  // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode
  // only encodes the metadata type when it's used as a value.
  EnumerateType(Type::getMetadataTy(M.getContext()));

  // Insert constants and metadata that are named at module level into the slot
  // pool so that the module symbol table can refer to them...
  EnumerateValueSymbolTable(M.getValueSymbolTable());
  EnumerateNamedMetadata(M);

  SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
  for (const GlobalVariable &GV : M.globals()) {
    MDs.clear();
    GV.getAllMetadata(MDs);
    for (const auto &I : MDs)
      // FIXME: Pass GV to EnumerateMetadata and arrange for the bitcode writer
      // to write metadata to the global variable's own metadata block
      // (PR28134).
      EnumerateMetadata(nullptr, I.second);
  }

  // Enumerate types used by function bodies and argument lists.
  for (const Function &F : M) {
    for (const Argument &A : F.args())
      EnumerateType(A.getType());

    // Enumerate metadata attached to this function.
    MDs.clear();
    F.getAllMetadata(MDs);
    for (const auto &I : MDs)
      EnumerateMetadata(F.isDeclaration() ? nullptr : &F, I.second);

    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB) {
        for (const Use &Op : I.operands()) {
          auto *MD = dyn_cast<MetadataAsValue>(&Op);
          if (!MD) {
            EnumerateOperandType(Op);
            continue;
          }

          // Local metadata is enumerated during function-incorporation.
          if (isa<LocalAsMetadata>(MD->getMetadata()))
            continue;

          EnumerateMetadata(&F, MD->getMetadata());
        }
        EnumerateType(I.getType());
        if (const CallInst *CI = dyn_cast<CallInst>(&I))
          EnumerateAttributes(CI->getAttributes());
        else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I))
          EnumerateAttributes(II->getAttributes());

        // Enumerate metadata attached to this instruction.
        MDs.clear();
        I.getAllMetadataOtherThanDebugLoc(MDs);
        for (unsigned i = 0, e = MDs.size(); i != e; ++i)
          EnumerateMetadata(&F, MDs[i].second);

        // Don't enumerate the location directly -- it has a special record
        // type -- but enumerate its operands.
        if (DILocation *L = I.getDebugLoc())
          for (const Metadata *Op : L->operands())
            EnumerateMetadata(&F, Op);
      }
  }

  // Optimize constant ordering.
  OptimizeConstants(FirstConstant, Values.size());

  // Organize metadata ordering.
  organizeMetadata();
}
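EnumerateValue() reduces to a first-visit numbering scheme: the first time a Value is seen it gets the next sequential ID, and every later visit returns the same ID, so the traversal order above (globals, functions, aliases, ifuncs, then initializers and so on) completely determines the bitcode numbering. A minimal sketch of that pattern follows; SimpleEnumerator is a hypothetical stand-in for the real class, not its actual interface.

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Value.h"
#include <vector>

// Hypothetical stand-in for ValueEnumerator's core numbering. IDs are
// 1-based, as in the real class, so 0 in the map means "not yet seen".
struct SimpleEnumerator {
  llvm::DenseMap<const llvm::Value *, unsigned> ValueMap;
  std::vector<const llvm::Value *> Values; // Values in ID order.

  unsigned enumerate(const llvm::Value *V) {
    unsigned &ID = ValueMap[V]; // operator[] inserts 0 on first lookup.
    if (ID == 0) {
      Values.push_back(V);
      ID = Values.size();       // First visit: hand out the next ID.
    }
    return ID;
  }
};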
Example #30
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(), 
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Everything is sorted first by priority
    // and second by lexical order, so the ctor functions are emitted in the
    // proper order.
    for (SmallVectorImpl<GlobalInitData>::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData>::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from the priority. Prepend zeroes so
      // that the function names sort in the same order as the priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
  }

  SmallString<128> FileName;
  SourceManager &SM = Context.getSourceManager();
  if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
    // Include the filename in the symbol name. Including "sub_" matches gcc and
    // makes sure these symbols appear lexicographically behind the symbols with
    // priority emitted above.
    FileName = llvm::sys::path::filename(MainFile->getName());
  } else {
    FileName = SmallString<128>("<null>");
  }

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  CXXGlobalInits.clear();
  PrioritizedCXXGlobalInits.clear();
}
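The zero-padded suffix is what makes the prioritized initializers run in order: since Sema caps priorities at 65535, six digits always suffice, and padding makes the lexicographic order of the _GLOBAL__I_ names match the numeric order of the priorities. A minimal sketch of just that computation; prioritySuffix is an illustrative helper, not part of clang.

#include <string>
#include "llvm/ADT/StringExtras.h"

// Illustrative helper: zero-pad a ctor priority to six digits so that the
// generated symbol names sort in priority order.
static std::string prioritySuffix(unsigned Priority) {
  std::string S = llvm::utostr(Priority); // Priority <= 65535 per Sema.
  return std::string(6 - S.size(), '0') + S;
}

// For example, prioritySuffix(101) == "000101", so _GLOBAL__I_000101 sorts
// (and therefore runs) ahead of _GLOBAL__I_000200.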