Example #1
// Check if a call and subsequent A2_tfrpi instructions should maintain
// scheduling affinity. We are looking for the TFRI to be consumed in
// the next instruction. This should help reduce the instances of
// double register pairs being allocated and scheduled before a call
// when not used until after the call. This situation is exacerbated
// by the fact that we allocate the pair from the callee saves list,
// leading to excess spills and restores.
bool HexagonCallMutation::shouldTFRICallBind(const HexagonInstrInfo &HII,
      const SUnit &Inst1, const SUnit &Inst2) const {
  if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
    return false;

  // TypeXTYPE instructions are 64-bit operations.
  if (HII.getType(Inst2.getInstr()) == HexagonII::TypeXTYPE)
    return true;
  return false;
}
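The binding test above is a pure predicate over a producer/consumer pair. A minimal standalone sketch of the same shape, using toy types rather than the LLVM API (Opcode, InstrType, and ToySUnit are illustrative inventions):

// Toy stand-ins for the scheduler types; names here are illustrative only.
enum class Opcode { A2_tfrpi, Other };
enum class InstrType { XTYPE, Other };

struct ToySUnit {
  Opcode Opc;
  InstrType Type;
};

// Bind a constant-transfer to its consumer only when the producer is an
// A2_tfrpi and the consumer is a 64-bit (XTYPE) operation, mirroring the
// Hexagon heuristic above.
bool shouldBind(const ToySUnit &Producer, const ToySUnit &Consumer) {
  return Producer.Opc == Opcode::A2_tfrpi &&
         Consumer.Type == InstrType::XTYPE;
}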
Example #2
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i].SU;
      if (UseSU == SU)
        continue;

      SDep dep(SU, SDep::Data, 1, *Alias);

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = UseList[i].OpIdx;
      MachineInstr *RegUse = UseOp < 0 ? 0 : UseSU->getInstr();
      dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                         RegUse, UseOp, /*FindMin=*/false));
      dep.setMinLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                         RegUse, UseOp, /*FindMin=*/true));

      ST.adjustSchedDependency(SU, UseSU, dep);
      UseSU->addPred(dep);
    }
  }
}
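A minimal standalone model of this routine's shape, with toy containers in place of the LLVM structures (Node, Edge, and computeLatency are invented stand-ins): walk the recorded uses of the defined register, skip self-edges, compute a per-pair latency, and attach the edge to the user.

#include <map>
#include <vector>

struct Node;
struct Edge { Node *From; unsigned Latency; unsigned Reg; };
struct Node { std::vector<Edge> Preds; };

// Placeholder latency model: any callable mapping a def/use pair to cycles.
unsigned computeLatency(const Node *, const Node *) { return 1; }

void addDataDeps(Node *Def, unsigned Reg,
                 std::map<unsigned, std::vector<Node *>> &Uses) {
  auto It = Uses.find(Reg);
  if (It == Uses.end())
    return;                                     // register has no users yet
  for (Node *Use : It->second) {
    if (Use == Def)
      continue;                                 // never add a self-edge
    Use->Preds.push_back({Def, computeLatency(Def, Use), Reg});
  }
}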
Example #3
/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Singly defined vregs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  // Check here if there are any others...
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                  DefSU->getInstr());
      DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
    }
    DefI->SU = SU;
  }
}
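The def-tracking logic reduces to a map from register to its nearest recorded definition. A standalone sketch under that assumption (toy types; note the real code walks the block bottom-up, so the edge direction differs there):

#include <map>
#include <vector>

struct Node;
struct OutEdge { Node *PriorDef; unsigned Latency; };
struct Node { std::vector<OutEdge> OutputDeps; };

// Track the nearest recorded definition of each virtual register. A
// redefinition gains an output dependence on the previous def and then
// replaces it as the recorded def.
void noteVRegDef(unsigned Reg, Node *SU, unsigned OutLatency,
                 std::map<unsigned, Node *> &LastDef) {
  auto [It, Inserted] = LastDef.try_emplace(Reg, SU);
  if (Inserted)
    return;                      // first def seen: nothing to depend on
  if (It->second != SU)
    SU->OutputDeps.push_back({It->second, OutLatency});
  It->second = SU;               // this def is now the nearest one
}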
Example #4
/// Select a bundle for the current cycle. The selected instructions are
/// put into the bundle in the correct issue order. If no instruction can be
/// issued, false is returned.
bool PatmosLatencyQueue::selectBundle(std::vector<SUnit*> &Bundle)
{
  if (AvailableQueue.empty()) return false;

  // Find best bundle:
  // - Ensure that instructions that MUST be scheduled go into the bundle.
  // - find the best pair of available instructions, e.g. two stores with
  //   exclusive predicates and highest ILP/.., but only if at least one of
  //   those instructions has high priority.
  // - find best instructions that fit into the bundle with highest ILP/..
  //
  // Instructions are built up into a bundle in Bundle. Instructions are removed
  // from AvailableQueue in scheduled() once the instruction is actually picked.

  unsigned CurrWidth = 0;
  // The bundle must be empty here; otherwise we would have to compute
  // its initial width first.
  assert(Bundle.empty());

  std::vector<bool> Selected;
  Selected.resize(AvailableQueue.size());

  // Make sure that all instructions with ScheduleLow flag go into the bundle.
  for (unsigned i = 0; i < AvailableQueue.size() && CurrWidth < IssueWidth; i++)
  {
    SUnit *SU = AvailableQueue[i];
    if (!SU->isScheduleLow) break;

    if (addToBundle(Bundle, SU, CurrWidth)) {
      Selected[i] = true;
    }
  }

  // Check if any of the highest <IssueWidth> instructions can be
  // scheduled only with a single other instruction in this queue, or if there
  // is any instruction in the queue that can only be scheduled with the
  // highest ones. Pick them in any case.



  // TODO magic goes here..



  // Try to fill up the bundle with instructions from the queue by best effort
  for (unsigned i = 0; i < AvailableQueue.size() && CurrWidth < IssueWidth; i++)
  {
    if (Selected[i]) continue;
    SUnit *SU = AvailableQueue[i];

    // Check the width. Ignore the width for the first instruction to allow
    // ALUl even when bundling is disabled.
    unsigned width = PII.getIssueWidth(SU->getInstr());
    if (!Bundle.empty() && CurrWidth + width > IssueWidth) continue;

    addToBundle(Bundle, SU, CurrWidth);
  }

  return true;
}
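Stripped of slot legality and predicate checks, the selection above is a two-pass greedy fill. A toy sketch assuming the queue is priority-sorted with forced (ScheduleLow-like) ops at the front (Op and its fields are invented):

#include <vector>

struct Op { unsigned Width; bool MustSchedule; };

// Two-pass greedy fill: forced ops first, then best effort from the rest,
// never exceeding the machine's issue width.
std::vector<const Op *> selectBundle(const std::vector<Op> &Queue,
                                     unsigned IssueWidth) {
  std::vector<const Op *> Bundle;
  unsigned Used = 0;
  for (const Op &O : Queue) {
    if (!O.MustSchedule)
      break;                               // forced ops sort to the front
    if (Used + O.Width <= IssueWidth) {
      Bundle.push_back(&O);
      Used += O.Width;
    }
  }
  for (const Op &O : Queue) {
    if (O.MustSchedule)
      continue;                            // handled in the first pass
    if (Used + O.Width > IssueWidth)
      continue;                            // does not fit this cycle
    Bundle.push_back(&O);
    Used += O.Width;
  }
  return Bundle;
}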
Example #5
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
  unsigned DataLatency = SU->Latency;

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i].SU;
      if (UseSU == SU)
        continue;
      MachineInstr *UseMI = UseSU->getInstr();
      int UseOp = UseList[i].OpIdx;
      unsigned LDataLatency = DataLatency;
      // Optionally add in a special extra latency for nodes that
      // feed addresses.
      // TODO: Perhaps we should get rid of
      // SpecialAddressLatency and just move this into
      // adjustSchedDependency for the targets that care about it.
      if (SpecialAddressLatency != 0 && !UnitLatencies &&
          UseSU != &ExitSU) {
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
        if (RegUseIndex >= 0 &&
            (UseMI->mayLoad() || UseMI->mayStore()) &&
            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
          LDataLatency += SpecialAddressLatency;
      }
      // Adjust the dependence latency using operand def/use
      // information (if any), and then allow the target to
      // perform its own adjustments.
      SDep dep(SU, SDep::Data, LDataLatency, *Alias);
      if (!UnitLatencies) {
        unsigned Latency =
          TII->computeOperandLatency(InstrItins, SU->getInstr(), OperIdx,
                                     (UseOp < 0 ? 0 : UseMI), UseOp);
        dep.setLatency(Latency);
        unsigned MinLatency =
          TII->computeOperandLatency(InstrItins, SU->getInstr(), OperIdx,
                                     (UseOp < 0 ? 0 : UseMI), UseOp,
                                     /*FindMin=*/true);
        dep.setMinLatency(MinLatency);

        ST.adjustSchedDependency(SU, UseSU, dep);
      }
      UseSU->addPred(dep);
    }
  }
}
Example #6
/// Change the latency between the two SUnits.
void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
      SUnit *Dst, unsigned Lat) const {
  MachineInstr &SrcI = *Src->getInstr();
  for (auto &I : Deps) {
    if (I.getSUnit() != Dst)
      continue;
    I.setLatency(Lat);
    SUnit *UpdateDst = I.getSUnit();
    updateLatency(SrcI, *UpdateDst->getInstr(), I);
    // Update the latency of the opposite edge too.
    for (auto &PI : UpdateDst->Preds) {
      if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
        continue;
      PI.setLatency(Lat);
      updateLatency(SrcI, *UpdateDst->getInstr(), PI);
    }
  }
}
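The double loop exists because each edge is stored twice, once in the source's Succs and once in the destination's Preds, and the two copies must agree. A standalone sketch of that bookkeeping with toy types:

#include <vector>

struct Node;
struct Edge { Node *Other; unsigned Latency; };
struct Node { std::vector<Edge> Succs, Preds; };

// Write the new latency into every Src->Dst edge in Src's successor list
// and into the matching copies in Dst's predecessor list; the two lists
// hold independent Edge objects.
void changeLatency(Node *Src, Node *Dst, unsigned Lat) {
  for (Edge &E : Src->Succs)
    if (E.Other == Dst)
      E.Latency = Lat;
  for (Edge &E : Dst->Preds)
    if (E.Other == Src)
      E.Latency = Lat;
}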
Example #7
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
                                           const MachineOperand &MO) {
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
  unsigned DataLatency = SU->Latency;

  for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    std::vector<SUnit*> &UseList = Uses[*Alias];
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i];
      if (UseSU == SU)
        continue;
      unsigned LDataLatency = DataLatency;
      // Optionally add in a special extra latency for nodes that
      // feed addresses.
      // TODO: Perhaps we should get rid of
      // SpecialAddressLatency and just move this into
      // adjustSchedDependency for the targets that care about it.
      if (SpecialAddressLatency != 0 && !UnitLatencies &&
          UseSU != &ExitSU) {
        MachineInstr *UseMI = UseSU->getInstr();
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
        if (RegUseIndex >= 0 &&
            (UseMI->mayLoad() || UseMI->mayStore()) &&
            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
          LDataLatency += SpecialAddressLatency;
      }
      // Adjust the dependence latency using operand def/use
      // information (if any), and then allow the target to
      // perform its own adjustments.
      const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
      if (!UnitLatencies) {
        ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
        ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
      }
      UseSU->addPred(dep);
    }
  }
}
Example #8
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers.  The beginning of the block is the right
  // place for the latter.  The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i>=0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i>=0; --i)
      if (DbgValueVec[i]!=0) {
        Begin = DbgValueVec[DbgValueVec.size()-1];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}
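The core of EmitSchedule is list surgery: detach the scheduled range, then reinsert the instructions in sequence order. A minimal sketch with a plain std::list standing in for the basic block (debug values and the Begin-iterator update are omitted):

#include <list>
#include <vector>

struct Instr { int Id; };

// Remove the scheduled range from the block, then reinsert each
// instruction in sequence order before InsertPos. A null sequence entry
// stands for a noop, as in the original.
void emitSchedule(std::list<Instr *> &Block,
                  std::list<Instr *>::iterator Begin,
                  std::list<Instr *>::iterator InsertPos,
                  const std::vector<Instr *> &Sequence) {
  Block.erase(Begin, InsertPos);     // std::list: InsertPos stays valid
  for (Instr *I : Sequence) {
    if (!I)
      continue;                      // noop slot: nothing to reinsert
    Block.insert(InsertPos, I);
  }
}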
Example #9
  void apply(ScheduleDAGInstrs *DAGInstrs) override {
    ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

    SUnit *SUa = nullptr;
    // Search for two consecutive memory operations and link them
    // to prevent the scheduler from moving them apart.
    // During DAG preprocessing, SUnits are in the original order of
    // the instructions before scheduling.
    for (SUnit &SU : DAG->SUnits) {
      MachineInstr &MI2 = *SU.getInstr();
      if (!MI2.mayLoad() && !MI2.mayStore()) {
        SUa = nullptr;
        continue;
      }
      if (!SUa) {
        SUa = &SU;
        continue;
      }

      MachineInstr &MI1 = *SUa->getInstr();
      if ((TII->isVMEM(MI1) && TII->isVMEM(MI2)) ||
          (TII->isFLAT(MI1) && TII->isFLAT(MI2)) ||
          (TII->isSMRD(MI1) && TII->isSMRD(MI2)) ||
          (TII->isDS(MI1)   && TII->isDS(MI2))) {
        SU.addPredBarrier(SUa);

        for (const SDep &SI : SU.Preds) {
          if (SI.getSUnit() != SUa)
            SUa->addPred(SDep(SI.getSUnit(), SDep::Artificial));
        }

        if (&SU != &DAG->ExitSU) {
          for (const SDep &SI : SUa->Succs) {
            if (SI.getSUnit() != &SU)
              SI.getSUnit()->addPred(SDep(&SU, SDep::Artificial));
          }
        }
      }

      SUa = &SU;
    }
  }
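A standalone model of the clustering walk, keeping only the pairing logic (MemClass and Node are toy stand-ins; the copying of artificial edges across preds/succs is omitted):

#include <vector>

enum class MemClass { None, VMEM, FLAT, SMRD, DS };

struct Node {
  MemClass Cls;
  std::vector<Node *> ArtificialPreds;
};

// Walk nodes in original instruction order; each pair of consecutive
// memory operations of the same class is chained with an artificial
// edge so the scheduler cannot pull them apart.
void clusterMemOps(std::vector<Node> &Nodes) {
  Node *Prev = nullptr;
  for (Node &N : Nodes) {
    if (N.Cls == MemClass::None) {
      Prev = nullptr;                    // non-memory op breaks the chain
      continue;
    }
    if (Prev && Prev->Cls == N.Cls)
      N.ArtificialPreds.push_back(Prev); // keep N scheduled after Prev
    Prev = &N;
  }
}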
Example #10
/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. An instruction order of A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(BB->size());

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
  }
}
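The reserve call above is load-bearing: schedulers keep raw SUnit pointers, so the vector must never reallocate. A toy sketch of that invariant (Instr and Unit are invented stand-ins):

#include <unordered_map>
#include <vector>

struct Instr { bool IsDebug; };
struct Unit { Instr *MI; unsigned NodeNum; };

// Pointers into SUnits stay valid only because the vector is reserved up
// front and never resized afterwards; that is the constraint the doc
// comment above spells out.
void initUnits(std::vector<Instr> &Block, std::vector<Unit> &SUnits,
               std::unordered_map<Instr *, Unit *> &MISUnitMap) {
  SUnits.reserve(Block.size());
  for (Instr &I : Block) {
    if (I.IsDebug)
      continue;                          // debug values get no node
    SUnits.push_back({&I, unsigned(SUnits.size())});
    MISUnitMap[&I] = &SUnits.back();     // safe: capacity never grows
  }
}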
Example #11
unsigned CriticalAntiDepBreaker::
BreakAntiDependencies(std::vector<SUnit>& SUnits,
                      MachineBasicBlock::iterator& Begin,
                      MachineBasicBlock::iterator& End,
                      unsigned InsertPosIndex) {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return 0;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
#endif

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free.  This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break.  To avoid this, keep track of the most recent
  // register that each register was replaced with, avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  unsigned Broken = 0;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = End, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    // 
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use further down requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirement, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
             "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
             "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        LastNewReg[AntiDepReg] = NewReg;
        ++Broken;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Broken;
}
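The LastNewReg bookkeeping is the heart of the pattern discussed in the long comment above. A standalone sketch of just that choice (toy register model; a real implementation also consults liveness and register classes):

#include <unordered_map>
#include <vector>

// Choose a rename register for an anti-dependence on Reg, refusing the
// register most recently used to repair Reg. This keeps successive
// repairs from collapsing onto one register and re-creating the
// anti-dependences. FreeRegs stands in for real liveness tracking;
// 0 means "no suitable register found".
unsigned pickRenameReg(unsigned Reg, const std::vector<unsigned> &FreeRegs,
                       std::unordered_map<unsigned, unsigned> &LastNewReg) {
  for (unsigned Candidate : FreeRegs) {
    if (Candidate == Reg)
      continue;                          // must actually break the dep
    if (LastNewReg[Reg] == Candidate)
      continue;                          // avoid repeating the last repair
    LastNewReg[Reg] = Candidate;
    return Candidate;
  }
  return 0;                              // leave the anti-dependence alone
}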
Example #12
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
    std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUE does not have SUnit's built, so just remember these for later
    // reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
             std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = TID.isCall();
    SU->isCommutable = TID.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass)
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() && 
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I = 
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references though SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I = 
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE = 
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between compares
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V = 
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I = 
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE = 
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else 
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I = 
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
          
          PendingLoads.push_back(SU);
          MayAlias = true;
        }
        
        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } 
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
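The memory-dependence bookkeeping above boils down to two structures: precise last-defs keyed by underlying object, plus a single conservative chain node. A much-reduced, top-down sketch (the real code walks bottom-up and also tracks uses, pending loads, and a separate barrier chain):

#include <map>
#include <vector>

struct Node { std::vector<Node *> OrderPreds; };

// Stores to a known underlying object get a precise edge to the previous
// store to that object; stores to an unknown object are ordered against
// everything and become the new conservative chain head.
struct MemChains {
  std::map<const void *, Node *> MemDefs; // last store per known object
  Node *AliasChain = nullptr;             // last "could alias anything"

  void visitStore(Node *SU, const void *Obj) {
    if (Obj) {
      auto It = MemDefs.find(Obj);
      if (It != MemDefs.end())
        SU->OrderPreds.push_back(It->second); // order after the prior def
      MemDefs[Obj] = SU;
      if (AliasChain)
        SU->OrderPreds.push_back(AliasChain);
    } else {
      for (auto &KV : MemDefs)
        SU->OrderPreds.push_back(KV.second);  // conservative: after all defs
      MemDefs.clear();
      AliasChain = SU;                        // SU now orders later accesses
    }
  }
};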
Example #13
void ScheduleTDList::buildSchedGraph(AliasAnalysis *AA) {

	const LembergSubtarget *Subtarget = &TM.getSubtarget<LembergSubtarget>();

	// Create normal scheduling graph
	SchedulePostRATDList::buildSchedGraph(AA);
	
	// Refine scheduling graph
	for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
		SUnit &SU = SUnits[i];
		// Make sure that instructions chained to memory accesses are
		// scheduled in suitable cycles
		if (SU.getInstr()->mayStore()
			|| SU.getInstr()->mayLoad()) {

			// Pick a suitable latency: stores take one cycle, loads two
			// (currently the same for stack and non-stack accesses)
			unsigned MemLatency = SU.getInstr()->mayStore() ? 1 : 2;

			// Insert appropriate latencies
			for (SUnit::succ_iterator I = SU.Succs.begin(), E = SU.Succs.end();
				 I != E; ++I) {
				if (I->getKind() == SDep::Order || I->getKind() == SDep::Data) {

					SUnit *Succ = I->getSUnit();

					// TODO: why do we see successors without a real instruction?
					if (!Succ->getInstr())
						continue;

					// Latencies are only interesting for some combinations
					if ((SU.getInstr()->mayStore()
						 && !(Succ->getInstr()->mayLoad()
							  || Succ->getInstr()->mayStore()))
						|| (SU.getInstr()->mayLoad()
							&& !(Succ->getInstr()->mayLoad()
								 || Succ->getInstr()->mayStore()
								 || Succ->getInstr()->readsRegister(Lemberg::R31))))
						continue;
					
					// Set forward latency
					I->setLatency(MemLatency);

					// Also set latencies in reverse direction
					for (SUnit::succ_iterator K = Succ->Preds.begin(), F = Succ->Preds.end();
						K != F; ++K) {
						if (K->getSUnit() == &SU) {
							K->setLatency(MemLatency);
						}
					}
				}
			}
		}

		// Make sure that results used for branching are computed early
		if (SU.Succs.size() == 0 && SU.getInstr()->getDesc().getNumDefs() > 0) {
		  const unsigned BranchLatency = Subtarget->DelaySlots+1;
		  SU.setHeightToAtLeast(BranchLatency+1);
		}

		// Set the isScheduleHigh flag for pinned instructions		
		unsigned idx = SU.getInstr()->getDesc().getSchedClass();
		unsigned units = InstrItins->beginStage(idx)->getUnits();
		unsigned unitCount = 0;
		while (units) {
		  if (units & 1)
			unitCount++;
		  units >>= 1;
		}
		if (unitCount < Subtarget->MaxClusters) {
		   SU.isScheduleHigh = true;
		}
	}
}
Example #14
/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    std::vector<PhysRegSUOper> &DefList = Defs[*Alias];
    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
      SUnit *DefSU = DefList[i].SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
        else {
          unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                 DefSU->getInstr());
          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
        }
      }
    }
  }

  if (!MO.isDef()) {
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx));
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);

    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's defs.
    std::vector<PhysRegSUOper> &DefList = Defs[MO.getReg()];

    // If a def is going to wrap back around to the top of the loop,
    // backschedule it.
    if (!UnitLatencies && DefList.empty()) {
      LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
      if (I != LoopRegs.Deps.end()) {
        const MachineOperand *UseMO = I->second.first;
        unsigned Count = I->second.second;
        const MachineInstr *UseMI = UseMO->getParent();
        unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        const TargetSubtargetInfo &ST =
          TM.getSubtarget<TargetSubtargetInfo>();
        unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
        // TODO: If we knew the total depth of the region here, we could
        // handle the case where the whole loop is inside the region but
        // is large enough that the isScheduleHigh trick isn't needed.
        if (UseMOIdx < UseMCID.getNumOperands()) {
          // Currently, we only support scheduling regions consisting of
          // single basic blocks. Check to see if the instruction is in
          // the same region by checking to see if it has the same parent.
          if (UseMI->getParent() != MI->getParent()) {
            unsigned Latency = SU->Latency;
            if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
              Latency += SpecialAddressLatency;
            // This is a wild guess as to the portion of the latency which
            // will be overlapped by work done outside the current
            // scheduling region.
            Latency -= std::min(Latency, Count);
            // Add the artificial edge.
            ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                /*Reg=*/0, /*isNormalMemory=*/false,
                                /*isMustAlias=*/false,
                                /*isArtificial=*/true));
          } else if (SpecialAddressLatency > 0 &&
                     UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
            // The entire loop body is within the current scheduling region
            // and the latency of this operation is assumed to be greater
            // than the latency of the loop.
            // TODO: Recursively mark data-edge predecessors as
            //       isScheduleHigh too.
            SU->isScheduleHigh = true;
          }
        }
        LoopRegs.Deps.erase(I);
      }
    }

    // clear this register's use list
    if (Uses.contains(MO.getReg()))
      Uses[MO.getReg()].clear();

    if (!MO.isDead())
      DefList.clear();

    // Calls will not be reordered because of chain dependencies (see
    // below). Since call operands are dead, calls may continue to be added
    // to the DefList making dependence checking quadratic in the size of
    // the block. Instead, we leave only one call at the back of the
    // DefList.
    if (SU->isCall) {
      while (!DefList.empty() && DefList.back().SU->isCall)
        DefList.pop_back();
    }
    // Defs are pushed in the order they are visited and never reordered.
    DefList.push_back(PhysRegSUOper(SU, OperIdx));
  }
}
Example #15
/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    std::vector<PhysRegSUOper> &DefList = Defs[*Alias];
    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
      SUnit *DefSU = DefList[i].SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
        else {
          unsigned AOLat =
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr());
          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
        }
      }
    }
  }

  if (!MO.isDef()) {
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx));
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);

    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's defs.
    std::vector<PhysRegSUOper> &DefList = Defs[MO.getReg()];

    // clear this register's use list
    if (Uses.contains(MO.getReg()))
      Uses[MO.getReg()].clear();

    if (!MO.isDead())
      DefList.clear();

    // Calls will not be reordered because of chain dependencies (see
    // below). Since call operands are dead, calls may continue to be added
    // to the DefList making dependence checking quadratic in the size of
    // the block. Instead, we leave only one call at the back of the
    // DefList.
    if (SU->isCall) {
      while (!DefList.empty() && DefList.back().SU->isCall)
        DefList.pop_back();
    }
    // Defs are pushed in the order they are visited and never reordered.
    DefList.push_back(PhysRegSUOper(SU, OperIdx));
  }
}
Example #16
// PacketizeMIs - Bundle machine instructions into packets.
void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator BeginItr,
                                      MachineBasicBlock::iterator EndItr) {
    assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
    VLIWScheduler->startBlock(MBB);
    VLIWScheduler->enterRegion(MBB, BeginItr, EndItr,
                               std::distance(BeginItr, EndItr));
    VLIWScheduler->schedule();

    // Generate MI -> SU map.
    MIToSUnit.clear();
    for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
        SUnit *SU = &VLIWScheduler->SUnits[i];
        MIToSUnit[SU->getInstr()] = SU;
    }

    // The main packetizer loop.
    for (; BeginItr != EndItr; ++BeginItr) {
        MachineInstr *MI = BeginItr;

        this->initPacketizerState();

        // End the current packet if needed.
        if (this->isSoloInstruction(MI)) {
            endPacket(MBB, MI);
            continue;
        }

        // Ignore pseudo instructions.
        if (this->ignorePseudoInstruction(MI, MBB))
            continue;

        SUnit *SUI = MIToSUnit[MI];
        assert(SUI && "Missing SUnit Info!");

        // Ask DFA if machine resource is available for MI.
        bool ResourceAvail = ResourceTracker->canReserveResources(MI);
        if (ResourceAvail) {
            // Dependency check for MI with instructions in CurrentPacketMIs.
            for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
                    VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
                MachineInstr *MJ = *VI;
                SUnit *SUJ = MIToSUnit[MJ];
                assert(SUJ && "Missing SUnit Info!");

                // Is it legal to packetize SUI and SUJ together?
                if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
                    // Allow packetization if dependency can be pruned.
                    if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
                        // End the packet if dependency cannot be pruned.
                        endPacket(MBB, MI);
                        break;
                    } // !isLegalToPruneDependencies.
                } // !isLegalToPacketizeTogether.
            } // For all instructions in CurrentPacketMIs.
        } else {
            // End the packet if resource is not available.
            endPacket(MBB, MI);
        }

        // Add MI to the current packet.
        BeginItr = this->addToPacket(MI);
    } // For all instructions in BB.

    // End any packet left behind.
    endPacket(MBB, EndItr);
    VLIWScheduler->exitRegion();
    VLIWScheduler->finishBlock();
}
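Reduced to its control flow, the packetizer is one linear pass that closes the current packet whenever it cannot grow. A toy sketch where the DFA resource check is replaced by a simple slot counter and dependence pruning is omitted entirely:

#include <vector>

struct Instr { int Id; };

// One linear pass: append instructions to the current packet and start a
// new packet when the slot budget is exhausted.
std::vector<std::vector<Instr *>>
packetize(std::vector<Instr> &Block, unsigned SlotsPerPacket) {
  std::vector<std::vector<Instr *>> Packets(1);
  for (Instr &I : Block) {
    if (Packets.back().size() == SlotsPerPacket)
      Packets.emplace_back();            // end packet: out of resources
    Packets.back().push_back(&I);
  }
  return Packets;
}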
Example #17
void PatmosPostRASchedStrategy::postprocessDAG(ScheduleDAGPostRA *dag)
{
  DAG = dag;

  SUnit *CFL = NULL;
  // Find the inline asm statement, if any. Note that asm is a barrier,
  // therefore there is at most one CFL or inline asm.
  SUnit *Asm = NULL;

  // Push up loads to ensure load delay slot across BBs
  // TODO For some reason, loads do not always have exit edges and a latency
  //      of 1; find out why. Happens e.g. in coremark with 16k methods setup.
  for (std::vector<SUnit>::reverse_iterator it = DAG->SUnits.rbegin(),
         ie = DAG->SUnits.rend(); it != ie; it++) {
    MachineInstr *MI = it->getInstr();
    if (!MI) continue;

    if (MI->mayLoad()) {
      SDep Dep(&*it, SDep::Artificial);
      Dep.setLatency(computeExitLatency(*it));
      DAG->ExitSU.addPred(Dep);
    }
  }

  // Find the branch/call/ret instruction if available
  for (std::vector<SUnit>::reverse_iterator it = DAG->SUnits.rbegin(),
       ie = DAG->SUnits.rend(); it != ie; it++)
  {
    MachineInstr *MI = it->getInstr();
    if (!MI) continue;
    if (isPatmosCFL(MI->getOpcode(), MI->getDesc().TSFlags)) {
      CFL = &*it;
      break;
    }
    if (MI->isInlineAsm()) {
      Asm = &*it;
      break;
    }
  }

  const PatmosSubtarget *PST = PTM.getSubtargetImpl();

  unsigned DelaySlot = CFL ? PST->getDelaySlotCycles(CFL->getInstr()) : 0;

  if (CFL) {
    // RET and CALL have implicit deps on the return values and call
    // arguments. Remove all those edges so instructions can be scheduled
    // into the delay slot when the registers are not actually used by
    // the CALL or RET
    if (CFL->getInstr()->isReturn() || CFL->getInstr()->isCall())
      removeImplicitCFLDeps(*CFL);

    // Add an artificial dep from CFL to exit for the delay slot
    SDep DelayDep(CFL, SDep::Artificial);
    DelayDep.setLatency(DelaySlot + 1);
    DAG->ExitSU.addPred(DelayDep);

    CFL->isScheduleLow = true;

    if (PTM.getSubtargetImpl()->getCFLType() != PatmosSubtarget::CFL_DELAYED) {
      // Push up single instructions that can be scheduled in the same
      // cycle as the branch
      unsigned LowCount = 0;
      SUnit *LowSU = 0;
      for (std::vector<SUnit>::reverse_iterator it = DAG->SUnits.rbegin(),
             ie = DAG->SUnits.rend(); it != ie; it++) {
        if (&*it == CFL) continue;
        
        MachineInstr *MI = it->getInstr();
        if (!MI) continue;
        
        if (it->getHeight() <= DelaySlot) {
          LowCount++;
          if (PII.canIssueInSlot(MI, LowCount)) {
            LowSU = &*it;
          }
        }
      }

      if (LowSU && LowCount == 1) {
        SDep Dep(LowSU, SDep::Artificial);
        Dep.setLatency(DelaySlot + 1);
        DAG->ExitSU.addPred(Dep);
      }
    }

    if (PTM.getSubtargetImpl()->getCFLType() == PatmosSubtarget::CFL_NON_DELAYED) {
      // Add dependencies from all other instructions to exit
      for (std::vector<SUnit>::reverse_iterator it = DAG->SUnits.rbegin(),
             ie = DAG->SUnits.rend(); it != ie; it++) {
        if (&*it == CFL) continue;

        MachineInstr *MI = it->getInstr();
        if (!MI) continue;

        SDep Dep(&*it, SDep::Artificial);
        Dep.setLatency(DelaySlot + 1);
        DAG->ExitSU.addPred(Dep);
      }
    }
  }

  // Add an exit delay between loads and inline asm, in case asm is empty
  if (Asm) {
    std::vector<SUnit*> PredLoads;
    for (SUnit::pred_iterator it = Asm->Preds.begin(), ie = Asm->Preds.end();
         it != ie; it++)
    {
      if (!it->getSUnit()) continue;
      MachineInstr *MI = it->getSUnit()->getInstr();
      // Check for loads
      if (!MI || !MI->mayLoad()) continue;
      PredLoads.push_back(it->getSUnit());
    }
    for (std::vector<SUnit*>::iterator it = PredLoads.begin(),
         ie = PredLoads.end(); it != ie; it++)
    {
      // Add a delay between loads and inline-asm, even if the operand is not
      // used.
      SDep Dep(*it, SDep::Artificial);
      Dep.setLatency( computeExitLatency(**it) );

      Asm->addPred(Dep);
    }
  }

  // remove barriers between loads/stores with different memory type
  removeTypedMemBarriers();

  // remove any dependency between instructions with mutually exclusive
  // predicates
  removeExclusivePredDeps();

  // TODO SWS and LWS do not have ST as implicit def edges
  // TODO CALL has chain edges to all SWS/.. instructions, remove

  // TODO remove edges from MUL to other MULs to overlap MUL and MFS for
  //      pipelined muls.
}