Example #1
void Scheduler::issueInstruction(InstRef &IR) {
  // Release buffered resources.
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  releaseBuffers(Desc.Buffers);
  notifyReleasedBuffers(Desc.Buffers);

  // Issue the instruction to the underlying pipelines and notify listeners.
  SmallVector<std::pair<ResourceRef, double>, 4> Pipes;
  issueInstructionImpl(IR, Pipes);
  notifyInstructionIssued(IR, Pipes);
  if (IR.getInstruction()->isExecuted())
    notifyInstructionExecuted(IR);
}
Example #2
bool DispatchStage::execute(InstRef &IR) {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  if (!isAvailable(Desc.NumMicroOps) || !canDispatch(IR))
    return false;
  dispatch(IR);
  return true;
}
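
Note: canDispatch(), called above, is not among these examples. A minimal sketch of how it plausibly chains the per-unit checks from Example #3 (checkRCU) and Example #7 (checkPRF) follows; the checkScheduler helper named here is an assumption, standing in for the scheduler-side availability check.

// Hedged sketch, not the verbatim implementation: dispatch is possible only if
// the retire control unit, the register file(s), and the scheduler all have
// room for this instruction.
bool DispatchStage::canDispatch(const InstRef &IR) const {
  return checkRCU(IR) && checkPRF(IR) && checkScheduler(IR);
}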
Example #3
bool DispatchStage::checkRCU(const InstRef &IR) {
  const unsigned NumMicroOps = IR.getInstruction()->getDesc().NumMicroOps;
  if (RCU.isAvailable(NumMicroOps))
    return true;
  notifyEvent<HWStallEvent>(
      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
  return false;
}
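
The notifyEvent<HWStallEvent>(...) call above broadcasts the stall to whoever observes the pipeline. A minimal sketch of such a helper, assuming the stage keeps a Listeners container of HWEventListener pointers (the member name is an assumption):

// Hedged sketch: forward an event to every registered listener.
template <typename EventT>
void Stage::notifyEvent(const EventT &Event) const {
  for (HWEventListener *Listener : Listeners) // Listeners: assumed member.
    Listener->onEvent(Event);
}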
Example #4
void Scheduler::issueInstructionImpl(
    InstRef &IR,
    SmallVectorImpl<std::pair<ResourceRef, double>> &UsedResources) {
  Instruction *IS = IR.getInstruction();
  const InstrDesc &D = IS->getDesc();

  // Issue the instruction and collect all the consumed resources
  // into a vector. That vector is then used to notify the listener.
  Resources->issueInstruction(D, UsedResources);

  // Notify the instruction that it started executing.
  // This updates the internal state of each write.
  IS->execute();

  if (IS->isExecuting())
    IssuedQueue[IR.getSourceIndex()] = IS;
}
Example #5
// Schedule the instruction for execution on the hardware.
Error ExecuteStage::execute(InstRef &IR) {
  assert(isAvailable(IR) && "Scheduler is not available!");

#ifndef NDEBUG
  // Ensure that the HWS has not stored this instruction in its queues.
  HWS.sanityCheck(IR);
#endif

  if (IR.getInstruction()->isEliminated())
    return handleInstructionEliminated(IR);

  // Reserve a slot in each buffered resource. Also, mark units with
  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
  // be released after the instruction is issued, and all the ResourceCycles
  // for those units have been consumed.
  bool IsReadyInstruction = HWS.dispatch(IR);
  const Instruction &Inst = *IR.getInstruction();
  NumDispatchedOpcodes += Inst.getDesc().NumMicroOps;
  notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
 
  if (!IsReadyInstruction) {
    if (Inst.isPending())
      notifyInstructionPending(IR);
    return ErrorSuccess();
  }

  notifyInstructionPending(IR);

  // If we did not return early, then the scheduler is ready for execution.
  notifyInstructionReady(IR);

  // If we cannot issue immediately, the HWS will add IR to its ready queue for
  // execution later, so we must return early here.
  if (!HWS.mustIssueImmediately(IR))
    return ErrorSuccess();

  // Issue IR to the underlying pipelines.
  return issueInstruction(IR);
}
Example #6
void Scheduler::cycleEvent() {
  SmallVector<ResourceRef, 8> ResourcesFreed;
  Resources->cycleEvent(ResourcesFreed);

  for (const ResourceRef &RR : ResourcesFreed)
    notifyResourceAvailable(RR);

  SmallVector<InstRef, 4> InstructionIDs;
  updateIssuedQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionExecuted(IR);
  InstructionIDs.clear();

  updatePendingQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionReady(IR);
  InstructionIDs.clear();

  InstRef IR = select();
  while (IR.isValid()) {
    issueInstruction(IR);

    // Instructions that have been issued during this cycle might have unblocked
    // other dependent instructions. Dependent instructions may be issued during
    // this same cycle if operands have ReadAdvance entries. Promote those
    // instructions to the ReadyQueue and tell the caller that we need another
    // round of 'issue()'.
    promoteToReadyQueue(InstructionIDs);
    for (const InstRef &I : InstructionIDs)
      notifyInstructionReady(I);
    InstructionIDs.clear();

    // Select the next instruction to issue.
    IR = select();
  }
}
Example #7
bool DispatchStage::checkPRF(const InstRef &IR) {
  SmallVector<unsigned, 4> RegDefs;
  for (const std::unique_ptr<WriteState> &RegDef :
       IR.getInstruction()->getDefs())
    RegDefs.emplace_back(RegDef->getRegisterID());

  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
  // A mask of all zeroes means that the register files are available.
  if (RegisterMask) {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
    return false;
  }

  return true;
}
Example #8
void DispatchStage::dispatch(InstRef IR) {
  assert(!CarryOver && "Cannot dispatch another instruction!");
  Instruction &IS = *IR.getInstruction();
  const InstrDesc &Desc = IS.getDesc();
  const unsigned NumMicroOps = Desc.NumMicroOps;
  if (NumMicroOps > DispatchWidth) {
    assert(AvailableEntries == DispatchWidth);
    AvailableEntries = 0;
    CarryOver = NumMicroOps - DispatchWidth;
  } else {
    assert(AvailableEntries >= NumMicroOps);
    AvailableEntries -= NumMicroOps;
  }

  // A dependency-breaking instruction doesn't have to wait on its register
  // input operands, and it is often optimized at the register renaming stage.
  // Update RAW dependencies if this instruction is not a dependency-breaking
  // instruction. A dependency-breaking instruction is a zero-latency
  // instruction that doesn't consume hardware resources.
  // An example of a dependency-breaking instruction on X86 is a zero-idiom XOR.
  bool IsDependencyBreaking = IS.isDependencyBreaking();
  for (std::unique_ptr<ReadState> &RS : IS.getUses())
    if (RS->isImplicitRead() || !IsDependencyBreaking)
      updateRAWDependencies(*RS, STI);

  // By default, a dependency-breaking zero-latency instruction is expected to
  // be optimized at the register renaming stage. That means no physical
  // register is allocated to the instruction.
  bool ShouldAllocateRegisters =
      !(Desc.isZeroLatency() && IsDependencyBreaking);
  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
  for (std::unique_ptr<WriteState> &WS : IS.getDefs()) {
    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), WS.get()),
                         RegisterFiles, ShouldAllocateRegisters);
  }

  // Reserve slots in the RCU, and notify the instruction that it has been
  // dispatched to the schedulers for execution.
  IS.dispatch(RCU.reserveSlot(IR, NumMicroOps));

  // Notify listeners of the "instruction dispatched" event.
  notifyInstructionDispatched(IR, RegisterFiles);
}
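
The CarryOver bookkeeping above only pays off together with a per-cycle reset of the dispatch group. A hedged sketch of what that handler plausibly looks like; the method name cycleEvent and the exact body are assumptions, not the verbatim LLVM code.

// Hedged sketch: at the start of a cycle, replenish the dispatch slots and let
// micro-ops carried over from a wide instruction consume them first.
void DispatchStage::cycleEvent() {
  if (CarryOver >= DispatchWidth) {
    AvailableEntries = 0;
    CarryOver -= DispatchWidth;
  } else {
    AvailableEntries = DispatchWidth - CarryOver;
    CarryOver = 0;
  }
}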
Example #9
void Scheduler::scheduleInstruction(InstRef &IR) {
  const unsigned Idx = IR.getSourceIndex();
  assert(WaitQueue.find(Idx) == WaitQueue.end());
  assert(ReadyQueue.find(Idx) == ReadyQueue.end());
  assert(IssuedQueue.find(Idx) == IssuedQueue.end());

  // Reserve a slot in each buffered resource. Also, mark units with
  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
  // be released after the instruction is issued, and all the ResourceCycles
  // for those units have been consumed.
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  reserveBuffers(Desc.Buffers);
  notifyReservedBuffers(Desc.Buffers);

  // If necessary, reserve queue entries in the load-store unit (LSU).
  bool Reserved = LSU->reserve(IR);
  if (!IR.getInstruction()->isReady() || (Reserved && !LSU->isReady(IR))) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << Idx
                      << " to the Wait Queue\n");
    WaitQueue[Idx] = IR.getInstruction();
    return;
  }
  notifyInstructionReady(IR);

  // Don't add a zero-latency instruction to the Wait or Ready queue.
  // A zero-latency instruction doesn't consume any scheduler resources. That is
  // because it doesn't need to be executed, and it is often removed at the
  // register renaming stage. For example, register-register moves are often
  // optimized at the register renaming stage by simply updating register
  // aliases. On some targets, zero-idiom instructions (for example: a xor that
  // clears the value of a register) are treated specially, and are often
  // eliminated at the register renaming stage.

  // Instructions that use an in-order dispatch/issue processor resource must be
  // issued immediately to the pipeline(s). Any other in-order buffered resource
  // (i.e. BufferSize=1) is consumed.

  if (!Desc.isZeroLatency() && !Resources->mustIssueImmediately(Desc)) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << IR
                      << " to the Ready Queue\n");
    ReadyQueue[IR.getSourceIndex()] = IR.getInstruction();
    return;
  }

  LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction " << IR
                    << " issued immediately\n");
  // Release buffered resources and issue the instruction to the underlying
  // pipelines.
  issueInstruction(IR);
}
Example #10
bool Scheduler::canBeDispatched(const InstRef &IR) const {
  HWStallEvent::GenericEventType Type = HWStallEvent::Invalid;
  const InstrDesc &Desc = IR.getInstruction()->getDesc();

  if (Desc.MayLoad && LSU->isLQFull())
    Type = HWStallEvent::LoadQueueFull;
  else if (Desc.MayStore && LSU->isSQFull())
    Type = HWStallEvent::StoreQueueFull;
  else {
    switch (Resources->canBeDispatched(Desc.Buffers)) {
    default:
      return true;
    case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
      Type = HWStallEvent::SchedulerQueueFull;
      break;
    case ResourceStateEvent::RS_RESERVED:
      Type = HWStallEvent::DispatchGroupStall;
    }
  }

  Owner->notifyStallEvent(HWStallEvent(Type, IR));
  return false;
}
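
On the receiving side, a listener can classify these stall notifications. A minimal sketch, assuming the HWEventListener interface exposes an onEvent(const HWStallEvent &) callback and the event carries a public Type field; the StallCounter class itself is hypothetical.

// Hedged sketch: count how often dispatch stalls because the scheduler's
// queues are full, using the events emitted by canBeDispatched() above.
struct StallCounter : public HWEventListener {
  unsigned SchedulerQueueFullEvents = 0;
  void onEvent(const HWStallEvent &Event) override {
    if (Event.Type == HWStallEvent::SchedulerQueueFull)
      ++SchedulerQueueFullEvents;
  }
};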