/// If desirable, rewrite NewReg to a drop register.
static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
                               MachineOperand &MO, WebAssemblyFunctionInfo &MFI,
                               MachineRegisterInfo &MRI) {
  bool Changed = false;
  if (OldReg == NewReg) {
    Changed = true;
    // The operand still refers to the old register, so its value is unused by
    // the rewritten code. Give it a fresh register (note: this local NewReg
    // shadows the parameter of the same name), mark it dead, and stackify it
    // so it can be lowered to a wasm "drop".
    unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
    MO.setReg(NewReg);
    MO.setIsDead();
    MFI.stackifyVReg(NewReg);
  }
  return Changed;
}
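A minimal call-site sketch, assuming the enclosing WebAssemblyRegStackify rewrite loop supplies OldReg, NewReg, MO, MFI and MRI (none of these are defined here, and this is not the pass's actual caller):

// Hypothetical usage: MO is the def operand that still carries OldReg after
// the rename. If the two registers match, the leftover definition is turned
// into a stackified dead def, i.e. a drop.
bool Changed = MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);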
Example no. 2
/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults.  The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->defs().begin()->getReg();
    assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
           "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  // Copy MI's remaining operands onto the FAULTING_OP, clearing kill and dead
  // flags: liveness can change once the memory access is folded into the new
  // instruction at the end of MBB.
  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  return MIB;
}
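For reference, a sketch of the operand layout this builds, inferred from the BuildMI calls above (register and block names are placeholders, not real compiler output):

//   %DefReg = FAULTING_OP <fault-kind>, %HandlerMBB, <original opcode>,
//             <MI's remaining operands, with kill/dead flags cleared>
//
// The pseudo-instruction performs the same memory access as MI and adds a
// CFG edge to HandlerMBB, which is taken when that access faults.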
Example no. 3
// Returns true if a new block was inserted.
bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel,
                                int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
    if (Offset) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
        .addImm(Offset);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
    }

    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return false;
  }

  MachineFunction &MF = *MBB.getParent();
  MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  SaveOp->setIsDead(false);
  unsigned Save = SaveOp->getReg();

  // Reading from a VGPR requires looping over all workitems in the wavefront.
  assert(AMDGPU::SReg_64RegClass.contains(Save) &&
         AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  splitBlockLiveIns(MBB, MI, *LoopBB, *RemainderBB, Save, *Idx);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessors(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
  return true;
}
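A rough picture of the control flow produced in the VGPR-index case, inferred from the block construction above (the loop body itself comes from emitLoadM0FromVGPRLoop, which is not shown here):

//   MBB          -- saves EXEC into Save, falls through to LoopBB
//   LoopBB       -- per-lane M0 load; successor edges to itself and to
//                   RemainderBB, so it repeats until every active lane is done
//   RemainderBB  -- restores EXEC from Save, followed by the instructions
//                   spliced out of the original block
//
// The SGPR-index case writes M0 directly and inserts MovRel in place, so no
// new block is created and the function returns false there.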