/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
                                   MachineInstr* MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
  if (!NewMI) return 0;

  assert((!(Flags & MachineMemOperand::MOStore) ||
          NewMI->getDesc().mayStore()) &&
         "Folded a def to a non-store!");
  assert((!(Flags & MachineMemOperand::MOLoad) ||
          NewMI->getDesc().mayLoad()) &&
         "Folded a use to a non-load!");
  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  assert(MFI.getObjectOffset(FrameIndex) != -1);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
                            Flags, /*Offset=*/0,
                            MFI.getObjectSize(FrameIndex),
                            MFI.getObjectAlignment(FrameIndex));
  NewMI->addMemOperand(MF, MMO);

  return NewMI;
}
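A caller-side sketch may clarify the contract. Here TII, MF, MI, and Slot are hypothetical names for the target instruction info, the machine function, the instruction being folded, and the stack slot; since this version does not insert the folded instruction itself, the client must splice it in and erase the original, exactly as the doc comment requires:

SmallVector<unsigned, 1> Ops;
Ops.push_back(1);  // index of the operand to fold (assumed to be a use)
if (MachineInstr *Folded = TII->foldMemoryOperand(MF, MI, Ops, Slot)) {
  MI->getParent()->insert(MI, Folded);  // add the new instruction ...
  MI->eraseFromParent();                // ... and remove the old one
}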
Example #2
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
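The real work happens in foldMemoryOperandImpl, which targets override. Below is a deliberately minimal sketch of such an override, under loud assumptions: MyTarget, the ADDrr/ADDrm opcodes, and the "operands 1 onward of LoadMI are the address" layout are all hypothetical, and real backends (e.g. X86) drive this from generated folding tables instead:

MachineInstr *MyTargetInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  // Decline anything but the one pattern we handle; returning nullptr
  // makes the generic foldMemoryOperand() above fail gracefully.
  if (Ops.size() != 1 || MI.getOpcode() != MyTarget::ADDrr)
    return nullptr;

  // Build the memory form: copy every operand except the folded one,
  // then append LoadMI's address operands (assumed to start at index 1).
  MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                    MI.getDebugLoc(), get(MyTarget::ADDrm));
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i)
    if (i != Ops[0])
      MIB.add(MI.getOperand(i));
  for (unsigned i = 1, e = LoadMI.getNumOperands(); i != e; ++i)
    MIB.add(LoadMI.getOperand(i));
  return MIB;
}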
Example #3
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = 0;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
Example #4
/// Given an "old group" OG of stores, create a "new group" NG of instructions
/// to replace them.  Ideally, NG would only have a single instruction in it,
/// but that may only be possible for store-immediate.
bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
      unsigned TotalSize) {
  // XXX Current limitations:
  // - only expect stores of immediate values in OG,
  // - only handle a TotalSize of up to 4.

  if (TotalSize > 4)
    return false;

  unsigned Acc = 0;  // Value accumulator.
  unsigned Shift = 0;

  for (InstrGroup::iterator I = OG.begin(), E = OG.end(); I != E; ++I) {
    MachineInstr *MI = *I;
    const MachineMemOperand &MMO = getStoreTarget(MI);
    MachineOperand &SO = MI->getOperand(2);  // Source.
    assert(SO.isImm() && "Expecting an immediate operand");

    unsigned NBits = MMO.getSize()*8;
    unsigned Mask = (0xFFFFFFFFU >> (32-NBits));
    unsigned Val = (SO.getImm() & Mask) << Shift;
    Acc |= Val;
    Shift += NBits;
  }

  MachineInstr *FirstSt = OG.front();
  DebugLoc DL = OG.back()->getDebugLoc();
  const MachineMemOperand &OldM = getStoreTarget(FirstSt);
  MachineMemOperand *NewM =
    MF->getMachineMemOperand(OldM.getPointerInfo(), OldM.getFlags(),
                             TotalSize, OldM.getAlignment(),
                             OldM.getAAInfo());

  if (Acc < 0x10000) {
    // Create mem[hw] = #Acc
    unsigned WOpc = (TotalSize == 2) ? Hexagon::S4_storeirh_io :
                    (TotalSize == 4) ? Hexagon::S4_storeiri_io : 0;
    assert(WOpc && "Unexpected size");

    int Val = (TotalSize == 2) ? int16_t(Acc) : int(Acc);
    const MCInstrDesc &StD = TII->get(WOpc);
    MachineOperand &MR = FirstSt->getOperand(0);
    int64_t Off = FirstSt->getOperand(1).getImm();
    MachineInstr *StI = BuildMI(*MF, DL, StD)
                          .addReg(MR.getReg(), getKillRegState(MR.isKill()))
                          .addImm(Off)
                          .addImm(Val);
    StI->addMemOperand(*MF, NewM);
    NG.push_back(StI);
  } else {
    // Create vreg = A2_tfrsi #Acc; mem[hw] = vreg
    const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi);
    const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI, *MF);
    unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
    MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg)
                           .addImm(int(Acc));
    NG.push_back(TfrI);

    unsigned WOpc = (TotalSize == 2) ? Hexagon::S2_storerh_io :
                    (TotalSize == 4) ? Hexagon::S2_storeri_io : 0;
    assert(WOpc && "Unexpected size");

    const MCInstrDesc &StD = TII->get(WOpc);
    MachineOperand &MR = FirstSt->getOperand(0);
    int64_t Off = FirstSt->getOperand(1).getImm();
    MachineInstr *StI = BuildMI(*MF, DL, StD)
                          .addReg(MR.getReg(), getKillRegState(MR.isKill()))
                          .addImm(Off)
                          .addReg(VReg, RegState::Kill);
    StI->addMemOperand(*MF, NewM);
    NG.push_back(StI);
  }

  return true;
}
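To make the accumulation loop concrete, here is a worked instance with hypothetical registers and offsets (Hexagon is little-endian, and OG is assumed to be ordered by increasing offset):

// OG:  memb(R0+#0) = #0x12   NBits = 8, Mask = 0xFF, Val = 0x12 << 0
//      memb(R0+#1) = #0x34   NBits = 8, Mask = 0xFF, Val = 0x34 << 8
// Acc = 0x3412, TotalSize = 2.  Since Acc < 0x10000, the immediate-store
// path fires and NG receives a single S4_storeirh_io:
//   memh(R0+#0) = #0x3412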
Example #5
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = 0;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }
 
  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
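The COPY fallback at the end deserves a concrete illustration; the virtual registers and frame index below are hypothetical:

// Folding the def of a copy, i.e. Ops = {0} on:
//   %vreg1<def> = COPY %vreg0
// gives Flags == MOStore and MO = getOperand(1 - 0) = %vreg0, so the copy
// is rewritten as a spill via storeRegToStackSlot(.., %vreg0, .., FI, ..).
// Folding the use (Ops = {1}) gives Flags == MOLoad and a reload of
// %vreg1 from FI via loadRegFromStackSlot.  The trailing --Pos returns
// the newly inserted memory instruction, which sits just before MI.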
Example #6
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
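A concrete instance of the subregister sizing above (the subregister index and slot size are hypothetical):

// Folding a reload of a 32-bit subregister from a 64-bit spill slot:
//   MFI.getObjectSize(FI)            ==  8  bytes (whole slot)
//   MI.getOperand(OpIdx).getSubReg() ==  sub_32 (nonzero)
//   TRI->getSubRegIdxSize(sub_32)    == 32  bits
//   OpSize = 32 / 8                  ==  4  bytes
// MemSize ends up 4, so the MachineMemOperand attached to the folded
// instruction describes the 4-byte access actually performed.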