Example No. 1
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}
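A predicate like this is handed to the legalization rule builder; Example No. 18 below wires it into the G_IMPLICIT_DEF rules via legalIf. A minimal sketch of that wiring, reusing the S32/S512 shorthands defined in that example:

getActionDefinitionsBuilder(G_IMPLICIT_DEF)
  .legalIf(isMultiple32(0))         // legal when every scalar/element size is a multiple of 32
  .clampScalarOrElt(0, S32, S512)   // otherwise clamp sizes into the supported range
  .widenScalarToNextPow2(0, 32);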
Example No. 2
void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();
  LLVM_DEBUG(dbgs() << "Applying default-like mapping\n");
  for (unsigned OpIdx = 0,
                EndIdx = OpdMapper.getInstrMapping().getNumOperands();
       OpIdx != EndIdx; ++OpIdx) {
    LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx);
    MachineOperand &MO = MI.getOperand(OpIdx);
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n");
      continue;
    }
    if (!MO.getReg()) {
      LLVM_DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
      continue;
    }
    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns !=
               0 &&
           "Invalid mapping");
    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns ==
               1 &&
           "This mapping is too complex for this function");
    iterator_range<SmallVectorImpl<unsigned>::const_iterator> NewRegs =
        OpdMapper.getVRegs(OpIdx);
    if (empty(NewRegs)) {
      LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
      continue;
    }
    unsigned OrigReg = MO.getReg();
    unsigned NewReg = *NewRegs.begin();
    LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
    MO.setReg(NewReg);
    LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));

    // The OperandsMapper creates plain scalars, so the type of the new
    // register may not match the original; check and fix that up if needed.
    LLT OrigTy = MRI.getType(OrigReg);
    LLT NewTy = MRI.getType(NewReg);
    if (OrigTy != NewTy) {
      // The default mapping is not supposed to change the size of
      // the storage. However, right now we don't necessarily bump all
      // the types to storage size. For instance, we can consider
      // s16 G_AND legal whereas the storage size is going to be 32.
      assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
             "Types with difference size cannot be handled by the default "
             "mapping");
      LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
                        << OrigTy);
      MRI.setType(NewReg, OrigTy);
    }
    LLVM_DEBUG(dbgs() << '\n');
  }
}
Example No. 3
bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Align = MI.getOperand(2).getImm();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildLoad(
      List, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, /* Align = */ PtrSize));

  unsigned DstPtr;
  if (Align > PtrSize) {
    // Realign the list to the actual required alignment.
    unsigned AlignMinus1 = MRI.createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildConstant(AlignMinus1, Align - 1);

    unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(ListTmp, List, AlignMinus1);

    DstPtr = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
  } else
    DstPtr = List;

  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValSize, std::max(Align, PtrSize)));

  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));

  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);

  MIRBuilder.buildStore(
      NewList, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore,
                               PtrSize, /* Align = */ PtrSize));

  MI.eraseFromParent();
  return true;
}
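The MIR built here performs the same pointer arithmetic as a hand-written va_arg over a simple char*-style va_list, which is the form this lowering targets. A plain C++ illustration of that arithmetic (not LLVM code; the 8-byte pointer size is assumed for brevity):

#include <cstdint>

// ValSize and Align describe the requested argument type, in bytes.
void *va_arg_sketch(char **ListPtr, std::uintptr_t Align, std::uintptr_t ValSize) {
  char *P = *ListPtr;                                   // load the current list pointer
  if (Align > 8)                                        // over-aligned argument:
    P = (char *)(((std::uintptr_t)P + Align - 1) & ~(Align - 1)); // the GEP + ptr-mask pair
  void *Value = P;                                      // the argument itself is loaded from here
  *ListPtr = P + ((ValSize + 7) & ~(std::uintptr_t)7);  // advance the list and store it back
  return Value;
}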
Example No. 4
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::libcall(MachineInstr &MI) {
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  MIRBuilder.setInstr(MI);

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_FREM: {
    auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
    Type *Ty = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
    auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
    auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
    const char *Name =
        TLI.getLibcallName(Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32);

    CLI.lowerCall(MIRBuilder, MachineOperand::CreateES(Name), Ty,
                  MI.getOperand(0).getReg(), {Ty, Ty},
                  {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
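For the two supported sizes the emitted call ends up invoking the standard remainder libcalls (fmodf for RTLIB::REM_F32, fmod for RTLIB::REM_F64); a minimal illustration of what the lowered code computes:

#include <cmath>

// What the emitted libcall computes for the two supported sizes.
double frem_f64(double A, double B) { return std::fmod(A, B); } // RTLIB::REM_F64 -> "fmod"
float  frem_f32(float A, float B)   { return std::fmod(A, B); } // RTLIB::REM_F32 -> "fmodf"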
Example No. 5
bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  unsigned Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}
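The two constants implement the usual add-and-subtract rounding trick: adding 2^52 with the sign of the input forces the FPU to drop the fractional bits, subtracting it back leaves the rounded value, and inputs whose magnitude already exceeds C2 are passed through unchanged. A plain C++ transliteration of the emitted MIR (an illustration, not LLVM code):

#include <cmath>

double frint_sketch(double X) {
  const double C1 = 0x1.0p+52;             // 2^52: adding it pushes the fraction out of the mantissa
  const double C2 = 0x1.fffffffffffffp+51; // largest magnitude that still has a fractional part
  double Tmp = (X + std::copysign(C1, X)) - std::copysign(C1, X);
  return std::fabs(X) > C2 ? X : Tmp;      // values beyond C2 are already integers
}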
Example No. 6
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                           LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0)
    return UnableToLegalize;
  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD: {
    unsigned NarrowSize = NarrowTy.getSizeInBits();
    unsigned DstReg = MI.getOperand(0).getReg();
    int NumParts = MRI.getType(DstReg).getSizeInBits() / NarrowSize;

    MIRBuilder.setInstr(MI);

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
      DstRegs.push_back(DstReg);
      Indexes.push_back(i * NarrowSize);
    }

    MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
Example No. 7
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:
      return PMI_GPR8;
    case 16:
      return PMI_GPR16;
    case 32:
      return PMI_GPR32;
    case 64:
      return PMI_GPR64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    switch (Ty.getSizeInBits()) {
    case 32:
      return PMI_FP32;
    case 64:
      return PMI_FP64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    switch (Ty.getSizeInBits()) {
    case 128:
      return PMI_VEC128;
    case 256:
      return PMI_VEC256;
    case 512:
      return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  return PMI_None;
}
Example No. 8
void MachineLegalizeHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
                                         SmallVectorImpl<unsigned> &VRegs) {
  unsigned Size = Ty.getSizeInBits();
  SmallVector<uint64_t, 4> Indexes;
  for (int i = 0; i < NumParts; ++i) {
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
    Indexes.push_back(i * Size);
  }
  MIRBuilder.buildExtract(VRegs, Indexes, Reg);
}
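Examples No. 6 and No. 11 call this helper to split wide operands. For instance, splitting a hypothetical 64-bit register SrcReg into two 32-bit pieces (a sketch assuming the same MRI and MIRBuilder members the helper itself uses):

SmallVector<unsigned, 2> Parts;
extractParts(SrcReg, LLT::scalar(32), /*NumParts=*/2, Parts);
// Parts[0] covers bits [0, 32) of SrcReg and Parts[1] bits [32, 64),
// matching the 0 and 32 offsets pushed into Indexes above.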
Example No. 9
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}
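Example No. 18 pairs this mutation with the vectorWiderThan predicate from Example No. 19. For a v3s32 operand (96 bits) it computes Pieces = 2 and NewNumElts = (3 + 1) / 2 = 2, so the type is narrowed towards v2s32. The wiring, abridged from the bitwise-op rules in that example:

getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
  .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
  // Split vectors wider than 32 bits into roughly 64-bit pieces.
  .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
  .scalarize(0);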
Example No. 10
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands are using similar size and type.
  // Should probably be checked by the machine verifier.
  // This code won't catch cases where the number of lanes is
  // different between the operands.
  // If we want to go to that level of details, it is probably
  // best to check that the types are the same, period.
  // Currently, we just check that the register banks are the same
  // for each type.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}
Example No. 11
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::narrowScalar(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0)
    return UnableToLegalize;
  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD: {
    // Expand in terms of carry-setting/consuming G_ADDE instructions.
    unsigned NarrowSize = NarrowTy.getSizeInBits();
    int NumParts = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() /
                   NarrowTy.getSizeInBits();

    MIRBuilder.setInstr(MI);

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildConstant(CarryIn, 0);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

      MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
                            Src2Regs[i], CarryIn);

      DstRegs.push_back(DstReg);
      Indexes.push_back(i * NarrowSize);
      CarryIn = CarryOut;
    }
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
Example No. 12
void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
                                        bool IsExtend) {
#ifndef NDEBUG
  LLT SrcTy = MRI->getType(Src);
  LLT DstTy = MRI->getType(Dst);

  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}
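In a debug build this check fires on ill-formed casts. Assuming a MachineIRBuilder B whose buildTrunc/buildSExt helpers route through this validation, and registers Dst32 (s32) and Src64 (s64) introduced only for illustration:

B.buildTrunc(Dst32, Src64);  // fine: a trunc from s64 to s32 narrows
B.buildSExt(Dst32, Src64);   // would assert "invalid narrowing extend": an extend must widen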
Example No. 13
File: Utils.cpp Project: happz/llvm
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}
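A hypothetical caller (MI, MRI and Builder are assumed here, not taken from the example) can fold a generic binary instruction whose operands are both constants and replace it with a G_CONSTANT:

if (Optional<APInt> Folded = ConstantFoldBinOp(MI.getOpcode(),
                                               MI.getOperand(1).getReg(),
                                               MI.getOperand(2).getReg(), MRI)) {
  // Replace the instruction with a single constant holding the folded value.
  Builder.buildConstant(MI.getOperand(0).getReg(), Folded->getSExtValue());
  MI.eraseFromParent();
}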
Example No. 14
unsigned TargetRegisterInfo::getRegSizeInBits(unsigned Reg,
                                         const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (isPhysicalRegister(Reg)) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
  } else {
    LLT Ty = MRI.getType(Reg);
    unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
    // If Reg is not a generic register, query the register class to
    // get its size.
    if (RegSize)
      return RegSize;
    // Since Reg is not a generic register, it must have a register class.
    RC = MRI.getRegClass(Reg);
  }
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}
Example No. 15
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
  if (Mapping.isValid())
    return Mapping;

  // As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
  // won't work for normal floating-point types (or NZCV). When such
  // instructions exist we'll need to look at the MI's opcode.
  LLT Ty = MI.getType();
  unsigned BankID;
  if (Ty.isVector())
    BankID = AArch64::FPRRegBankID;
  else
    BankID = AArch64::GPRRegBankID;

  Mapping = InstructionMapping{1, 1, MI.getNumOperands()};
  int Size = Ty.isSized() ? Ty.getSizeInBits() : 0;
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
    Mapping.setOperandMapping(Idx, Size, getRegBank(BankID));

  return Mapping;
}
Example No. 16
void LegalizerInfo::computeTables() {
  assert(TablesInitialized == false);

  for (unsigned OpcodeIdx = 0; OpcodeIdx <= LastOp - FirstOp; ++OpcodeIdx) {
    const unsigned Opcode = FirstOp + OpcodeIdx;
    for (unsigned TypeIdx = 0; TypeIdx != SpecifiedActions[OpcodeIdx].size();
         ++TypeIdx) {
      // 0. Collect information specified through the setAction API, i.e.
      // for specific bit sizes.
      // For scalar types:
      SizeAndActionsVec ScalarSpecifiedActions;
      // For pointer types:
      std::map<uint16_t, SizeAndActionsVec> AddressSpace2SpecifiedActions;
      // For vector types:
      std::map<uint16_t, SizeAndActionsVec> ElemSize2SpecifiedActions;
      for (auto LLT2Action : SpecifiedActions[OpcodeIdx][TypeIdx]) {
        const LLT Type = LLT2Action.first;
        const LegalizeAction Action = LLT2Action.second;

        auto SizeAction = std::make_pair(Type.getSizeInBits(), Action);
        if (Type.isPointer())
          AddressSpace2SpecifiedActions[Type.getAddressSpace()].push_back(
              SizeAction);
        else if (Type.isVector())
          ElemSize2SpecifiedActions[Type.getElementType().getSizeInBits()]
              .push_back(SizeAction);
        else
          ScalarSpecifiedActions.push_back(SizeAction);
      }

      // 1. Handle scalar types
      {
        // Decide how to handle bit sizes for which no explicit specification
        // was given.
        SizeChangeStrategy S = &unsupportedForDifferentSizes;
        if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() &&
            ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
          S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx];
        std::sort(ScalarSpecifiedActions.begin(), ScalarSpecifiedActions.end());
        checkPartialSizeAndActionsVector(ScalarSpecifiedActions);
        setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions));
      }

      // 2. Handle pointer types
      for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) {
        std::sort(PointerSpecifiedActions.second.begin(),
                  PointerSpecifiedActions.second.end());
        checkPartialSizeAndActionsVector(PointerSpecifiedActions.second);
        // For pointer types, we assume that there isn't a meaningful way
        // to change the number of bits used in the pointer.
        setPointerAction(
            Opcode, TypeIdx, PointerSpecifiedActions.first,
            unsupportedForDifferentSizes(PointerSpecifiedActions.second));
      }

      // 3. Handle vector types
      SizeAndActionsVec ElementSizesSeen;
      for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) {
        std::sort(VectorSpecifiedActions.second.begin(),
                  VectorSpecifiedActions.second.end());
        const uint16_t ElementSize = VectorSpecifiedActions.first;
        ElementSizesSeen.push_back({ElementSize, Legal});
        checkPartialSizeAndActionsVector(VectorSpecifiedActions.second);
        // For vector types, we assume that the best way to adapt the number
        // of elements is to widen to the next larger legal element count,
        // unless there is no such type. In that case, legalize towards a
        // vector type with a smaller number of elements.
        SizeAndActionsVec NumElementsActions;
        for (SizeAndAction BitsizeAndAction : VectorSpecifiedActions.second) {
          assert(BitsizeAndAction.first % ElementSize == 0);
          const uint16_t NumElements = BitsizeAndAction.first / ElementSize;
          NumElementsActions.push_back({NumElements, BitsizeAndAction.second});
        }
        setVectorNumElementAction(
            Opcode, TypeIdx, ElementSize,
            moreToWiderTypesAndLessToWidest(NumElementsActions));
      }
      std::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
      SizeChangeStrategy VectorElementSizeChangeStrategy =
          &unsupportedForDifferentSizes;
      if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&
          VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
        VectorElementSizeChangeStrategy =
            VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx];
      setScalarInVectorAction(
          Opcode, TypeIdx, VectorElementSizeChangeStrategy(ElementSizesSeen));
    }
  }

  TablesInitialized = true;
}
Example No. 17
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) ||
      Opc == TargetOpcode::G_PHI) {
    const RegisterBankInfo::InstructionMapping &Mapping =
        getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
    // G_{F|S|U}REM are not listed because they are not legal.
    // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
    // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // Shifts.
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
    // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector();
    bool SrcIsGPR = !SrcTy.isVector();
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return getInstructionMapping(
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        /*NumOperands*/ 2);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();

  // Track the size and bank of each register.  We don't do partial mappings.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    OpSize[Idx] = Ty.getSizeInBits();

    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    // For floating-point instructions, scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
        Ty.getSizeInBits() > 64)
      OpRegBankIdx[Idx] = PMI_FirstFPR;
    else
      OpRegBankIdx[Idx] = PMI_FirstGPR;
  }

  unsigned Cost = 1;
  // Some of the floating-point instructions have mixed GPR and FPR operands:
  // fine-tune the computed mapping.
  switch (Opc) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP:
    OpRegBankIdx = {PMI_FirstGPR,
                    /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_BITCAST:
    // This is going to be a cross register bank copy and this is expensive.
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
          OpSize[0]);
    break;
  case TargetOpcode::G_LOAD:
    // Loading in vector unit is slightly more expensive.
    // This is actually only true for the LD1R and co instructions,
    // but anyway for the fast mode this number does not matter and
    // for the greedy mode the cost of the cross bank copy will
    // offset this number.
    // FIXME: Should be derived from the scheduling model.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      Cost = 2;
    else
      // Check if that load feeds fp instructions.
      // In that case, we want the default mapping to be on FPR
      // instead of blind map every scalar to GPR.
      for (const MachineInstr &UseMI :
           MRI.use_instructions(MI.getOperand(0).getReg()))
        // If we have at least one direct use in a FP instruction,
        // assume this was a floating point load in the IR.
        // If it was not, we would have had a bitcast before
        // reaching that instruction.
        if (isPreISelGenericFloatingPointOpcode(UseMI.getOpcode())) {
          OpRegBankIdx[0] = PMI_FirstFPR;
          break;
        }
    break;
  case TargetOpcode::G_STORE:
    // Check if that store is fed by fp instructions.
    if (OpRegBankIdx[0] == PMI_FirstGPR) {
      unsigned VReg = MI.getOperand(0).getReg();
      if (!VReg)
        break;
      MachineInstr *DefMI = MRI.getVRegDef(VReg);
      if (isPreISelGenericFloatingPointOpcode(DefMI->getOpcode()))
        OpRegBankIdx[0] = PMI_FirstFPR;
      break;
    }
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
      if (!Mapping->isValid())
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  return getInstructionMapping(DefaultMappingID, Cost,
                               getOperandsMapping(OpdsMapping), NumOperands);
}
Example No. 18
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));


  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal.  We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
    .legalForCartesianProduct({S16, S32, S64}, {S16, S32, S64})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
      // Use actual fsub instruction
      .legalFor({S32})
      // Must use fadd + fneg
      .lowerFor({S64, S16, V2S16})
      .scalarize(0)
      .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 needs to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);


  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               ST.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS;
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          // XXX hasLoadX3
          return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);


  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, V2S32, V2S16, V4S16,
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
            VecTy.getSizeInBits() <= 512 &&
            IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);

  }

  // TODO: vectors of pointers
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalForCartesianProduct(AllS32Vectors, {S32})
      .legalForCartesianProduct(AllS64Vectors, {S64})
      .clampNumElements(0, V16S32, V16S32)
      .clampNumElements(0, V2S64, V8S64)
      .minScalarSameAs(1, 0)
      // FIXME: Sort of a hack to make progress on other legalizations.
      .legalIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getScalarSizeInBits() <= 32 ||
               Query.Types[0].getScalarSizeInBits() == 64;
      });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16},
               {LLT::vector(4, LocalPtr), LLT::vector(2, LocalPtr)},
               {LLT::vector(4, PrivatePtr), LLT::vector(2, PrivatePtr)}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}
Example No. 19
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}
Example No. 20
const RegisterBankInfo::InstructionMapping &
ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  auto Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc)) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  using namespace TargetOpcode;

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  unsigned NumOperands = MI.getNumOperands();
  const ValueMapping *OperandsMapping = &ARM::ValueMappings[ARM::GPR3OpsIdx];

  switch (Opc) {
  case G_ADD:
  case G_SUB:
  case G_MUL:
  case G_SDIV:
  case G_UDIV:
  case G_SEXT:
  case G_ZEXT:
  case G_ANYEXT:
  case G_TRUNC:
  case G_GEP:
    // FIXME: We're abusing the fact that everything lives in a GPR for now; in
    // the real world we would use different mappings.
    OperandsMapping = &ARM::ValueMappings[ARM::GPR3OpsIdx];
    break;
  case G_LOAD:
  case G_STORE:
    OperandsMapping =
        Ty.getSizeInBits() == 64
            ? getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
                                  &ARM::ValueMappings[ARM::GPR3OpsIdx]})
            : &ARM::ValueMappings[ARM::GPR3OpsIdx];
    break;
  case G_FADD:
    assert((Ty.getSizeInBits() == 32 || Ty.getSizeInBits() == 64) &&
           "Unsupported size for G_FADD");
    OperandsMapping = Ty.getSizeInBits() == 64
                          ? &ARM::ValueMappings[ARM::DPR3OpsIdx]
                          : &ARM::ValueMappings[ARM::SPR3OpsIdx];
    break;
  case G_CONSTANT:
  case G_FRAME_INDEX:
    OperandsMapping =
        getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr});
    break;
  case G_SEQUENCE: {
    // We only support G_SEQUENCE for creating a double precision floating point
    // value out of two GPRs.
    LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
    LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
    if (Ty.getSizeInBits() != 64 || Ty1.getSizeInBits() != 32 ||
        Ty2.getSizeInBits() != 32)
      return getInvalidInstructionMapping();
    OperandsMapping =
        getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
                            &ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr,
                            &ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr});
    break;
  }
  case G_EXTRACT: {
    // We only support G_EXTRACT for splitting a double precision floating point
    // value into two GPRs.
    LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
    if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 64 ||
        MI.getOperand(2).getImm() % 32 != 0)
      return getInvalidInstructionMapping();
    OperandsMapping = getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx],
                                          &ARM::ValueMappings[ARM::DPR3OpsIdx],
                                          nullptr, nullptr});
    break;
  }
  default:
    return getInvalidInstructionMapping();
  }

#ifndef NDEBUG
  for (unsigned i = 0; i < NumOperands; i++) {
    for (const auto &Mapping : OperandsMapping[i]) {
      assert(
          (Mapping.RegBank->getID() != ARM::FPRRegBankID ||
           MF.getSubtarget<ARMSubtarget>().hasVFP2()) &&
          "Trying to use floating point register bank on target without vfp");
    }
  }
#endif

  return getInstructionMapping(DefaultMappingID, /*Cost=*/1, OperandsMapping,
                               NumOperands);
}
Example No. 21
bool ARMInstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    // FIXME: Smaller destination sizes coming soon!
    if (DstTy.getSizeInBits() != 32) {
      LLVM_DEBUG(dbgs() << "Unsupported destination size for extension");
      return false;
    }

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        unsigned SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        unsigned AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      unsigned IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(ARM::CMPrr, ARM::INSTRUCTION_LIST_END,
                        ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2() && "Can't select fcmp without VFP");

    unsigned OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && STI.isFPOnlySP()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL:
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  case G_GEP:
    I.setDesc(TII.get(ARM::ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(ARM::ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    unsigned Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::TSTri))
                    .addReg(I.getOperand(0).getReg())
                    .addImm(1)
                    .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::Bcc))
                      .add(I.getOperand(1))
                      .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    unsigned DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
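The 1-bit extension case at the top of this example masks the source with 1 and then reverse-subtracts it from 0 (RSB), turning a 0/1 value into 0/-1, which is exactly sign extension of an s1; the G_BRCOND lowering further down relies on the same "only bit 0 matters" idea when it emits TSTri with immediate 1. Below is a minimal standalone sketch of that arithmetic, independent of any LLVM API (the AND that produces AndResult is built just before the first lines of this excerpt):

#include <cassert>
#include <cstdint>

// Plain-C++ illustration of the s1 sign-extension pattern selected above:
// AND with 1 keeps only the low bit, and RSB from 0 (i.e. 0 - x) maps the
// resulting 0/1 onto 0/-1.
static int32_t signExtendS1(uint32_t Reg) {
  uint32_t AndResult = Reg & 1u;                 // ANDri AndResult, Reg, #1
  return 0 - static_cast<int32_t>(AndResult);    // RSB SExtResult, AndResult, #0
}

int main() {
  assert(signExtendS1(0) == 0);
  assert(signExtendS1(1) == -1);
  assert(signExtendS1(0x13) == -1); // only bit 0 is meaningful
  return 0;
}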
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc)) {
    RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  RegisterBankInfo::InstructionMapping Mapping =
      InstructionMapping{DefaultMappingID, 1, MI.getNumOperands()};

  // Track the size and bank of each register.  We don't do partial mappings.
  SmallVector<unsigned, 4> OpBaseIdx(MI.getNumOperands());
  SmallVector<unsigned, 4> OpFinalIdx(MI.getNumOperands());
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    unsigned RBIdx = AArch64::getRegBankBaseIdx(Ty.getSizeInBits());
    OpBaseIdx[Idx] = RBIdx;

    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    // For floating-point instructions, scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc)) {
      assert(RBIdx < (AArch64::LastFPR - AArch64::FirstFPR) + 1 &&
             "Index out of bound");
      OpFinalIdx[Idx] = AArch64::FirstFPR + RBIdx;
    } else {
      assert(RBIdx < (AArch64::LastGPR - AArch64::FirstGPR) + 1 &&
             "Index out of bound");
      OpFinalIdx[Idx] = AArch64::FirstGPR + RBIdx;
    }
  }

  // Some of the floating-point instructions have mixed GPR and FPR operands:
  // fine-tune the computed mapping.
  switch (Opc) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstFPR,
                  OpBaseIdx[1] + AArch64::FirstGPR};
    break;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR,
                  OpBaseIdx[1] + AArch64::FirstFPR};
    break;
  }
  case TargetOpcode::G_FCMP: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR, /* Predicate */ 0,
                  OpBaseIdx[2] + AArch64::FirstFPR,
                  OpBaseIdx[3] + AArch64::FirstFPR};
    break;
  }
  }

  // Finally construct the computed mapping.
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
    if (MI.getOperand(Idx).isReg())
      Mapping.setOperandMapping(
          Idx, ValueMapping{&AArch64::PartMappings[OpFinalIdx[Idx]], 1});

  return Mapping;
}
Exemplo n.º 23
0
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc)) {
    RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
    // G_{F|S|U}REM are not listed because they are not legal.
    // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
    // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // Shifts.
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
    // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector();
    bool SrcIsGPR = !SrcTy.isVector();
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return InstructionMapping{
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        /*NumOperands*/ 2};
  }
  case TargetOpcode::G_SEQUENCE:
    // FIXME: support this, but the generic code is really not going to do
    // anything sane.
    return InstructionMapping();
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();

  // Track the size and bank of each register.  We don't do partial mappings.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    OpSize[Idx] = Ty.getSizeInBits();

    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    // For floating-point instructions, scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc))
      OpRegBankIdx[Idx] = PMI_FirstFPR;
    else
      OpRegBankIdx[Idx] = PMI_FirstGPR;
  }

  unsigned Cost = 1;
  // Some of the floating-point instructions have mixed GPR and FPR operands:
  // fine-tune the computed mapping.
  switch (Opc) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP:
    OpRegBankIdx = {PMI_FirstGPR,
                    /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_BITCAST:
    // This is going to be a cross register bank copy and this is expensive.
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
          OpSize[0]);
    break;
  case TargetOpcode::G_LOAD:
    // Loading in the vector unit is slightly more expensive. This is really
    // only true for LD1R and similar instructions, but in the fast mode the
    // number does not matter anyway, and in the greedy mode the cost of the
    // cross-bank copy will offset it.
    // FIXME: Should be derived from the scheduling model.
    if (OpRegBankIdx[0] >= PMI_FirstFPR)
      Cost = 2;
    break;
  }

  // Finally construct the computed mapping.
  RegisterBankInfo::InstructionMapping Mapping =
      InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
      if (!Mapping->isValid())
        return InstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
  return Mapping;
}
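Both versions of getInstrMapping above follow the same two-step scheme: a per-operand default bank guess, then per-opcode fixups, with this second version also bumping the mapping cost when a G_BITCAST crosses banks or a load is mapped to the FP unit. A small standalone sketch of that scheme, using made-up names rather than the real PartialMappingIdx values and copyCost machinery:

#include <cstdio>

// Made-up stand-ins for the real partial-mapping indices; not LLVM API.
enum BankIdx { FirstGPR, FirstFPR };

// Default guess used above: vectors go to FPR; scalars and pointers go to
// GPR unless the opcode itself is a floating-point operation.
static BankIdx guessBank(bool IsVector, bool IsFPOpcode) {
  return (IsVector || IsFPOpcode) ? FirstFPR : FirstGPR;
}

// Cost fixups sketched from the switch above: a bitcast whose operands ended
// up in different banks is priced like a cross-bank copy, and a load mapped
// to the FP bank is slightly more expensive.
static unsigned adjustCost(bool IsBitcast, bool IsLoad, BankIdx Op0, BankIdx Op1) {
  unsigned Cost = 1;
  if (IsBitcast && Op0 != Op1)
    Cost = 2; // stand-in for copyCost(DstRB, SrcRB, Size)
  if (IsLoad && Op0 == FirstFPR)
    Cost = 2;
  return Cost;
}

int main() {
  BankIdx Dst = guessBank(/*IsVector=*/true,  /*IsFPOpcode=*/false); // vector -> FPR
  BankIdx Src = guessBank(/*IsVector=*/false, /*IsFPOpcode=*/false); // scalar -> GPR
  std::printf("bitcast cost: %u\n", adjustCost(/*IsBitcast=*/true, /*IsLoad=*/false, Dst, Src));
  std::printf("fpr load cost: %u\n", adjustCost(/*IsBitcast=*/false, /*IsLoad=*/true, FirstFPR, FirstFPR));
  return 0;
}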
Exemplo n.º 24
0
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
                                   LLT WideTy) {
  MIRBuilder.setInstr(MI);

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_SUB: {
    // Perform operation at larger width (any extension is fine here, high bits
    // don't affect the result) and then truncate the result back to the
    // original type.
    unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
    unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(1).getReg());
    MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(2).getReg());

    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildInstr(MI.getOpcode())
        .addDef(DstExt)
        .addUse(Src1Ext)
        .addUse(Src2Ext);

    MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    unsigned ExtOp = MI.getOpcode() == TargetOpcode::G_SDIV
                          ? TargetOpcode::G_SEXT
                          : TargetOpcode::G_ZEXT;

    unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildInstr(ExtOp).addDef(LHSExt).addUse(
        MI.getOperand(1).getReg());

    unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildInstr(ExtOp).addDef(RHSExt).addUse(
        MI.getOperand(2).getReg());

    unsigned ResExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildInstr(MI.getOpcode())
        .addDef(ResExt)
        .addUse(LHSExt)
        .addUse(RHSExt);

    MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ResExt);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_LOAD: {
    assert(alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) ==
               WideTy.getSizeInBits() &&
           "illegal to increase number of bytes loaded");

    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildLoad(DstExt, MI.getOperand(1).getReg(),
                         **MI.memoperands_begin());
    MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_STORE: {
    assert(alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) ==
               WideTy.getSizeInBits() &&
           "illegal to increase number of bytes modified by a store");

    unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildAnyExt(SrcExt, MI.getOperand(0).getReg());
    MIRBuilder.buildStore(SrcExt, MI.getOperand(1).getReg(),
                          **MI.memoperands_begin());
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_CONSTANT: {
    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildConstant(DstExt, MI.getOperand(1).getImm());
    MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_FCONSTANT: {
    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildFConstant(DstExt, *MI.getOperand(1).getFPImm());
    MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), DstExt);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_BRCOND: {
    unsigned TstExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildAnyExt(TstExt, MI.getOperand(0).getReg());
    MIRBuilder.buildBrCond(TstExt, *MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_ICMP: {
    assert(TypeIdx == 1 && "unable to legalize predicate");
    bool IsSigned = CmpInst::isSigned(
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()));
    unsigned Op0Ext = MRI.createGenericVirtualRegister(WideTy);
    unsigned Op1Ext = MRI.createGenericVirtualRegister(WideTy);
    if (IsSigned) {
      MIRBuilder.buildSExt(Op0Ext, MI.getOperand(2).getReg());
      MIRBuilder.buildSExt(Op1Ext, MI.getOperand(3).getReg());
    } else {
      MIRBuilder.buildZExt(Op0Ext, MI.getOperand(2).getReg());
      MIRBuilder.buildZExt(Op1Ext, MI.getOperand(3).getReg());
    }
    MIRBuilder.buildICmp(
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
        MI.getOperand(0).getReg(), Op0Ext, Op1Ext);
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_GEP: {
    assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
    unsigned OffsetExt = MRI.createGenericVirtualRegister(WideTy);
    MIRBuilder.buildSExt(OffsetExt, MI.getOperand(2).getReg());
    MI.getOperand(2).setReg(OffsetExt);
    return Legalized;
  }
  }
}
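The comment in the G_ADD/G_AND/G_MUL/... case above claims that any extension is fine because the high bits cannot leak into the truncated result, while G_SDIV/G_UDIV need a genuine sign- or zero-extension. A short standalone check of that claim in plain C++ (no LLVM involved):

#include <cassert>
#include <cstdint>

// For add-like ops only the low bits of the inputs feed the low bits of the
// result, so garbage in the widened high bits is discarded by the final
// truncation. For division the whole value matters, so the operands must be
// extended in a way that preserves their numeric value.
int main() {
  uint8_t A = 200, B = 100;

  // "Any-extend" A and B into 32 bits with arbitrary junk in the high bits.
  uint32_t AExt = 0xABCD0000u | A;
  uint32_t BExt = 0x12340000u | B;

  // Widened add, then truncate back to 8 bits: matches the narrow add.
  uint8_t Narrow = static_cast<uint8_t>(A + B);
  uint8_t Widened = static_cast<uint8_t>(AExt + BExt);
  assert(Narrow == Widened);

  // For unsigned division the junk is not harmless: the operands must be
  // zero-extended (masking off the junk restores the zero-extended values).
  uint8_t NarrowDiv = static_cast<uint8_t>(A / B);
  uint8_t ZExtDiv = static_cast<uint8_t>((AExt & 0xFFu) / (BExt & 0xFFu));
  assert(NarrowDiv == ZExtDiv);
  assert(static_cast<uint8_t>(AExt / BExt) != NarrowDiv); // garbage changes the quotient

  return 0;
}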
Exemplo n.º 25
0
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  auto Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
    return getSameOperandsMapping(MI, false);
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameOperandsMapping(MI, true);
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);

  switch (Opc) {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FCONSTANT:
    // Instructions that have only floating-point operands (all scalars in VECRReg).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ANYEXT: {
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
                     Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
    bool isFPAnyExt =
        Ty0.getSizeInBits() == 128 &&
        (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
        Opc == TargetOpcode::G_ANYEXT;

    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
                               OpRegBankIdx);
  } break;
  default:
    // Track the bank of each register, use NotFP mapping (all scalars in GPRs)
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
    return getInvalidInstructionMapping();

  return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
                               getOperandsMapping(OpdsMapping), NumOperands);
}
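The G_TRUNC/G_ANYEXT handling above keys the FP-vs-GPR decision purely on operand sizes: a trunc from 128 bits down to a 32- or 64-bit scalar (or the reverse anyext) is really a move between a vector register and a floating-point scalar, so it is mapped as FP. A hypothetical standalone predicate capturing the same size check (not an LLVM helper):

#include <cstdio>

// Hypothetical helper mirroring the isFPTrunc/isFPAnyExt check above.
static bool isFPLikeTruncOrAnyExt(bool IsTrunc, unsigned DstBits, unsigned SrcBits) {
  if (IsTrunc)
    return (DstBits == 32 || DstBits == 64) && SrcBits == 128;
  // G_ANYEXT direction: a small FP scalar widened back into a 128-bit register.
  return DstBits == 128 && (SrcBits == 32 || SrcBits == 64);
}

int main() {
  std::printf("%d\n", isFPLikeTruncOrAnyExt(/*IsTrunc=*/true, 32, 128));  // 1: FP-style trunc
  std::printf("%d\n", isFPLikeTruncOrAnyExt(/*IsTrunc=*/false, 64, 32));  // 0: plain integer anyext
  return 0;
}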
Exemplo n.º 26
0
// FIXME: inefficient implementation for now. Without ComputeValueVTs we're
// probably going to need specialized lookup structures for various types before
// we have any hope of doing well with something like <13 x i3>. Even the common
// cases should do better than what we have now.
std::pair<LegalizerInfo::LegalizeAction, LLT>
LegalizerInfo::getAction(const InstrAspect &Aspect) const {
  assert(TablesInitialized && "backend forgot to call computeTables");
  // These *have* to be implemented for now; they're the fundamental basis of
  // how everything else is transformed.

  // FIXME: the long-term plan calls for expansion in terms of load/store (if
  // they're not legal).
  if (Aspect.Opcode == TargetOpcode::G_MERGE_VALUES ||
      Aspect.Opcode == TargetOpcode::G_UNMERGE_VALUES)
    return std::make_pair(Legal, Aspect.Type);

  LLT Ty = Aspect.Type;
  LegalizeAction Action = findInActions(Aspect);
  // LegalizerHelper is not able to handle non-power-of-2 types right now, so do
  // not try to legalize them unless they are marked as Legal or Custom.
  // FIXME: This is a temporary hack until the general non-power-of-2
  // legalization works.
  if (!isPowerOf2_64(Ty.getSizeInBits()) &&
      !(Action == Legal || Action == Custom))
    return std::make_pair(Unsupported, LLT());

  if (Action != NotFound)
    return findLegalAction(Aspect, Action);

  unsigned Opcode = Aspect.Opcode;
  if (!Ty.isVector()) {
    auto DefaultAction = DefaultActions.find(Aspect.Opcode);
    if (DefaultAction != DefaultActions.end() && DefaultAction->second == Legal)
      return std::make_pair(Legal, Ty);

    if (DefaultAction != DefaultActions.end() && DefaultAction->second == Lower)
      return std::make_pair(Lower, Ty);

    if (DefaultAction == DefaultActions.end() ||
        DefaultAction->second != NarrowScalar)
      return std::make_pair(Unsupported, LLT());
    return findLegalAction(Aspect, NarrowScalar);
  }

  LLT EltTy = Ty.getElementType();
  int NumElts = Ty.getNumElements();

  auto ScalarAction = ScalarInVectorActions.find(std::make_pair(Opcode, EltTy));
  if (ScalarAction != ScalarInVectorActions.end() &&
      ScalarAction->second != Legal)
    return findLegalAction(Aspect, ScalarAction->second);

  // The element type is legal in principle, but the number of elements is
  // wrong.
  auto MaxLegalElts = MaxLegalVectorElts.lookup(std::make_pair(Opcode, EltTy));
  if (MaxLegalElts > NumElts)
    return findLegalAction(Aspect, MoreElements);

  if (MaxLegalElts == 0) {
    // Scalarize if there's no legal vector type, which is just a special case
    // of FewerElements.
    return std::make_pair(FewerElements, EltTy);
  }

  return findLegalAction(Aspect, FewerElements);
}
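For vector types, the tail of getAction above boils down to comparing the requested element count against the widest legal vector for that opcode and element type: widen to more elements if a wider legal vector exists, scalarize if there is no legal vector at all, and otherwise split into fewer elements. A minimal standalone sketch of that decision (made-up names, not the LLVM enum):

#include <cstdio>

// Sketch of the element-count decision: MoreElements pads the vector up to a
// legal width, FewerElements splits it, and scalarization is just the special
// case of FewerElements when no vector of this element type is legal.
enum Action { MoreElements, FewerElements };

static Action chooseVectorAction(int NumElts, int MaxLegalElts) {
  if (MaxLegalElts > NumElts)
    return MoreElements;   // a wider legal vector exists: pad up to it
  if (MaxLegalElts == 0)
    return FewerElements;  // no legal vector at all: scalarize
  return FewerElements;    // too wide: split into legal-sized pieces
}

int main() {
  std::printf("%d\n", chooseVectorAction(3, 4)); // MoreElements
  std::printf("%d\n", chooseVectorAction(8, 4)); // FewerElements
  std::printf("%d\n", chooseVectorAction(8, 0)); // FewerElements (scalarize)
  return 0;
}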