Example #1
static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}
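A mutation like this is only half of a rule: it has to be paired with a predicate when the rule set is built. A minimal sketch of the wiring, mirroring the real use in Example #29 (the choice of G_PHI here is just illustrative):

getActionDefinitionsBuilder(G_PHI)
  // Pad small odd-element vectors out by one element, e.g. v3s16 -> v4s16.
  .moreElementsIf(isSmallOddVector(0), oneMoreElement(0));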
Example #2
LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx,
                                                    LLT NewEltTy) {
  return [=](const LegalityQuery &Query) {
    const LLT OldTy = Query.Types[TypeIdx];
    return std::make_pair(TypeIdx, OldTy.changeElementType(NewEltTy));
  };
}
Example #3
bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  unsigned Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}
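A note on the constants: for IEEE doubles, any value with magnitude >= 2^52 is already an integer, and for |Src| < 2^52 computing (Src + copysign(2^52, Src)) - copysign(2^52, Src) pushes the fraction bits out of the 52-bit mantissa and back, leaving Src rounded to the nearest integer in the current rounding mode. C2Val (0x1.fffffffffffffp+51) is the largest magnitude that still has fraction bits, so the final select passes anything larger through unchanged.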
Example #4
std::pair<LegalizerInfo::LegalizeAction, LLT>
LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
  assert(Aspect.Type.isVector());
  // First legalize the vector element size, then legalize the number of
  // lanes in the vector.
  if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp)
    return {NotFound, Aspect.Type};
  const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
  const unsigned TypeIdx = Aspect.Idx;
  if (TypeIdx >= ScalarInVectorActions[OpcodeIdx].size())
    return {NotFound, Aspect.Type};
  const SizeAndActionsVec &ElemSizeVec =
      ScalarInVectorActions[OpcodeIdx][TypeIdx];

  LLT IntermediateType;
  auto ElementSizeAndAction =
      findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits());
  IntermediateType =
      LLT::vector(Aspect.Type.getNumElements(), ElementSizeAndAction.first);
  if (ElementSizeAndAction.second != Legal)
    return {ElementSizeAndAction.second, IntermediateType};

  auto i = NumElements2Actions[OpcodeIdx].find(
      IntermediateType.getScalarSizeInBits());
  if (i == NumElements2Actions[OpcodeIdx].end()) {
    return {NotFound, IntermediateType};
  }
  const SizeAndActionsVec &NumElementsVec = (*i).second[TypeIdx];
  auto NumElementsAndAction =
      findAction(NumElementsVec, IntermediateType.getNumElements());
  return {NumElementsAndAction.second,
          LLT::vector(NumElementsAndAction.first,
                      IntermediateType.getScalarSizeInBits())};
}
Example #5
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::libcall(MachineInstr &MI) {
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  MIRBuilder.setInstr(MI);

  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_FREM: {
    auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
    Type *Ty = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
    auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
    auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
    const char *Name =
        TLI.getLibcallName(Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32);

    CLI.lowerCall(MIRBuilder, MachineOperand::CreateES(Name), Ty,
                  MI.getOperand(0).getReg(), {Ty, Ty},
                  {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
Example #6
LLT MachineLegalizer::findLegalType(unsigned Opcode, LLT Ty,
                                    LegalizeAction Action) const {
  switch(Action) {
  default:
    llvm_unreachable("Cannot find legal type");
  case Legal:
    return Ty;
  case NarrowScalar: {
    return findLegalType(Opcode, Ty,
                         [&](LLT Ty) -> LLT { return Ty.halfScalarSize(); });
  }
  case WidenScalar: {
    return findLegalType(Opcode, Ty,
                         [&](LLT Ty) -> LLT { return Ty.doubleScalarSize(); });
  }
  case FewerElements: {
    return findLegalType(Opcode, Ty,
                         [&](LLT Ty) -> LLT { return Ty.halfElements(); });
  }
  case MoreElements: {
    return findLegalType(
        Opcode, Ty, [&](LLT Ty) -> LLT { return Ty.doubleElements(); });
  }
  }
}
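The three-argument overload called above is not part of this excerpt. A plausible shape for it, under the assumption that it simply re-applies the mutation until the action table reports Legal (the body below is a sketch, not the source):

LLT MachineLegalizer::findLegalType(unsigned Opcode, LLT Ty,
                                    function_ref<LLT(LLT)> NextType) const {
  // Step the type with the supplied mutation until it becomes legal.
  LegalizeAction Action;
  do {
    Ty = NextType(Ty);
    Action = findInActions({Opcode, Ty});
  } while (Action != Legal);
  return Ty;
}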
Example #7
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}
Example #8
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}
Example #9
LegalizeMutation LegalizeMutations::widenScalarOrEltToNextPow2(unsigned TypeIdx,
                                                               unsigned Min) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    unsigned NewEltSizeInBits =
        std::max(1u << Log2_32_Ceil(Ty.getScalarSizeInBits()), Min);
    return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits));
  };
}
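A worked example: for a 17-bit scalar element, Log2_32_Ceil(17) = 5, so 1u << 5 = 32 and the element widens to s32; with Min = 32, even an s16 element is pushed up to 32 bits.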
Example #10
bool CLinearRidgeRegression::train_machine(CFeatures* data)
{
    REQUIRE(m_labels,"No labels set\n");

    if (!data)
    	data=features;

    REQUIRE(data,"No features provided and no features previously set\n");

    REQUIRE(m_labels->get_num_labels() == data->get_num_vectors(),
    	"Number of training vectors (%d) does not match number of labels (%d)\n",
    	data->get_num_vectors(), m_labels->get_num_labels());

    REQUIRE(data->get_feature_class() == C_DENSE,
    	"Expected Dense Features (%d) but got (%d)\n",
    	C_DENSE, data->get_feature_class());

    REQUIRE(data->get_feature_type() == F_DREAL,
    	"Expected Real Features (%d) but got (%d)\n",
    	F_DREAL, data->get_feature_type());

    CDenseFeatures<float64_t>* feats=(CDenseFeatures<float64_t>*) data;
    int32_t num_feat=feats->get_num_features();
    int32_t num_vec=feats->get_num_vectors();

    SGMatrix<float64_t> kernel_matrix(num_feat,num_feat);
    SGMatrix<float64_t> feats_matrix(feats->get_feature_matrix());
    SGVector<float64_t> y(num_feat);
    SGVector<float64_t> tau_vector(num_feat);

    tau_vector.zero();
    tau_vector.add(m_tau);

    Map<MatrixXd> eigen_kernel_matrix(kernel_matrix.matrix, num_feat,num_feat);
    Map<MatrixXd> eigen_feats_matrix(feats_matrix.matrix, num_feat,num_vec);
    Map<VectorXd> eigen_y(y.vector, num_feat);
    Map<VectorXd> eigen_labels(((CRegressionLabels*)m_labels)->get_labels(),num_vec);
    Map<VectorXd> eigen_tau(tau_vector.vector, num_feat);

    eigen_kernel_matrix = eigen_feats_matrix*eigen_feats_matrix.transpose();

    eigen_kernel_matrix.diagonal() += eigen_tau;

    eigen_y = eigen_feats_matrix*eigen_labels;

    LLT<MatrixXd> llt;
    llt.compute(eigen_kernel_matrix);
    if(llt.info() != Eigen::Success)
    {
    	SG_WARNING("Features covariance matrix was not positive definite\n");
    	return false;
    }
    eigen_y = llt.solve(eigen_y);

    set_w(y);
    return true;
}
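In matrix form the routine solves the regularized normal equations (X X^T + tau*I) w = X y via a Cholesky (LLT) factorization. A minimal standalone sketch of the same solve, assuming only Eigen 3 (the dimensions and tau value here are illustrative):

#include <Eigen/Dense>
#include <iostream>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  const int num_feat = 3, num_vec = 5;
  const double tau = 1e-3;                           // ridge parameter

  MatrixXd X = MatrixXd::Random(num_feat, num_vec);  // one column per training vector
  VectorXd labels = VectorXd::Random(num_vec);

  MatrixXd K = X * X.transpose();                    // num_feat x num_feat covariance
  K.diagonal().array() += tau;                       // Tikhonov regularization
  VectorXd rhs = X * labels;

  Eigen::LLT<MatrixXd> llt(K);
  if (llt.info() != Eigen::Success)
    return 1;                                        // not positive definite
  VectorXd w = llt.solve(rhs);
  std::cout << "w = " << w.transpose() << "\n";
  return 0;
}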
Example #11
LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx,
                                                           unsigned Min) {
  return [=](const LegalityQuery &Query) {
    const LLT VecTy = Query.Types[TypeIdx];
    unsigned NewNumElements =
        std::max(1u << Log2_32_Ceil(VecTy.getNumElements()), Min);
    return std::make_pair(TypeIdx,
                          LLT::vector(NewNumElements, VecTy.getElementType()));
  };
}
Example #12
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}
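Worked through: a v4s32 query (128 bits) gives Pieces = (128 + 63) / 64 = 2 and NewNumElts = (4 + 1) / 2 = 2, splitting the type into v2s32 pieces; a v3s32 (96 bits) likewise yields Pieces = 2 and NewNumElts = 2.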
Example #13
void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();
  LLVM_DEBUG(dbgs() << "Applying default-like mapping\n");
  for (unsigned OpIdx = 0,
                EndIdx = OpdMapper.getInstrMapping().getNumOperands();
       OpIdx != EndIdx; ++OpIdx) {
    LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx);
    MachineOperand &MO = MI.getOperand(OpIdx);
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n");
      continue;
    }
    if (!MO.getReg()) {
      LLVM_DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
      continue;
    }
    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns !=
               0 &&
           "Invalid mapping");
    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns ==
               1 &&
           "This mapping is too complex for this function");
    iterator_range<SmallVectorImpl<unsigned>::const_iterator> NewRegs =
        OpdMapper.getVRegs(OpIdx);
    if (empty(NewRegs)) {
      LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
      continue;
    }
    unsigned OrigReg = MO.getReg();
    unsigned NewReg = *NewRegs.begin();
    LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
    MO.setReg(NewReg);
    LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));

    // The OperandsMapper creates plain scalars; we may have to fix that.
    // Check if the types match and, if not, fix that.
    LLT OrigTy = MRI.getType(OrigReg);
    LLT NewTy = MRI.getType(NewReg);
    if (OrigTy != NewTy) {
      // The default mapping is not supposed to change the size of
      // the storage. However, right now we don't necessarily bump all
      // the types to storage size. For instance, we can consider
      // s16 G_AND legal whereas the storage size is going to be 32.
      assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
             "Types with difference size cannot be handled by the default "
             "mapping");
      LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
                        << OrigTy);
      MRI.setType(NewReg, OrigTy);
    }
    LLVM_DEBUG(dbgs() << '\n');
  }
}
Example #14
bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Align = MI.getOperand(2).getImm();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildLoad(
      List, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, /* Align = */ PtrSize));

  unsigned DstPtr;
  if (Align > PtrSize) {
    // Realign the list to the actual required alignment.
    unsigned AlignMinus1 = MRI.createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildConstant(AlignMinus1, Align - 1);

    unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(ListTmp, List, AlignMinus1);

    DstPtr = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
  } else
    DstPtr = List;

  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValSize, std::max(Align, PtrSize)));

  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));

  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);

  MIRBuilder.buildStore(
      NewList, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore,
                               PtrSize, /* Align = */ PtrSize));

  MI.eraseFromParent();
  return true;
}
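The realignment above is the usual round-up-to-alignment idiom: ListTmp = List + (Align - 1), followed by clearing the low Log2(Align) bits with the pointer-mask instruction, i.e. aligned = (p + a - 1) & ~(a - 1).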
Example #15
void MachineLegalizer::computeTables() {
  for (auto &Op : Actions) {
    LLT Ty = Op.first.second;
    if (!Ty.isVector())
      continue;

    auto &Entry =
        MaxLegalVectorElts[std::make_pair(Op.first.first, Ty.getElementType())];
    Entry = std::max(Entry, Ty.getNumElements());
  }

  TablesInitialized = true;
}
Example #16
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}
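A hypothetical call site for the folder (the register names and the surrounding combine are assumptions, not from the source):

// Fold, e.g., a G_ADD whose operands are both constant vregs.
if (Optional<APInt> Folded =
        ConstantFoldBinOp(TargetOpcode::G_ADD, Op1Reg, Op2Reg, MRI)) {
  MIRBuilder.buildConstant(DstReg, *Folded);
  MI.eraseFromParent();
}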
Example #17
// FIXME: inefficient implementation for now. Without ComputeValueVTs we're
// probably going to need specialized lookup structures for various types before
// we have any hope of doing well with something like <13 x i3>. Even the common
// cases should do better than what we have now.
std::pair<MachineLegalizer::LegalizeAction, LLT>
MachineLegalizer::getAction(const InstrAspect &Aspect) const {
  assert(TablesInitialized && "backend forgot to call computeTables");
  // These *have* to be implemented for now; they're the fundamental basis of
  // how everything else is transformed.

  // FIXME: the long-term plan calls for expansion in terms of load/store (if
  // they're not legal).
  if (Aspect.Opcode == TargetOpcode::G_SEQUENCE ||
      Aspect.Opcode == TargetOpcode::G_EXTRACT)
    return std::make_pair(Legal, Aspect.Type);

  LegalizeAction Action = findInActions(Aspect);
  if (Action != NotFound)
    return findLegalAction(Aspect, Action);

  unsigned Opcode = Aspect.Opcode;
  LLT Ty = Aspect.Type;
  if (!Ty.isVector()) {
    auto DefaultAction = DefaultActions.find(Aspect.Opcode);
    if (DefaultAction != DefaultActions.end() && DefaultAction->second == Legal)
      return std::make_pair(Legal, Ty);

    assert(DefaultAction->second == NarrowScalar && "unexpected default");
    return findLegalAction(Aspect, NarrowScalar);
  }

  LLT EltTy = Ty.getElementType();
  int NumElts = Ty.getNumElements();

  auto ScalarAction = ScalarInVectorActions.find(std::make_pair(Opcode, EltTy));
  if (ScalarAction != ScalarInVectorActions.end() &&
      ScalarAction->second != Legal)
    return findLegalAction(Aspect, ScalarAction->second);

  // The element type is legal in principle, but the number of elements is
  // wrong.
  auto MaxLegalElts = MaxLegalVectorElts.lookup(std::make_pair(Opcode, EltTy));
  if (MaxLegalElts > NumElts)
    return findLegalAction(Aspect, MoreElements);

  if (MaxLegalElts == 0) {
    // Scalarize if there's no legal vector type, which is just a special case
    // of FewerElements.
    return std::make_pair(FewerElements, EltTy);
  }

  return findLegalAction(Aspect, FewerElements);
}
Example #18
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                           LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0)
    return UnableToLegalize;
  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD: {
    unsigned NarrowSize = NarrowTy.getSizeInBits();
    unsigned DstReg = MI.getOperand(0).getReg();
    int NumParts = MRI.getType(DstReg).getSizeInBits() / NarrowSize;

    MIRBuilder.setInstr(MI);

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
      DstRegs.push_back(DstReg);
      Indexes.push_back(i * NarrowSize);
    }

    MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
Example #19
void LegalizerInfo::computeTables() {
  for (unsigned Opcode = 0; Opcode <= LastOp - FirstOp; ++Opcode) {
    for (unsigned Idx = 0; Idx != Actions[Opcode].size(); ++Idx) {
      for (auto &Action : Actions[Opcode][Idx]) {
        LLT Ty = Action.first;
        if (!Ty.isVector())
          continue;

        auto &Entry = MaxLegalVectorElts[std::make_pair(Opcode + FirstOp,
                                                        Ty.getElementType())];
        Entry = std::max(Entry, Ty.getNumElements());
      }
    }
  }

  TablesInitialized = true;
}
Example #20
// Recompute the kernel.
void GPCMGaussianProcess::recomputeKernel()
{
    // Constants.
    int N = dataMatrix.rows();
    int D = dataMatrix.cols();

    // Initialize the covariance matrix.
    K.noalias() = kernel->covariance(X);

    // Compute inverse of kernel matrix.
    LLT<MatrixXd> cholesky = K.llt();
    logDetK = cholesky.matrixLLT().diagonal().array().log().sum()*2.0;
    invK = cholesky.solve(MatrixXd::Identity(N,N));

    // Clear gK matrices.
    gK.setZero(N,N);
    gKd.setZero(N,N);
}
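The logDetK line is the standard Cholesky identity: with K = L * L^T, det K = (prod_i L_ii)^2, so log det K = 2 * sum_i log(L_ii). matrixLLT() returns the storage holding L in its lower triangle, so its diagonal is exactly diag(L).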
Example #21
void MachineLegalizeHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
                                         SmallVectorImpl<unsigned> &VRegs) {
  unsigned Size = Ty.getSizeInBits();
  SmallVector<uint64_t, 4> Indexes;
  for (int i = 0; i < NumParts; ++i) {
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
    Indexes.push_back(i * Size);
  }
  MIRBuilder.buildExtract(VRegs, Indexes, Reg);
}
Example #22
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::narrowScalar(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy) {
  // FIXME: Don't know how to handle secondary types yet.
  if (TypeIdx != 0)
    return UnableToLegalize;
  switch (MI.getOpcode()) {
  default:
    return UnableToLegalize;
  case TargetOpcode::G_ADD: {
    // Expand in terms of carry-setting/consuming G_ADDE instructions.
    unsigned NarrowSize = NarrowTy.getSizeInBits();
    int NumParts = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() /
                   NarrowSize;

    MIRBuilder.setInstr(MI);

    SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
    extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
    extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

    unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildConstant(CarryIn, 0);

    for (int i = 0; i < NumParts; ++i) {
      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
      unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

      MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
                            Src2Regs[i], CarryIn);

      DstRegs.push_back(DstReg);
      Indexes.push_back(i * NarrowSize);
      CarryIn = CarryOut;
    }
    unsigned DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}
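For instance, narrowing an s64 G_ADD with NarrowTy = s32 gives NumParts = 64 / 32 = 2: the operands are split into two s32 halves, added through a two-link G_UADDE carry chain seeded with a zero carry-in, and the halves are stitched back together by G_SEQUENCE at bit offsets 0 and 32.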
Example #23
unsigned TargetRegisterInfo::getRegSizeInBits(unsigned Reg,
                                         const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (isPhysicalRegister(Reg)) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
  } else {
    LLT Ty = MRI.getType(Reg);
    unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
    // If Reg is not a generic register, query the register class to
    // get its size.
    if (RegSize)
      return RegSize;
    // Since Reg is not a generic register, it must have a register class.
    RC = MRI.getRegClass(Reg);
  }
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}
Example #24
bool CKernelRidgeRegression::solve_krr_system()
{
	SGMatrix<float64_t> kernel_matrix(kernel->get_kernel_matrix());
	int32_t n = kernel_matrix.num_rows;
	SGVector<float64_t> y = ((CRegressionLabels*)m_labels)->get_labels();

	for(index_t i=0; i<n; i++)
		kernel_matrix(i,i) += m_tau;

	Map<MatrixXd> eigen_kernel_matrix(kernel_matrix.matrix, n, n);
	Map<VectorXd> eigen_alphas(m_alpha.vector, n);
	Map<VectorXd> eigen_y(y.vector, n);

	LLT<MatrixXd> llt;
	llt.compute(eigen_kernel_matrix);
	if (llt.info() != Eigen::Success)
	{
		SG_WARNING("Features covariance matrix was not positive definite\n");
		return false;
	}
	eigen_alphas = llt.solve(eigen_y);
	return true;
}
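In matrix form this solves the kernel ridge system (K + tau*I) * alpha = y for the dual coefficients alpha; adding tau to the diagonal both regularizes the fit and pushes the kernel matrix toward positive definiteness, which the LLT (Cholesky) factorization requires.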
Example #25
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
  if (Mapping.isValid())
    return Mapping;

  // As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
  // won't work for normal floating-point types (or NZCV). When such
  // instructions exist we'll need to look at the MI's opcode.
  LLT Ty = MI.getType();
  unsigned BankID;
  if (Ty.isVector())
    BankID = AArch64::FPRRegBankID;
  else
    BankID = AArch64::GPRRegBankID;

  Mapping = InstructionMapping{1, 1, MI.getNumOperands()};
  int Size = Ty.isSized() ? Ty.getSizeInBits() : 0;
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
    Mapping.setOperandMapping(Idx, Size, getRegBank(BankID));

  return Mapping;
}
Example #26
unsigned
MachineRegisterInfo::createGenericVirtualRegister(LLT Ty) {
  assert(Ty.isValid() && "Cannot create empty virtual register");

  // New virtual register number.
  unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
  VRegInfo.grow(Reg);
  // FIXME: Should we use a dummy register bank?
  VRegInfo[Reg].first = static_cast<RegisterBank *>(nullptr);
  getVRegToType()[Reg] = Ty;
  RegAllocHints.grow(Reg);
  if (TheDelegate)
    TheDelegate->MRI_NoteNewVirtualRegister(Reg);
  return Reg;
}
Example #27
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:
      return PMI_GPR8;
    case 16:
      return PMI_GPR16;
    case 32:
      return PMI_GPR32;
    case 64:
      return PMI_GPR64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    switch (Ty.getSizeInBits()) {
    case 32:
      return PMI_FP32;
    case 64:
      return PMI_FP64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    switch (Ty.getSizeInBits()) {
    case 128:
      return PMI_VEC128;
    case 256:
      return PMI_VEC256;
    case 512:
      return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  return PMI_None;
}
Example #28
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands are using similar size and type.
  // Should probably be checked by the machine verifier.
  // This code won't catch cases where the number of lanes is
  // different between the operands.
  // If we want to go to that level of details, it is probably
  // best to check that the types are the same, period.
  // Currently, we just check that the register banks are the same
  // for each type.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}
Example #29
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));


  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal.  We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
    .legalForCartesianProduct({S16, S32, S64}, {S16, S32, S64})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
      // Use actual fsub instruction
      .legalFor({S32})
      // Must use fadd + fneg
      .lowerFor({S64, S16, V2S16})
      .scalarize(0)
      .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);


  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               ST.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS;
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          // XXX hasLoadX3
          return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);


  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, V2S32, V2S16, V4S16,
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
            VecTy.getSizeInBits() <= 512 &&
            IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);

  }

  // TODO: vectors of pointers
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalForCartesianProduct(AllS32Vectors, {S32})
      .legalForCartesianProduct(AllS64Vectors, {S64})
      .clampNumElements(0, V16S32, V16S32)
      .clampNumElements(0, V2S64, V8S64)
      .minScalarSameAs(1, 0)
      // FIXME: Sort of a hack to make progress on other legalizations.
      .legalIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getScalarSizeInBits() <= 32 ||
               Query.Types[0].getScalarSizeInBits() == 64;
      });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16},
               {LLT::vector(4, LocalPtr), LLT::vector(2, LocalPtr)},
               {LLT::vector(4, PrivatePtr), LLT::vector(2, PrivatePtr)}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128,
          // whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}
Example #30
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}