Example #1
static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  if (STI.isTargetMachO())
    return ARM::R7;
  else if (STI.isTargetWindows())
    return ARM::R11;
  else // ARM EABI
    return STI.isThumb() ? ARM::R7 : ARM::R11;
}
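For reference, the decision table this function encodes can be exercised on its own. The sketch below uses plain stand-in flags and register numbers instead of the real ARMSubtarget and ARM::* enumerators, so it only illustrates the logic; it is not LLVM code.

#include <cstdio>

// Stand-ins for ARM::R7 / ARM::R11 (dummy values, for illustration only).
enum Reg { R7 = 7, R11 = 11 };

// Same decision table as getFramePointerReg above, over plain flags.
static Reg framePointerFor(bool IsMachO, bool IsWindows, bool IsThumb) {
  if (IsMachO)
    return R7;               // MachO targets always use r7
  if (IsWindows)
    return R11;              // Windows on ARM uses r11
  return IsThumb ? R7 : R11; // ARM EABI: r7 in Thumb mode, r11 in ARM mode
}

int main() {
  std::printf("MachO      -> r%d\n", framePointerFor(true, false, false));
  std::printf("EABI/Thumb -> r%d\n", framePointerFor(false, false, true));
  std::printf("EABI/ARM   -> r%d\n", framePointerFor(false, false, false));
}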
Example #2
static std::string computeDataLayout(ARMSubtarget &ST) {
  std::string Ret = "";

  if (ST.isLittle())
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(ST.getTargetTriple());

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // On thumb, i16, i8 and i1 have natural alignment requirements, but we try to
  // align to 32.
  if (ST.isThumb())
    Ret += "-i1:8:32-i8:8:32-i16:16:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (!ST.isAPCS_ABI())
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ST.isAPCS_ABI())
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ST.isAPCS_ABI())
    Ret += "-v64:32:64-v128:32:128";
  else
    Ret += "-v128:64:128";

  // On thumb and APCS, only try to align aggregates to 32 bits (the default is
  // 64 bits).
  if (ST.isThumb() || ST.isAPCS_ABI())
    Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (ST.isTargetNaCl())
    Ret += "-S128";
  else if (ST.isAAPCS_ABI())
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
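As a worked example, for a little-endian Thumb AAPCS/ELF subtarget the components above concatenate to the familiar ARM data layout string. The snippet below assembles that string by hand, assuming the ELF mangling component is "-m:e"; it only mirrors the function's output and does not call into LLVM.

#include <cstdio>
#include <string>

int main() {
  // Expected computeDataLayout result for little-endian Thumb, AAPCS, ELF.
  std::string DL = std::string("e") + "-m:e"      // little endian, ELF mangling
                   + "-p:32:32"                   // 32-bit pointers
                   + "-i1:8:32-i8:8:32-i16:16:32" // Thumb small-integer alignment
                   + "-i64:64"                    // non-APCS: natural i64 alignment
                   + "-v128:64:128"               // non-APCS vector alignment
                   + "-a:0:32"                    // Thumb: 32-bit aggregates
                   + "-n32"                       // 32-bit integer registers
                   + "-S64";                      // AAPCS: 64-bit aligned stack
  std::printf("%s\n", DL.c_str());
  // Prints: e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64
}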
Example #3
ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
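The #define/#include/#undef triples pull individual sections out of the TableGen-generated ARMGenGlobalISel.inc: the generated file guards each section with #ifdef, so the same .inc can be included several times with different sections enabled. Below is a self-contained sketch of that pattern with the "generated" sections inlined behind guards instead of living in a separate file; the names are made up for illustration.

#include <cstdio>

// In real code these guarded sections would live in a generated .inc file.
#define GET_DEMO_PREDICATES_INIT
#ifdef GET_DEMO_PREDICATES_INIT
static const char *Predicates = "predicates section included";
#endif
#undef GET_DEMO_PREDICATES_INIT

#ifdef GET_DEMO_TEMPORARIES_INIT // never defined here: section is skipped
static const char *Temporaries = "this is not compiled";
#endif

int main() { std::printf("%s\n", Predicates); }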
Example #4
ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);
#undef STORE_OPCODE
}
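The STORE_OPCODE macro relies on token pasting: ARM::t2##OPC glues the t2 prefix onto the opcode name, so each cached field selects between the Thumb2 and ARM encodings at run time. A standalone sketch with dummy enumerator values in place of the real ARM::* opcodes:

#include <cstdio>

namespace ARM {
// Dummy values; the real enumerators come from the ARM target descriptions.
enum { SXTH = 100, t2SXTH = 200 };
} // namespace ARM

int main() {
  bool isThumb = true;
  unsigned SEXT16;
#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  // Expands to: SEXT16 = isThumb ? ARM::t2SXTH : ARM::SXTH;
  STORE_OPCODE(SEXT16, SXTH);
#undef STORE_OPCODE
  std::printf("SEXT16 = %u\n", SEXT16); // prints 200 in Thumb mode
}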
Example #5
ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  setAction({G_FRAME_INDEX, p0}, Legal);

  for (unsigned Op : {G_LOAD, G_STORE}) {
    for (auto Ty : {s1, s8, s16, s32, p0})
      setAction({Op, Ty}, Legal);
    setAction({Op, 1, p0}, Legal);
  }

  for (unsigned Op : {G_ADD, G_SUB, G_MUL})
    for (auto Ty : {s1, s8, s16, s32})
      setAction({Op, Ty}, Legal);

  for (unsigned Op : {G_SDIV, G_UDIV}) {
    for (auto Ty : {s8, s16})
      // FIXME: We need WidenScalar here, but in the case of targets with
      // software division we'll also need Libcall afterwards. Treat as Custom
      // until we have better support for chaining legalization actions.
      setAction({Op, Ty}, Custom);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Legal);
    else
      setAction({Op, s32}, Libcall);
  }

  for (unsigned Op : {G_SEXT, G_ZEXT}) {
    setAction({Op, s32}, Legal);
    for (auto Ty : {s1, s8, s16})
      setAction({Op, 1, Ty}, Legal);
  }

  setAction({G_GEP, p0}, Legal);
  setAction({G_GEP, 1, s32}, Legal);

  setAction({G_CONSTANT, s32}, Legal);

  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    setAction({G_FADD, s32}, Legal);
    setAction({G_FADD, s64}, Legal);

    setAction({G_LOAD, s64}, Legal);
    setAction({G_STORE, s64}, Legal);
  } else {
    for (auto Ty : {s32, s64})
      setAction({G_FADD, Ty}, Libcall);
  }

  for (unsigned Op : {G_FREM, G_FPOW})
    for (auto Ty : {s32, s64})
      setAction({Op, Ty}, Libcall);

  computeTables();
}
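Conceptually, each setAction call fills one cell of a table keyed by (opcode, type index, type); the legalizer later consults that table to decide whether an instruction is kept as is, widened, lowered, or turned into a library call. A rough standalone model of the table using plain C++ containers (this is not the LLVM API):

#include <cstdio>
#include <map>
#include <tuple>

enum Action { Legal, Libcall, WidenScalar, Custom };
// Key: (generic opcode, type index, scalar bit width).
using Key = std::tuple<unsigned, unsigned, unsigned>;

int main() {
  const unsigned G_SDIV = 1; // dummy opcode number for the sketch
  std::map<Key, Action> Table;
  // Mirrors the G_SDIV rules above for a target without hardware divide:
  Table[std::make_tuple(G_SDIV, 0u, 8u)] = Custom;   // s8: widen, then libcall
  Table[std::make_tuple(G_SDIV, 0u, 16u)] = Custom;  // s16: same
  Table[std::make_tuple(G_SDIV, 0u, 32u)] = Libcall; // s32: division helper call
  std::printf("entries recorded: %zu\n", Table.size());
}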
Example #6
static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}
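This is the later, refactored form of Example #1: the per-target cases moved behind a single subtarget predicate. Assuming useR7AsFramePointer() folds in exactly the cases from Example #1, it would reduce to something like the sketch below (plain flags, hypothetical helper, not the real ARMSubtarget method).

#include <cstdio>

// Assumed reduction of the Example #1 cases, for illustration:
// r7 is used iff the target is MachO, or is a non-Windows Thumb target.
static bool useR7AsFramePointer(bool IsMachO, bool IsWindows, bool IsThumb) {
  return IsMachO || (!IsWindows && IsThumb);
}

int main() {
  std::printf("EABI/Thumb uses r7: %d\n",
              useR7AsFramePointer(false, false, true)); // prints 1
}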
Example #7
ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  setAction({G_GLOBAL_VALUE, p0}, Legal);
  setAction({G_FRAME_INDEX, p0}, Legal);

  for (unsigned Op : {G_LOAD, G_STORE}) {
    for (auto Ty : {s1, s8, s16, s32, p0})
      setAction({Op, Ty}, Legal);
    setAction({Op, 1, p0}, Legal);
  }

  for (unsigned Op : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) {
    for (auto Ty : {s1, s8, s16})
      setAction({Op, Ty}, WidenScalar);
    setAction({Op, s32}, Legal);
  }

  for (unsigned Op : {G_SDIV, G_UDIV}) {
    for (auto Ty : {s8, s16})
      setAction({Op, Ty}, WidenScalar);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Legal);
    else
      setAction({Op, s32}, Libcall);
  }

  for (unsigned Op : {G_SREM, G_UREM}) {
    for (auto Ty : {s8, s16})
      setAction({Op, Ty}, WidenScalar);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  for (unsigned Op : {G_SEXT, G_ZEXT}) {
    setAction({Op, s32}, Legal);
    for (auto Ty : {s1, s8, s16})
      setAction({Op, 1, Ty}, Legal);
  }

  for (unsigned Op : {G_ASHR, G_LSHR, G_SHL})
    setAction({Op, s32}, Legal);

  setAction({G_GEP, p0}, Legal);
  setAction({G_GEP, 1, s32}, Legal);

  setAction({G_SELECT, s32}, Legal);
  setAction({G_SELECT, p0}, Legal);
  setAction({G_SELECT, 1, s1}, Legal);

  setAction({G_BRCOND, s1}, Legal);

  setAction({G_CONSTANT, s32}, Legal);
  for (auto Ty : {s1, s8, s16})
    setAction({G_CONSTANT, Ty}, WidenScalar);

  setAction({G_ICMP, s1}, Legal);
  for (auto Ty : {s8, s16})
    setAction({G_ICMP, 1, Ty}, WidenScalar);
  for (auto Ty : {s32, p0})
    setAction({G_ICMP, 1, Ty}, Legal);

  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    setAction({G_FADD, s32}, Legal);
    setAction({G_FADD, s64}, Legal);

    setAction({G_LOAD, s64}, Legal);
    setAction({G_STORE, s64}, Legal);

    setAction({G_FCMP, s1}, Legal);
    setAction({G_FCMP, 1, s32}, Legal);
    setAction({G_FCMP, 1, s64}, Legal);
  } else {
    for (auto Ty : {s32, s64})
      setAction({G_FADD, Ty}, Libcall);

    setAction({G_FCMP, s1}, Legal);
    setAction({G_FCMP, 1, s32}, Custom);
    setAction({G_FCMP, 1, s64}, Custom);

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();
  }

  for (unsigned Op : {G_FREM, G_FPOW})
    for (auto Ty : {s32, s64})
      setAction({Op, Ty}, Libcall);

  computeTables();
}
Example #8
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}