Example #1
0
std::string ARM_MC::ParseARMTriple(const Triple &TT, StringRef CPU) {
  bool isThumb =
      TT.getArch() == Triple::thumb || TT.getArch() == Triple::thumbeb;

  std::string ARMArchFeature;

  unsigned ArchID = ARM::parseArch(TT.getArchName());
  if (ArchID != ARM::AK_INVALID && (CPU.empty() || CPU == "generic"))
    ARMArchFeature = (ARMArchFeature + "+" + ARM::getArchName(ArchID)).str();

  if (isThumb) {
    if (ARMArchFeature.empty())
      ARMArchFeature = "+thumb-mode";
    else
      ARMArchFeature += ",+thumb-mode";
  }

  if (TT.isOSNaCl()) {
    if (ARMArchFeature.empty())
      ARMArchFeature = "+nacl-trap";
    else
      ARMArchFeature += ",+nacl-trap";
  }

  return ARMArchFeature;
}
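The two trailing blocks are just comma-joining a feature onto a possibly empty list. A minimal standalone sketch of that pattern (the helper name is illustrative, not LLVM API):

#include <string>

// Illustrative helper: append a subtarget feature to a comma-separated
// feature string, handling the empty-string case.
static void appendFeature(std::string &Features, const char *Feature) {
  if (!Features.empty())
    Features += ",";
  Features += Feature;
}

// With it, the thumb/NaCl blocks above would collapse to:
//   if (isThumb)       appendFeature(ARMArchFeature, "+thumb-mode");
//   if (TT.isOSNaCl()) appendFeature(ARMArchFeature, "+nacl-trap");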
Example #2
0
X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
  bool is64Bit = T.getArch() == Triple::x86_64;
  bool isX32 = T.getEnvironment() == Triple::GNUX32;

  // @LOCALMOD-BEGIN(eliben)
  // Until NaCl implies x32, we add && !isNaCl to the PointerSize condition.
  bool isNaCl = T.isOSNaCl();

  // For ELF, x86-64 pointer size depends on the ABI.
  // For x86-64 without the x32 ABI, pointer size is 8. For x86 and for x86-64
  // with the x32 ABI, pointer size remains the default 4.
  PointerSize = (is64Bit && !isX32 && !isNaCl) ? 8 : 4;
  // @LOCALMOD-END

  // OTOH, stack slot size is always 8 for x86-64, even with the x32 ABI.
  CalleeSaveStackSlotSize = is64Bit ? 8 : 4;

  AssemblerDialect = AsmWriterFlavor;

  TextAlignFillValue = 0x90;

  // Debug Information
  SupportsDebugInformation = true;

  // Exceptions handling
  ExceptionsType = ExceptionHandling::DwarfCFI;

  // Always enable the integrated assembler by default.
  // Clang also enabled it when the OS is Solaris but that is redundant here.
  UseIntegratedAssembler = true;
}
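A standalone restatement of the sizing rules above, with plain bools in place of the Triple queries (names are illustrative, not LLVM API); this is a sketch of the intent, not the constructor itself:

#include <cassert>

struct ELFSizes { unsigned PointerSize, CalleeSaveStackSlotSize; };

// Pointer size drops to 4 for x32 and (per the LOCALMOD) NaCl; stack slot
// size stays 8 on any x86-64 target.
static ELFSizes x86ELFSizes(bool Is64Bit, bool IsX32, bool IsNaCl) {
  return {(Is64Bit && !IsX32 && !IsNaCl) ? 8u : 4u, Is64Bit ? 8u : 4u};
}

int main() {
  assert(x86ELFSizes(true, false, false).PointerSize == 8);            // x86-64
  assert(x86ELFSizes(true, true, false).PointerSize == 4);             // x32
  assert(x86ELFSizes(true, false, true).PointerSize == 4);             // x86-64 NaCl
  assert(x86ELFSizes(true, true, false).CalleeSaveStackSlotSize == 8); // slots stay 8
  assert(x86ELFSizes(false, false, false).PointerSize == 4);           // ia32
}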
Example #3
0
static std::string computeDataLayout(const Triple &TT) {
  // X86 is little endian
  std::string Ret = "e";

  Ret += DataLayout::getManglingComponent(TT);
  // X86 and x32 have 32 bit pointers.
  if ((TT.isArch64Bit() &&
       (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
      !TT.isArch64Bit())
    Ret += "-p:32:32";

  // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
  if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
    Ret += "-i64:64";
  else if (TT.isOSIAMCU())
    Ret += "-i64:32-f64:32";
  else
    Ret += "-f64:32:64";

  // Some ABIs align long double to 128 bits, others to 32.
  if (TT.isOSNaCl() || TT.isOSIAMCU())
    ; // No f80
  else if (TT.isArch64Bit() || TT.isOSDarwin())
    Ret += "-f80:128";
  else
    Ret += "-f80:32";

  if (TT.isOSIAMCU())
    Ret += "-f128:32";

  // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
  if (TT.isArch64Bit())
    Ret += "-n8:16:32:64";
  else
    Ret += "-n8:16:32";

  // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
  if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
    Ret += "-a:0:32-S32";
  else
    Ret += "-S128";

  return Ret;
}
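To make the branch combinatorics concrete, a minimal sketch (no LLVM dependency) that mirrors the non-Windows, non-NaCl, non-IAMCU ELF paths above for plain i386 and x86-64; the asserted strings are the layout strings this logic should produce for those two cases:

#include <cassert>
#include <string>

// Illustrative mirror of computeDataLayout for generic ELF targets only.
static std::string sketchX86Layout(bool Is64Bit) {
  std::string Ret = "e-m:e";                     // little endian, ELF mangling
  if (!Is64Bit)
    Ret += "-p:32:32";                           // 32 bit pointers
  Ret += Is64Bit ? "-i64:64" : "-f64:32:64";     // i64/f64 alignment
  Ret += Is64Bit ? "-f80:128" : "-f80:32";       // long double alignment
  Ret += Is64Bit ? "-n8:16:32:64" : "-n8:16:32"; // native register widths
  Ret += "-S128";                                // 128 bit stack alignment
  return Ret;
}

int main() {
  assert(sketchX86Layout(false) ==
         "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128");
  assert(sketchX86Layout(true) ==
         "e-m:e-i64:64-f80:128-n8:16:32:64-S128");
}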
Example #4
0
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // Function pointers are aligned to 8 bits (because the LSB stores the
  // ARM/Thumb state).
  Ret += "-Fi8";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
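Similarly, a standalone mirror of the little-endian ELF cases above with the ABI reduced to a bool (true = AAPCS, false = APCS); the asserted strings are what these paths should produce:

#include <cassert>
#include <string>

// Illustrative mirror of computeDataLayout, ELF/little-endian only.
static std::string sketchARMLayout(bool IsAAPCS) {
  std::string Ret = "e-m:e-p:32:32-Fi8";       // little endian, ELF, 32 bit pointers
  if (IsAAPCS)
    Ret += "-i64:64-v128:64:128";              // natural i64, 64 bit vector alignment
  else
    Ret += "-f64:32:64-v64:32:64-v128:32:128"; // APCS 32 bit alignments
  Ret += "-a:0:32-n32";                        // 32 bit aggregates and registers
  Ret += IsAAPCS ? "-S64" : "-S32";            // stack alignment
  return Ret;
}

int main() {
  assert(sketchARMLayout(true) ==
         "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
  assert(sketchARMLayout(false) ==
         "e-m:e-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
}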
Example #5
0
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::x86_64)
      return make_unique<X86_64MachoTargetObjectFile>();
    return make_unique<TargetLoweringObjectFileMachO>();
  }

  if (TT.isOSLinux() || TT.isOSNaCl())
    return make_unique<X86LinuxNaClTargetObjectFile>();
  if (TT.isOSBinFormatELF())
    return make_unique<X86ELFTargetObjectFile>();
  if (TT.isKnownWindowsMSVCEnvironment() || TT.isWindowsCoreCLREnvironment())
    return make_unique<X86WindowsTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return make_unique<TargetLoweringObjectFileCOFF>();
  llvm_unreachable("unknown subtarget type");
}
Example #6
0
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::x86_64)
      return llvm::make_unique<X86_64MachoTargetObjectFile>();
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  }

  if (TT.isOSFreeBSD())
    return llvm::make_unique<X86FreeBSDTargetObjectFile>();
  if (TT.isOSLinux() || TT.isOSNaCl() || TT.isOSIAMCU())
    return llvm::make_unique<X86LinuxNaClTargetObjectFile>();
  if (TT.isOSSolaris())
    return llvm::make_unique<X86SolarisTargetObjectFile>();
  if (TT.isOSFuchsia())
    return llvm::make_unique<X86FuchsiaTargetObjectFile>();
  if (TT.isOSBinFormatELF())
    return llvm::make_unique<X86ELFTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  llvm_unreachable("unknown subtarget type");
}
Example #7
0
std::string ARM_MC::ParseARMTriple(const Triple &TT, StringRef CPU) {
  bool isThumb =
      TT.getArch() == Triple::thumb || TT.getArch() == Triple::thumbeb;

  bool NoCPU = CPU == "generic" || CPU.empty();
  std::string ARMArchFeature;
  switch (TT.getSubArch()) {
  default:
    llvm_unreachable("invalid sub-architecture for ARM");
  case Triple::ARMSubArch_v8:
    if (NoCPU)
      // v8a: FeatureDB, FeatureFPARMv8, FeatureNEON, FeatureDSPThumb2,
      //      FeatureMP, FeatureHWDiv, FeatureHWDivARM, FeatureTrustZone,
      //      FeatureT2XtPk, FeatureCrypto, FeatureCRC
      ARMArchFeature = "+v8,+db,+fp-armv8,+neon,+t2dsp,+mp,+hwdiv,+hwdiv-arm,"
                       "+trustzone,+t2xtpk,+crypto,+crc";
    else
      // Use CPU to figure out the exact features
      ARMArchFeature = "+v8";
    break;
  case Triple::ARMSubArch_v8_1a:
    if (NoCPU)
      // v8.1a: FeatureDB, FeatureFPARMv8, FeatureNEON, FeatureDSPThumb2,
      //      FeatureMP, FeatureHWDiv, FeatureHWDivARM, FeatureTrustZone,
      //      FeatureT2XtPk, FeatureCrypto, FeatureCRC, FeatureV8_1a
      ARMArchFeature = "+v8.1a,+db,+fp-armv8,+neon,+t2dsp,+mp,+hwdiv,+hwdiv-arm,"
                       "+trustzone,+t2xtpk,+crypto,+crc";
    else
      // Use CPU to figure out the exact features
      ARMArchFeature = "+v8.1a";
    break;
  case Triple::ARMSubArch_v7m:
    isThumb = true;
    if (NoCPU)
      // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
      ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
    else
      // Use CPU to figure out the exact features.
      ARMArchFeature = "+v7";
    break;
  case Triple::ARMSubArch_v7em:
    if (NoCPU)
      // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2,
      //       FeatureT2XtPk, FeatureMClass
      ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,+t2xtpk,+mclass";
    else
      // Use CPU to figure out the exact features.
      ARMArchFeature = "+v7";
    break;
  case Triple::ARMSubArch_v7s:
    if (NoCPU)
      // v7s: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureHasRAS
      //      Swift
      ARMArchFeature = "+v7,+swift,+neon,+db,+t2dsp,+ras";
    else
      // Use CPU to figure out the exact features.
      ARMArchFeature = "+v7";
    break;
  case Triple::ARMSubArch_v7:
    // v7 CPUs have lots of different feature sets. If no CPU is specified,
    // then assume v7a (e.g. cortex-a8) feature set. Otherwise, return
    // the "minimum" feature set and use CPU string to figure out the exact
    // features.
    if (NoCPU)
      // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
      ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk";
    else
      // Use CPU to figure out the exact features.
      ARMArchFeature = "+v7";
    break;
  case Triple::ARMSubArch_v6t2:
    ARMArchFeature = "+v6t2";
    break;
  case Triple::ARMSubArch_v6k:
    ARMArchFeature = "+v6k";
    break;
  case Triple::ARMSubArch_v6m:
    isThumb = true;
    if (NoCPU)
      // v6m: FeatureNoARM, FeatureMClass
      ARMArchFeature = "+v6m,+noarm,+mclass";
    else
      ARMArchFeature = "+v6";
    break;
  case Triple::ARMSubArch_v6:
    ARMArchFeature = "+v6";
    break;
  case Triple::ARMSubArch_v5te:
    ARMArchFeature = "+v5te";
    break;
  case Triple::ARMSubArch_v5:
    ARMArchFeature = "+v5t";
    break;
  case Triple::ARMSubArch_v4t:
    ARMArchFeature = "+v4t";
    break;
  case Triple::NoSubArch:
    break;
  }

  if (isThumb) {
    if (ARMArchFeature.empty())
      ARMArchFeature = "+thumb-mode";
    else
      ARMArchFeature += ",+thumb-mode";
  }

  if (TT.isOSNaCl()) {
    if (ARMArchFeature.empty())
      ARMArchFeature = "+nacl-trap";
    else
      ARMArchFeature += ",+nacl-trap";
  }

  return ARMArchFeature;
}
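Downstream, the string returned here is normally joined with any explicit feature string (e.g. from -mattr=) before the subtarget info is created; a minimal standalone sketch of that join, assuming plain comma-separated feature lists (the function name is illustrative, not LLVM API):

#include <cassert>
#include <string>

// Illustrative join of arch-derived features with user-supplied ones.
static std::string joinFeatures(const std::string &ArchFS,
                                const std::string &UserFS) {
  if (ArchFS.empty())
    return UserFS;
  if (UserFS.empty())
    return ArchFS;
  return ArchFS + "," + UserFS;
}

int main() {
  assert(joinFeatures("+v7,+thumb-mode", "+vfp3") == "+v7,+thumb-mode,+vfp3");
  assert(joinFeatures("", "+vfp3") == "+vfp3");
}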