Example No. 1
0
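The listing below is Clang's driver helper (from the ARM toolchain support code) that translates ARM-related command-line options into the target-feature strings handed to the LLVM backend.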
void arm::getARMTargetFeatures(const ToolChain &TC,
                               const llvm::Triple &Triple,
                               const ArgList &Args,
                               ArgStringList &CmdArgs,
                               std::vector<StringRef> &Features,
                               bool ForAS) {
  const Driver &D = TC.getDriver();

  bool KernelOrKext =
      Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
  arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
  const Arg *WaCPU = nullptr, *WaFPU = nullptr;
  const Arg *WaHDiv = nullptr, *WaArch = nullptr;

  if (!ForAS) {
    // FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
    // yet (it uses the -mfloat-abi and -msoft-float options), and it is
    // stripped out by the ARM target. We should probably pass this a new
    // -target-option, which is handled by the -cc1/-cc1as invocation.
    //
    // FIXME2: For consistency, it would be ideal if we set up the target
    // machine state the same when using the frontend or the assembler. We don't
    // currently do that for the assembler, we pass the options directly to the
    // backend and never even instantiate the frontend TargetInfo. If we did,
    // and used its handleTargetFeatures hook, then we could ensure the
    // assembler and the frontend behave the same.

    // Use software floating point operations?
    if (ABI == arm::FloatABI::Soft)
      Features.push_back("+soft-float");

    // Use software floating point argument passing?
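    // For example, -mfloat-abi=softfp (FloatABI::SoftFP) keeps hardware FP
    // instructions but still passes arguments in core registers, so only
    // "+soft-float-abi" is added here, not "+soft-float".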
    if (ABI != arm::FloatABI::Hard)
      Features.push_back("+soft-float-abi");
  } else {
    // Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
    // to the assembler correctly.
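    // For example, "-Wa,-mfpu=neon-vfpv4" arrives here as a single
    // OPT_Wa_COMMA argument whose value is the string "-mfpu=neon-vfpv4".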
    for (const Arg *A :
         Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
      StringRef Value = A->getValue();
      if (Value.startswith("-mfpu=")) {
        WaFPU = A;
      } else if (Value.startswith("-mcpu=")) {
        WaCPU = A;
      } else if (Value.startswith("-mhwdiv=")) {
        WaHDiv = A;
      } else if (Value.startswith("-march=")) {
        WaArch = A;
      }
    }
  }

  // Check -march. ClangAs gives preference to -Wa,-march=.
  const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
  StringRef ArchName;
  if (WaArch) {
    if (ArchArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << ArchArg->getAsString(Args);
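    // Strip the "-march=" prefix (7 characters); e.g. "-march=armv7-a"
    // becomes "armv7-a".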
    ArchName = StringRef(WaArch->getValue()).substr(7);
    checkARMArchName(D, WaArch, Args, ArchName, Features, Triple);
    // FIXME: Set Arch.
    D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
  } else if (ArchArg) {
    ArchName = ArchArg->getValue();
    checkARMArchName(D, ArchArg, Args, ArchName, Features, Triple);
  }

  // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
  const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
  StringRef CPUName;
  if (WaCPU) {
    if (CPUArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << CPUArg->getAsString(Args);
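    // Strip the "-mcpu=" prefix (6 characters); e.g. "-mcpu=cortex-a53"
    // becomes "cortex-a53".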
    CPUName = StringRef(WaCPU->getValue()).substr(6);
    checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Features, Triple);
  } else if (CPUArg) {
    CPUName = CPUArg->getValue();
    checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Features, Triple);
  }

  // Add CPU-specific features: query the host when -mcpu=native is given,
  // otherwise decode the features implied by the named CPU.
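  // Host features detected for -mcpu=native are forwarded explicitly as
  // "+feature" or "-feature" strings (e.g. "+neon", "-crypto").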
  if (CPUName == "native") {
    llvm::StringMap<bool> HostFeatures;
    if (llvm::sys::getHostCPUFeatures(HostFeatures))
      for (auto &F : HostFeatures)
        Features.push_back(
            Args.MakeArgString((F.second ? "+" : "-") + F.first()));
  } else if (!CPUName.empty()) {
    DecodeARMFeaturesFromCPU(D, CPUName, Features);
  }

  // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
  const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
  if (WaFPU) {
    if (FPUArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << FPUArg->getAsString(Args);
    getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
                      Features);
  } else if (FPUArg) {
    getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
  }

  // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
  const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
  if (WaHDiv) {
    if (HDivArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << HDivArg->getAsString(Args);
    getARMHWDivFeatures(D, WaHDiv, Args,
                        StringRef(WaHDiv->getValue()).substr(8), Features);
  } else if (HDivArg)
    getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);

  // Setting -msoft-float effectively disables NEON because of the GCC
  // implementation, although the same isn't true of VFP or VFP3.
  if (ABI == arm::FloatABI::Soft) {
    Features.push_back("-neon");
    // Also need to explicitly disable features which imply NEON.
    Features.push_back("-crypto");
  }

  // Enable/disable CRC code generation.
  if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
    if (A->getOption().matches(options::OPT_mcrc))
      Features.push_back("+crc");
    else
      Features.push_back("-crc");
  }

  // Look for the last occurrence of -mlong-calls or -mno-long-calls. If
  // neither options are specified, see if we are compiling for kernel/kext and
  // decide whether to pass "+long-calls" based on the OS and its version.
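  // "+long-calls" makes the backend materialize the callee's address in a
  // register and use an indirect call, so the target need not be within
  // direct-branch range.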
  if (Arg *A = Args.getLastArg(options::OPT_mlong_calls,
                               options::OPT_mno_long_calls)) {
    if (A->getOption().matches(options::OPT_mlong_calls))
      Features.push_back("+long-calls");
  } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
             !Triple.isWatchOS()) {
      Features.push_back("+long-calls");
  }

  // Generate execute-only output (no data access to code sections).
  // This only makes sense for the compiler, not for the assembler.
  if (!ForAS) {
    // Supported only on ARMv6T2 and ARMv7 and above.
    // Cannot be combined with -mno-movt or -mlong-calls
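    // (execute-only relies on movw/movt pairs to build constants instead of
    // loading them from literal pools, which would be data reads from a code
    // section).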
    if (Arg *A = Args.getLastArg(options::OPT_mexecute_only,
                                 options::OPT_mno_execute_only)) {
      if (A->getOption().matches(options::OPT_mexecute_only)) {
        if (getARMSubArchVersionNumber(Triple) < 7 &&
            llvm::ARM::parseArch(Triple.getArchName()) !=
                llvm::ARM::ArchKind::ARMV6T2)
          D.Diag(diag::err_target_unsupported_execute_only)
              << Triple.getArchName();
        else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
          D.Diag(diag::err_opt_not_valid_with_opt)
              << A->getAsString(Args) << B->getAsString(Args);
        // Long calls create constant pool entries and have not yet been fixed
        // up to play nicely with execute-only. Hence, they cannot be used in
        // execute-only code for now.
        else if (Arg *B = Args.getLastArg(options::OPT_mlong_calls,
                                          options::OPT_mno_long_calls)) {
          if (B->getOption().matches(options::OPT_mlong_calls))
            D.Diag(diag::err_opt_not_valid_with_opt)
                << A->getAsString(Args) << B->getAsString(Args);
        }
        Features.push_back("+execute-only");
      }
    }
  }

  // Kernel code has more strict alignment requirements.
  if (KernelOrKext)
    Features.push_back("+strict-align");
  else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
                                    options::OPT_munaligned_access)) {
    if (A->getOption().matches(options::OPT_munaligned_access)) {
      // No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
      if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
        D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
      // v8M Baseline follows on from v6M, so doesn't support unaligned memory
      // access either.
      else if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
        D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
    } else
      Features.push_back("+strict-align");
  } else {
    // Assume pre-ARMv6 doesn't support unaligned accesses.
    //
    // ARMv6 may or may not support unaligned accesses depending on the
    // SCTLR.U bit, which is architecture-specific. We assume ARMv6
    // Darwin and NetBSD targets support unaligned accesses, and others don't.
    //
    // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
    // which raises an alignment fault on unaligned accesses. Linux
    // defaults this bit to 0 and handles it as a system-wide (not
    // per-process) setting. It is therefore safe to assume that ARMv7+
    // Linux targets support unaligned accesses. The same goes for NaCl.
    //
    // The above behavior is consistent with GCC.
    int VersionNum = getARMSubArchVersionNumber(Triple);
    if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
      if (VersionNum < 6 ||
          Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
        Features.push_back("+strict-align");
    } else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
      if (VersionNum < 7)
        Features.push_back("+strict-align");
    } else
      Features.push_back("+strict-align");
  }

  // LLVM does not support reserving registers in general. There is support
  // for reserving r9 on ARM though (defined as a platform-specific register
  // in ARM EABI).
  if (Args.hasArg(options::OPT_ffixed_r9))
    Features.push_back("+reserve-r9");

  // The kext linker doesn't know how to deal with movw/movt.
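  // "+no-movt" makes the backend load 32-bit immediates from constant pools
  // instead of emitting movw/movt pairs.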
  if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
    Features.push_back("+no-movt");

  if (Args.hasArg(options::OPT_mno_neg_immediates))
    Features.push_back("+no-neg-immediates");
}
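
// A rough sketch of how a caller might consume the collected features; the
// call site below is an assumption modeled on how Clang's driver forwards
// target features to the -cc1 invocation, and is not part of this listing:
//
//   std::vector<StringRef> Features;
//   arm::getARMTargetFeatures(TC, Triple, Args, CmdArgs, Features,
//                             /*ForAS=*/false);
//   for (StringRef Feature : Features) {
//     CmdArgs.push_back("-target-feature");
//     CmdArgs.push_back(Args.MakeArgString(Feature));
//   }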