static void updateRuntimeLibraryPath(SearchPathOptions &SearchPathOpts,
                                     llvm::Triple &Triple) {
  llvm::SmallString<128> LibPath(SearchPathOpts.RuntimeResourcePath);

  llvm::sys::path::append(LibPath, getPlatformNameForTriple(Triple));
  SearchPathOpts.RuntimeLibraryPath = LibPath.str();

  // The linux provided triple for ARM contains a trailing 'l'
  // denoting little-endian. This is not used in the path for
  // libraries. LLVM matches these SubArchTypes to the generic
  // ARMSubArch_v7 (for example) type. If that is the case,
  // use the base of the architecture type in the library path.
  if (Triple.isOSLinux()) {
    switch (Triple.getSubArch()) {
    default:
      llvm::sys::path::append(LibPath, Triple.getArchName());
      break;
    case llvm::Triple::SubArchType::ARMSubArch_v7:
      llvm::sys::path::append(LibPath, "armv7");
      break;
    case llvm::Triple::SubArchType::ARMSubArch_v6:
      llvm::sys::path::append(LibPath, "armv6");
      break;
    }
  } else {
    llvm::sys::path::append(LibPath, Triple.getArchName());
  }
  SearchPathOpts.RuntimeLibraryImportPath = LibPath.str();
}
MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
                                 const ArgList &Args)
    : Generic_ELF(D, Triple, Args) {
  // If a target of 'sparc-myriad-elf' is specified to clang, it wants to use
  // 'sparc-myriad--elf' (note the unknown OS) as the canonical triple.
  // This won't work to find gcc. Instead we give the installation detector an
  // extra triple, which is preferable to further hacks of the logic that at
  // present is based solely on getArch(). In particular, it would be wrong to
  // choose the myriad installation when targeting a non-myriad sparc install.
  switch (Triple.getArch()) {
  default:
    D.Diag(clang::diag::err_target_unsupported_arch)
        << Triple.getArchName() << "myriad";
    LLVM_FALLTHROUGH; // still try the sparc-myriad-elf installation below
  case llvm::Triple::sparc:
  case llvm::Triple::sparcel:
  case llvm::Triple::shave:
    GCCInstallation.init(Triple, Args, {"sparc-myriad-elf"});
  }

  if (GCCInstallation.isValid()) {
    // This directory contains crt{i,n,begin,end}.o as well as libgcc.
    // These files are tied to a particular version of gcc.
    SmallString<128> CompilerSupportDir(GCCInstallation.getInstallPath());
    addPathIfExists(D, CompilerSupportDir, getFilePaths());
  }
  // libstdc++ and libc++ must both be found in this one place.
  addPathIfExists(D, D.Dir + "/../sparc-myriad-elf/lib", getFilePaths());
}
static std::string normalizeTriple(llvm::Triple Triple) {
  SmallString<64> T;
  T += Triple.getArchName();
  T += "-";
  T += Triple.getOSName();
  return T.str();
}
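// Hypothetical usage sketch (not part of the original sources). It assumes
// the normalizeTriple() definition above is in the same translation unit and
// the pre-LLVM-16 header layout (Triple.h under llvm/ADT). The helper keeps
// only the arch and OS components of a triple, dropping vendor and
// environment.
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // "x86_64-unknown-linux-gnu" -> "x86_64-linux"
  llvm::outs() << normalizeTriple(llvm::Triple("x86_64-unknown-linux-gnu"))
               << "\n";
  return 0;
}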
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
  std::string MArch;
  if (!Arch.empty())
    MArch = Arch;
  else
    MArch = Triple.getArchName();
  MArch = StringRef(MArch).split("+").first.lower();

  // Handle -march=native.
  if (MArch == "native") {
    std::string CPU = llvm::sys::getHostCPUName();
    if (CPU != "generic") {
      // Translate the native cpu into the architecture suffix for that CPU.
      StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
      // If there is no valid architecture suffix for this CPU we don't know
      // how to handle it, so return no architecture.
      if (Suffix.empty())
        MArch = "";
      else
        MArch = std::string("arm") + Suffix.str();
    }
  }

  return MArch;
}
StringRef swift::getMajorArchitectureName(const llvm::Triple &Triple) {
  if (Triple.isOSLinux()) {
    switch (Triple.getSubArch()) {
    default:
      return Triple.getArchName();
    case llvm::Triple::SubArchType::ARMSubArch_v7:
      return "armv7";
    case llvm::Triple::SubArchType::ARMSubArch_v6:
      return "armv6";
    }
  } else {
    return Triple.getArchName();
  }
}
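// Hypothetical standalone sketch (not from the Swift sources) of why the
// Linux special case exists: distro triples such as
// "armv7l-unknown-linux-gnueabihf" keep the little-endian 'l' suffix in the
// arch component, while LLVM folds the sub-arch down to the generic v7 kind.
// Assumes the pre-LLVM-16 header layout (Triple.h under llvm/ADT).
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Triple T("armv7l-unknown-linux-gnueabihf");
  llvm::outs() << T.getArchName() << "\n";                           // "armv7l"
  llvm::outs() << (T.getSubArch() ==
                   llvm::Triple::SubArchType::ARMSubArch_v7) << "\n"; // 1
  // getMajorArchitectureName(T) would therefore return "armv7", the directory
  // name the runtime library layout expects.
  return 0;
}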
/// Get the (LLVM) name of the Z80 cpu we are targeting.
static StringRef getZ80TargetCPU(const ArgList &Args,
                                 const llvm::Triple &Triple) {
  if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
    return A->getValue();

  // Select the default CPU if none was given (or detection failed).
  return Triple.getArchName();
}
// This function maps triples to the architecture component of the path
// where the swift_begin.o and swift_end.o objects can be found. This
// is a stop-gap until full Triple support (ala Clang) exists within swiftc.
StringRef getSectionMagicArch(const llvm::Triple &Triple) {
  if (Triple.isOSLinux()) {
    switch (Triple.getSubArch()) {
    default:
      return Triple.getArchName();
    case llvm::Triple::SubArchType::ARMSubArch_v7:
      return "armv7";
    case llvm::Triple::SubArchType::ARMSubArch_v6:
      return "armv6";
    }
  } else {
    return Triple.getArchName();
  }
}
static void updateRuntimeLibraryPath(SearchPathOptions &SearchPathOpts,
                                     llvm::Triple &Triple) {
  llvm::SmallString<128> LibPath(SearchPathOpts.RuntimeResourcePath);

  llvm::sys::path::append(LibPath, getPlatformNameForTriple(Triple));
  SearchPathOpts.RuntimeLibraryPath = LibPath.str();

  llvm::sys::path::append(LibPath, Triple.getArchName());
  SearchPathOpts.RuntimeLibraryImportPath = LibPath.str();
}
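// Hypothetical sketch (not from the Swift sources) of the directory layout the
// two assignments produce: RuntimeLibraryPath gets the platform directory and
// RuntimeLibraryImportPath gets the per-architecture subdirectory. The path
// values below are illustrative only.
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallString<128> LibPath("/usr/lib/swift"); // RuntimeResourcePath (assumed)
  llvm::sys::path::append(LibPath, "linux");        // getPlatformNameForTriple(...)
  llvm::outs() << LibPath << "\n";                  // "/usr/lib/swift/linux"
  llvm::sys::path::append(LibPath, "x86_64");       // Triple.getArchName()
  llvm::outs() << LibPath << "\n";                  // "/usr/lib/swift/linux/x86_64"
  return 0;
}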
static std::string getCPU(llvm::Triple const &triple) {
  switch (triple.getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return "arm";
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return "ppc";
  case llvm::Triple::sparc:
  case llvm::Triple::sparcv9:
    return "sparc";
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return "x86";
  default:
    return triple.getArchName().str();
  }
}
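// Hypothetical usage sketch (not from the original sources): getCPU() folds
// 32- and 64-bit (and thumb) variants into one family name. Assumes the
// getCPU() definition above is in the same translation unit and the
// pre-LLVM-16 header layout.
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

int main() {
  assert(getCPU(llvm::Triple("x86_64-pc-linux-gnu")) == "x86");
  assert(getCPU(llvm::Triple("thumbv7-none-eabi")) == "arm");
  // Architectures without a special case fall through to getArchName():
  llvm::outs() << getCPU(llvm::Triple("aarch64-unknown-linux-gnu"))
               << "\n"; // "aarch64"
  return 0;
}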
Statement *asmSemantic(AsmStatement *s, Scope *sc) {
  if (sc->func && sc->func->isSafe())
    s->error("inline assembler not allowed in @safe function %s",
             sc->func->toChars());

  bool err = false;
  llvm::Triple const t = global.params.targetTriple;
  if (!(t.getArch() == llvm::Triple::x86 ||
        t.getArch() == llvm::Triple::x86_64)) {
    s->error("inline asm is not supported for the \"%s\" architecture",
             t.getArchName().str().c_str());
    err = true;
  }
  if (!global.params.useInlineAsm) {
    s->error("inline asm is not allowed when the -noasm switch is used");
    err = true;
  }
  if (err)
    fatal();

  //puts(toChars());

  sc->func->hasReturnExp |= 8;

  // empty statement -- still do the above things because they might be
  // expected?
  if (!s->tokens)
    return s;

  if (!asmparser) {
    if (t.getArch() == llvm::Triple::x86)
      asmparser = new AsmParserx8632::AsmParser;
    else if (t.getArch() == llvm::Triple::x86_64)
      asmparser = new AsmParserx8664::AsmParser;
  }

  asmparser->run(sc, s);

  return s;
}
static std::string getARMTargetCPU(const llvm::Triple &triple) {
  const char *result =
      llvm::StringSwitch<const char *>(triple.getArchName())
          .Cases("armv2", "armv2a", "arm2")
          .Case("armv3", "arm6")
          .Case("armv3m", "arm7m")
          .Case("armv4", "strongarm")
          .Case("armv4t", "arm7tdmi")
          .Cases("armv5", "armv5t", "arm10tdmi")
          .Cases("armv5e", "armv5te", "arm1026ejs")
          .Case("armv5tej", "arm926ej-s")
          .Cases("armv6", "armv6k", "arm1136jf-s")
          .Case("armv6j", "arm1136j-s")
          .Cases("armv6z", "armv6zk", "arm1176jzf-s")
          .Case("armv6t2", "arm1156t2-s")
          .Cases("armv6m", "armv6-m", "cortex-m0")
          .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
          .Cases("armv7l", "armv7-l", "cortex-a8")
          .Cases("armv7f", "armv7-f", "cortex-a9-mp")
          .Cases("armv7s", "armv7-s", "swift")
          .Cases("armv7r", "armv7-r", "cortex-r4")
          .Cases("armv7m", "armv7-m", "cortex-m3")
          .Cases("armv7em", "armv7e-m", "cortex-m4")
          .Cases("armv8", "armv8a", "armv8-a", "cortex-a53")
          .Case("ep9312", "ep9312")
          .Case("iwmmxt", "iwmmxt")
          .Case("xscale", "xscale")
          // If all else failed, return the most base CPU with thumb
          // interworking supported by LLVM.
          .Default(nullptr);

  if (result) {
    return result;
  }
  return (triple.getEnvironment() == llvm::Triple::GNUEABIHF)
             ? "arm1176jzf-s"
             : "arm7tdmi";
}
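// Hypothetical usage sketch (not from the original sources): the StringSwitch
// keys on the raw arch spelling in the triple, so the same sub-architecture
// can be written several ways and still select the same default CPU. Assumes
// the getARMTargetCPU() definition above is in the same translation unit and
// the pre-LLVM-16 header layout.
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::outs() << getARMTargetCPU(llvm::Triple("armv7a-unknown-linux-gnueabi"))
               << "\n"; // "cortex-a8"
  // An unrecognized spelling ("arm") falls back on the environment default.
  llvm::outs() << getARMTargetCPU(llvm::Triple("arm-unknown-linux-gnueabihf"))
               << "\n"; // "arm1176jzf-s"
  return 0;
}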
bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
                                    const llvm::Triple &Triple) const {
  // Check if user requested no clang, or clang doesn't understand this type
  // (we only handle single inputs for now).
  if (!CCCUseClang || JA.size() != 1 ||
      !types::isAcceptedByClang((*JA.begin())->getType()))
    return false;

  // Otherwise make sure this is an action clang understands.
  if (isa<PreprocessJobAction>(JA)) {
    if (!CCCUseClangCPP) {
      Diag(clang::diag::warn_drv_not_using_clang_cpp);
      return false;
    }
  } else if (!isa<PrecompileJobAction>(JA) && !isa<CompileJobAction>(JA))
    return false;

  // Use clang for C++?
  if (!CCCUseClangCXX && types::isCXX((*JA.begin())->getType())) {
    Diag(clang::diag::warn_drv_not_using_clang_cxx);
    return false;
  }

  // Always use clang for precompiling, AST generation, and rewriting,
  // regardless of archs.
  if (isa<PrecompileJobAction>(JA) || JA.getType() == types::TY_AST ||
      JA.getType() == types::TY_RewrittenObjC)
    return true;

  // Finally, don't use clang if this isn't one of the user specified archs to
  // build.
  if (!CCCClangArchs.empty() && !CCCClangArchs.count(Triple.getArch())) {
    Diag(clang::diag::warn_drv_not_using_clang_arch) << Triple.getArchName();
    return false;
  }

  return true;
}
void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
                               const ArgList &Args,
                               std::vector<StringRef> &Features) {
  // If -march=native, autodetect the feature list.
  if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
    if (StringRef(A->getValue()) == "native") {
      llvm::StringMap<bool> HostFeatures;
      if (llvm::sys::getHostCPUFeatures(HostFeatures))
        for (auto &F : HostFeatures)
          Features.push_back(
              Args.MakeArgString((F.second ? "+" : "-") + F.first()));
    }
  }

  if (Triple.getArchName() == "x86_64h") {
    // x86_64h implies quite a few of the more modern subtarget features
    // for Haswell class CPUs, but not all of them. Opt-out of a few.
    Features.push_back("-rdrnd");
    Features.push_back("-aes");
    Features.push_back("-pclmul");
    Features.push_back("-rtm");
    Features.push_back("-fsgsbase");
  }

  const llvm::Triple::ArchType ArchType = Triple.getArch();
  // Add features to be compatible with gcc for Android.
  if (Triple.isAndroid()) {
    if (ArchType == llvm::Triple::x86_64) {
      Features.push_back("+sse4.2");
      Features.push_back("+popcnt");
    } else
      Features.push_back("+ssse3");
  }

  // Set features according to the -arch flag on MSVC.
  if (Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
    StringRef Arch = A->getValue();
    bool ArchUsed = false;
    // First, look for flags that are shared in x86 and x86-64.
    if (ArchType == llvm::Triple::x86_64 || ArchType == llvm::Triple::x86) {
      if (Arch == "AVX" || Arch == "AVX2") {
        ArchUsed = true;
        Features.push_back(Args.MakeArgString("+" + Arch.lower()));
      }
    }
    // Then, look for x86-specific flags.
    if (ArchType == llvm::Triple::x86) {
      if (Arch == "IA32") {
        ArchUsed = true;
      } else if (Arch == "SSE" || Arch == "SSE2") {
        ArchUsed = true;
        Features.push_back(Args.MakeArgString("+" + Arch.lower()));
      }
    }
    if (!ArchUsed)
      D.Diag(clang::diag::warn_drv_unused_argument) << A->getAsString(Args);
  }

  // Now add any that the user explicitly requested on the command line,
  // which may override the defaults.
  handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
}
// True if M-profile.
bool arm::isARMMProfile(const llvm::Triple &Triple) {
  llvm::StringRef Arch = Triple.getArchName();
  return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::M;
}
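// Hypothetical usage sketch (not from the Clang sources): parseArchProfile()
// canonicalizes the arch spelling first, so "thumbv7m"-style and
// "armv7-m"-style names resolve to the same profile. Header paths assume an
// LLVM tree where the ARM target parser still lives under llvm/Support.
#include "llvm/ADT/Triple.h"
#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/raw_ostream.h"

static bool isMProfileDemo(const llvm::Triple &T) {
  return llvm::ARM::parseArchProfile(T.getArchName()) ==
         llvm::ARM::ProfileKind::M;
}

int main() {
  llvm::outs() << isMProfileDemo(llvm::Triple("thumbv7m-none-eabi")) << "\n"; // 1
  llvm::outs() << isMProfileDemo(llvm::Triple("armv7a-none-eabi")) << "\n";   // 0
  return 0;
}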
void arm::getARMTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
                               const ArgList &Args, ArgStringList &CmdArgs,
                               std::vector<StringRef> &Features, bool ForAS) {
  const Driver &D = TC.getDriver();

  bool KernelOrKext =
      Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
  arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
  const Arg *WaCPU = nullptr, *WaFPU = nullptr;
  const Arg *WaHDiv = nullptr, *WaArch = nullptr;

  if (!ForAS) {
    // FIXME: Note, this is a hack, the LLVM backend doesn't actually use these
    // yet (it uses the -mfloat-abi and -msoft-float options), and it is
    // stripped out by the ARM target. We should probably pass this a new
    // -target-option, which is handled by the -cc1/-cc1as invocation.
    //
    // FIXME2: For consistency, it would be ideal if we set up the target
    // machine state the same when using the frontend or the assembler. We
    // don't currently do that for the assembler, we pass the options directly
    // to the backend and never even instantiate the frontend TargetInfo. If we
    // did, and used its handleTargetFeatures hook, then we could ensure the
    // assembler and the frontend behave the same.

    // Use software floating point operations?
    if (ABI == arm::FloatABI::Soft)
      Features.push_back("+soft-float");

    // Use software floating point argument passing?
    if (ABI != arm::FloatABI::Hard)
      Features.push_back("+soft-float-abi");
  } else {
    // Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
    // to the assembler correctly.
    for (const Arg *A :
         Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
      StringRef Value = A->getValue();
      if (Value.startswith("-mfpu=")) {
        WaFPU = A;
      } else if (Value.startswith("-mcpu=")) {
        WaCPU = A;
      } else if (Value.startswith("-mhwdiv=")) {
        WaHDiv = A;
      } else if (Value.startswith("-march=")) {
        WaArch = A;
      }
    }
  }

  // Check -march. ClangAs gives preference to -Wa,-march=.
  const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
  StringRef ArchName;
  if (WaArch) {
    if (ArchArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << ArchArg->getAsString(Args);
    ArchName = StringRef(WaArch->getValue()).substr(7);
    checkARMArchName(D, WaArch, Args, ArchName, Features, Triple);
    // FIXME: Set Arch.
    D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
  } else if (ArchArg) {
    ArchName = ArchArg->getValue();
    checkARMArchName(D, ArchArg, Args, ArchName, Features, Triple);
  }

  // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
  const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
  StringRef CPUName;
  if (WaCPU) {
    if (CPUArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << CPUArg->getAsString(Args);
    CPUName = StringRef(WaCPU->getValue()).substr(6);
    checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Features, Triple);
  } else if (CPUArg) {
    CPUName = CPUArg->getValue();
    checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Features, Triple);
  }

  // Add CPU features for generic CPUs
  if (CPUName == "native") {
    llvm::StringMap<bool> HostFeatures;
    if (llvm::sys::getHostCPUFeatures(HostFeatures))
      for (auto &F : HostFeatures)
        Features.push_back(
            Args.MakeArgString((F.second ? "+" : "-") + F.first()));
  } else if (!CPUName.empty()) {
    DecodeARMFeaturesFromCPU(D, CPUName, Features);
  }

  // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
  const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
  if (WaFPU) {
    if (FPUArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << FPUArg->getAsString(Args);
    getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
                      Features);
  } else if (FPUArg) {
    getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
  }

  // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
  const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
  if (WaHDiv) {
    if (HDivArg)
      D.Diag(clang::diag::warn_drv_unused_argument)
          << HDivArg->getAsString(Args);
    getARMHWDivFeatures(D, WaHDiv, Args,
                        StringRef(WaHDiv->getValue()).substr(8), Features);
  } else if (HDivArg)
    getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);

  // Setting -msoft-float effectively disables NEON because of the GCC
  // implementation, although the same isn't true of VFP or VFP3.
  if (ABI == arm::FloatABI::Soft) {
    Features.push_back("-neon");
    // Also need to explicitly disable features which imply NEON.
    Features.push_back("-crypto");
  }

  // En/disable crc code generation.
  if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
    if (A->getOption().matches(options::OPT_mcrc))
      Features.push_back("+crc");
    else
      Features.push_back("-crc");
  }

  // Look for the last occurrence of -mlong-calls or -mno-long-calls. If
  // neither options are specified, see if we are compiling for kernel/kext
  // and decide whether to pass "+long-calls" based on the OS and its version.
  if (Arg *A = Args.getLastArg(options::OPT_mlong_calls,
                               options::OPT_mno_long_calls)) {
    if (A->getOption().matches(options::OPT_mlong_calls))
      Features.push_back("+long-calls");
  } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
             !Triple.isWatchOS()) {
    Features.push_back("+long-calls");
  }

  // Generate execute-only output (no data access to code sections).
  // This only makes sense for the compiler, not for the assembler.
  if (!ForAS) {
    // Supported only on ARMv6T2 and ARMv7 and above.
    // Cannot be combined with -mno-movt or -mlong-calls
    if (Arg *A = Args.getLastArg(options::OPT_mexecute_only,
                                 options::OPT_mno_execute_only)) {
      if (A->getOption().matches(options::OPT_mexecute_only)) {
        if (getARMSubArchVersionNumber(Triple) < 7 &&
            llvm::ARM::parseArch(Triple.getArchName()) !=
                llvm::ARM::ArchKind::ARMV6T2)
          D.Diag(diag::err_target_unsupported_execute_only)
              << Triple.getArchName();
        else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
          D.Diag(diag::err_opt_not_valid_with_opt)
              << A->getAsString(Args) << B->getAsString(Args);
        // Long calls create constant pool entries and have not yet been fixed
        // up to play nicely with execute-only. Hence, they cannot be used in
        // execute-only code for now
        else if (Arg *B = Args.getLastArg(options::OPT_mlong_calls,
                                          options::OPT_mno_long_calls)) {
          if (B->getOption().matches(options::OPT_mlong_calls))
            D.Diag(diag::err_opt_not_valid_with_opt)
                << A->getAsString(Args) << B->getAsString(Args);
        }
        Features.push_back("+execute-only");
      }
    }
  }

  // Kernel code has more strict alignment requirements.
  if (KernelOrKext)
    Features.push_back("+strict-align");
  else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
                                    options::OPT_munaligned_access)) {
    if (A->getOption().matches(options::OPT_munaligned_access)) {
      // No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
      if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
        D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
      // v8M Baseline follows on from v6M, so doesn't support unaligned memory
      // access either.
      else if (Triple.getSubArch() ==
               llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
        D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
    } else
      Features.push_back("+strict-align");
  } else {
    // Assume pre-ARMv6 doesn't support unaligned accesses.
    //
    // ARMv6 may or may not support unaligned accesses depending on the
    // SCTLR.U bit, which is architecture-specific. We assume ARMv6
    // Darwin and NetBSD targets support unaligned accesses, and others don't.
    //
    // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
    // which raises an alignment fault on unaligned accesses. Linux
    // defaults this bit to 0 and handles it as a system-wide (not
    // per-process) setting. It is therefore safe to assume that ARMv7+
    // Linux targets support unaligned accesses. The same goes for NaCl.
    //
    // The above behavior is consistent with GCC.
    int VersionNum = getARMSubArchVersionNumber(Triple);
    if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
      if (VersionNum < 6 ||
          Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
        Features.push_back("+strict-align");
    } else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
      if (VersionNum < 7)
        Features.push_back("+strict-align");
    } else
      Features.push_back("+strict-align");
  }

  // llvm does not support reserving registers in general. There is support
  // for reserving r9 on ARM though (defined as a platform-specific register
  // in ARM EABI).
  if (Args.hasArg(options::OPT_ffixed_r9))
    Features.push_back("+reserve-r9");

  // The kext linker doesn't know how to deal with movw/movt.
  if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
    Features.push_back("+no-movt");

  if (Args.hasArg(options::OPT_mno_neg_immediates))
    Features.push_back("+no-neg-immediates");
}
// Get SubArch (vN).
int arm::getARMSubArchVersionNumber(const llvm::Triple &Triple) {
  llvm::StringRef Arch = Triple.getArchName();
  return llvm::ARM::parseArchVersion(Arch);
}
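// Hypothetical usage sketch (not from the Clang sources): parseArchVersion()
// canonicalizes its input as well, so "arm", "thumb", and versioned spellings
// resolve to the same numeric version. Header paths assume an LLVM tree where
// the ARM target parser still lives under llvm/Support.
#include "llvm/ADT/Triple.h"
#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::outs() << llvm::ARM::parseArchVersion(
                      llvm::Triple("armv7a-none-eabi").getArchName())
               << "\n"; // 7
  llvm::outs() << llvm::ARM::parseArchVersion(
                      llvm::Triple("thumbv6m-none-eabi").getArchName())
               << "\n"; // 6
  return 0;
}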