/// Returns the LLVM name of the target CPU to use given the provided /// -mcpu argument and target triple. static std::string getTargetCPU(const std::string &cpu, const llvm::Triple &triple) { if (!cpu.empty()) { if (cpu != "native") return cpu; // FIXME: Reject attempts to use -mcpu=native unless the target matches // the host. std::string hostCPU = llvm::sys::getHostCPUName(); if (!hostCPU.empty() && hostCPU != "generic") return hostCPU; } switch (triple.getArch()) { default: // We don't know about the specifics of this platform, just return the // empty string and let LLVM decide. return cpu; case llvm::Triple::x86: case llvm::Triple::x86_64: return getX86TargetCPU(triple); case llvm::Triple::arm: return getARMTargetCPU(triple); } }
/// Returns the LLVM name of the target CPU to use given the provided
/// -mcpu argument and target triple.
///
/// Params:
///   specifiedCPU = the -mcpu value; "" picks a per-arch default, "native"
///                  asks for the host CPU.
///   triple       = the (already normalized) target triple.
std::string getTargetCPU(const std::string& specifiedCPU, const llvm::Triple& triple)
{
    if (!specifiedCPU.empty()) {
        if (specifiedCPU == "native") {
            // Try to get the host CPU name.
            const std::string hostCPU = llvm::sys::getHostCPUName();
            if (!hostCPU.empty() && hostCPU != "generic") {
                return hostCPU;
            }
            // Host CPU detection failed (empty or "generic"); fall through
            // to the per-arch defaults below instead of forwarding "native".
        } else {
            return specifiedCPU;
        }
    }

    switch (triple.getArch()) {
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
        return getX86TargetCPU(triple);
    case llvm::Triple::arm:
        return getARMTargetCPU(triple);
    default:
        // Unknown platform, so just pass to LLVM and let it decide.
        // Bug fix: never hand "native" through here — LLVM does not
        // recognize it as a real CPU name.
        return specifiedCPU == "native" ? std::string() : specifiedCPU;
    }
}
/// Creates an llvm::TargetMachine from the command-line target selection.
///
/// Params:
///   targetTriple    = explicit -mtriple value; "" means "use the host's
///                     default triple" (possibly bitness-adjusted, see below)
///   arch            = explicit -march backend name; "" derives the backend
///                     from the triple via TargetRegistry::lookupTarget
///   cpu             = -mcpu value; "" selects a default (x86 only, below)
///   attrs           = subtarget feature strings passed to SubtargetFeatures
///   bitness         = -m32/-m64 override applied to the default triple
///   relocModel      = requested relocation model (forced to PIC on OS X)
///   codeModel       = code model forwarded to LLVM unchanged
///   codeGenOptLevel = backend optimization level forwarded unchanged
///   genDebugInfo    = if true, disables frame-pointer elimination
///
/// Calls error()/fatal() (does not return) on an unusable target selection.
llvm::TargetMachine* createTargetMachine(
    std::string targetTriple,
    std::string arch,
    std::string cpu,
    std::vector<std::string> attrs,
    ExplicitBitness::Type bitness,
    llvm::Reloc::Model relocModel,
    llvm::CodeModel::Model codeModel,
    llvm::CodeGenOpt::Level codeGenOptLevel,
    bool genDebugInfo)
{
    // override triple if needed
    // -m64 on a 32-bit host (or -m32 on a 64-bit host) rewrites the default
    // triple to the other bitness variant; sizeof(void*) probes the host.
    std::string defaultTriple = llvm::sys::getDefaultTargetTriple();
    if (sizeof(void*) == 4 && bitness == ExplicitBitness::M64)
    {
        defaultTriple = llvm::Triple(defaultTriple).get64BitArchVariant().str();
    }
    else if (sizeof(void*) == 8 && bitness == ExplicitBitness::M32)
    {
        defaultTriple = llvm::Triple(defaultTriple).get32BitArchVariant().str();
    }

    llvm::Triple triple;

    // did the user override the target triple?
    if (targetTriple.empty())
    {
        // -arch only makes sense relative to an explicit triple.
        if (!arch.empty())
        {
            error("you must specify a target triple as well with -mtriple when using the -arch option");
            fatal();
        }
        triple = llvm::Triple(defaultTriple);
    }
    else
    {
        triple = llvm::Triple(llvm::Triple::normalize(targetTriple));
    }

    // Allocate target machine.
    const llvm::Target *theTarget = NULL;
    // Check whether the user has explicitly specified an architecture to compile for.
    if (arch.empty())
    {
        // Let the registry pick the backend matching the triple.
        std::string Err;
        theTarget = llvm::TargetRegistry::lookupTarget(triple.str(), Err);
        if (theTarget == 0)
        {
            error("%s Please use the -march option.", Err.c_str());
            fatal();
        }
    }
    else
    {
        // Linear scan of all registered backends for one whose name
        // matches -march exactly.
        for (llvm::TargetRegistry::iterator it = llvm::TargetRegistry::begin(),
            ie = llvm::TargetRegistry::end(); it != ie; ++it)
        {
            if (arch == it->getName())
            {
                theTarget = &*it;
                break;
            }
        }

        if (!theTarget)
        {
            error("invalid target '%s'", arch.c_str());
            fatal();
        }
    }

    // With an empty CPU string, LLVM will default to the host CPU, which is
    // usually not what we want (expected behavior from other compilers is
    // to default to "generic").
    if (cpu.empty())
    {
        if (triple.getArch() == llvm::Triple::x86_64 ||
            triple.getArch() == llvm::Triple::x86)
        {
            // NOTE(review): called with (arch, triple) here, whereas the
            // getTargetCPU helpers call getX86TargetCPU(triple) — presumably
            // a distinct overload; verify against its declaration.
            cpu = getX86TargetCPU(arch, triple);
        }
    }

    // Package up features to be passed to target/subtarget
    // (only attrs feed SubtargetFeatures; cpu merely gates whether the
    // feature string is built at all).
    std::string FeaturesStr;
    if (cpu.size() || attrs.size())
    {
        llvm::SubtargetFeatures Features;
        for (unsigned i = 0; i != attrs.size(); ++i)
            Features.AddFeature(attrs[i]);
        FeaturesStr = Features.getString();
    }

    if (triple.isMacOSX() && relocModel == llvm::Reloc::Default)
    {
        // OS X defaults to PIC (and as of 10.7.5/LLVM 3.1-3.3, TLS use leads
        // to crashes for non-PIC code). LLVM doesn't handle this.
        relocModel = llvm::Reloc::PIC_;
    }

    llvm::TargetOptions targetOptions;
    // Keep the frame pointer when emitting debug info, for usable backtraces.
    targetOptions.NoFramePointerElim = genDebugInfo;

    return theTarget->createTargetMachine(
        triple.str(),
        cpu,
        FeaturesStr,
        targetOptions,
        relocModel,
        codeModel,
        codeGenOptLevel
    );
}