llvm::TargetMachine *createTargetMachine( std::string targetTriple, std::string arch, std::string cpu, std::vector<std::string> attrs, ExplicitBitness::Type bitness, FloatABI::Type floatABI, llvm::Reloc::Model relocModel, llvm::CodeModel::Model codeModel, llvm::CodeGenOpt::Level codeGenOptLevel, bool noFramePointerElim, bool noLinkerStripDead) { // Determine target triple. If the user didn't explicitly specify one, use // the one set at LLVM configure time. llvm::Triple triple; if (targetTriple.empty()) { triple = llvm::Triple(llvm::sys::getDefaultTargetTriple()); // We only support OSX, so darwin should really be macosx. if (triple.getOS() == llvm::Triple::Darwin) { triple.setOS(llvm::Triple::MacOSX); } // Handle -m32/-m64. if (sizeof(void *) == 4 && bitness == ExplicitBitness::M64) { triple = triple.get64BitArchVariant(); } else if (sizeof(void *) == 8 && bitness == ExplicitBitness::M32) { triple = triple.get32BitArchVariant(); } } else { triple = llvm::Triple(llvm::Triple::normalize(targetTriple)); } // Look up the LLVM backend to use. This also updates triple with the // user-specified arch, if any. std::string errMsg; const llvm::Target *target = lookupTarget(arch, triple, errMsg); if (target == nullptr) { error(Loc(), "%s", errMsg.c_str()); fatal(); } // Package up features to be passed to target/subtarget. llvm::SubtargetFeatures features; features.getDefaultSubtargetFeatures(triple); if (cpu == "native") { llvm::StringMap<bool> hostFeatures; if (llvm::sys::getHostCPUFeatures(hostFeatures)) { for (const auto &hf : hostFeatures) { features.AddFeature( std::string(hf.second ? 
"+" : "-").append(hf.first())); } } } #if LDC_LLVM_VER < 307 if (triple.getArch() == llvm::Triple::mips || triple.getArch() == llvm::Triple::mipsel || triple.getArch() == llvm::Triple::mips64 || triple.getArch() == llvm::Triple::mips64el) { addMipsABI(triple, attrs); } #endif for (auto &attr : attrs) { features.AddFeature(attr); } // With an empty CPU string, LLVM will default to the host CPU, which is // usually not what we want (expected behavior from other compilers is // to default to "generic"). cpu = getTargetCPU(cpu, triple); // cmpxchg16b is not available on old 64bit CPUs. Enable code generation // if the user did not make an explicit choice. if (cpu == "x86-64") { const char *cx16_plus = "+cx16"; const char *cx16_minus = "-cx16"; bool cx16 = false; for (auto &attr : attrs) { if (attr == cx16_plus || attr == cx16_minus) { cx16 = true; } } if (!cx16) { features.AddFeature(cx16_plus); } } if (Logger::enabled()) { Logger::println("Targeting '%s' (CPU '%s' with features '%s')", triple.str().c_str(), cpu.c_str(), features.getString().c_str()); } if (triple.isMacOSX() && relocModel == llvm::Reloc::Default) { // OS X defaults to PIC (and as of 10.7.5/LLVM 3.1-3.3, TLS use leads // to crashes for non-PIC code). LLVM doesn't handle this. relocModel = llvm::Reloc::PIC_; } if (floatABI == FloatABI::Default) { switch (triple.getArch()) { default: // X86, ... 
floatABI = FloatABI::Hard; break; case llvm::Triple::arm: case llvm::Triple::thumb: floatABI = getARMFloatABI(triple, getLLVMArchSuffixForARM(cpu)); break; } } llvm::TargetOptions targetOptions; #if LDC_LLVM_VER < 307 targetOptions.NoFramePointerElim = noFramePointerElim; #endif #if LDC_LLVM_VER >= 307 targetOptions.MCOptions.ABIName = getABI(triple); #endif switch (floatABI) { default: llvm_unreachable("Floating point ABI type unknown."); case FloatABI::Soft: #if LDC_LLVM_VER < 307 targetOptions.UseSoftFloat = true; #endif targetOptions.FloatABIType = llvm::FloatABI::Soft; break; case FloatABI::SoftFP: #if LDC_LLVM_VER < 307 targetOptions.UseSoftFloat = false; #endif targetOptions.FloatABIType = llvm::FloatABI::Soft; break; case FloatABI::Hard: #if LDC_LLVM_VER < 307 targetOptions.UseSoftFloat = false; #endif targetOptions.FloatABIType = llvm::FloatABI::Hard; break; } // Right now, we only support linker-level dead code elimination on Linux // using the GNU toolchain (based on ld's --gc-sections flag). The Apple ld // on OS X supports a similar flag (-dead_strip) that doesn't require // emitting the symbols into different sections. The MinGW ld doesn't seem // to support --gc-sections at all, and FreeBSD needs more investigation. if (!noLinkerStripDead && (triple.getOS() == llvm::Triple::Linux || triple.getOS() == llvm::Triple::Win32)) { targetOptions.FunctionSections = true; targetOptions.DataSections = true; } return target->createTargetMachine(triple.str(), cpu, features.getString(), targetOptions, relocModel, codeModel, codeGenOptLevel); }
/// Creates an llvm::TargetMachine for the given target selection.
///
/// Resolves the triple (defaulting to the LLVM host triple, adjusted for
/// -m32/-m64), looks up the backend, assembles the subtarget feature string,
/// picks the float ABI, and configures target options before delegating to
/// the LLVM target registry. Calls fatal() if the backend lookup fails.
///
/// NOTE: the #else branches below target LLVM < 3.5 (pre-C++11 builds), so
/// this function intentionally sticks to C++03-compatible constructs.
llvm::TargetMachine *createTargetMachine(
    std::string targetTriple, std::string arch, std::string cpu,
    std::vector<std::string> attrs, ExplicitBitness::Type bitness,
    FloatABI::Type floatABI, llvm::Reloc::Model relocModel,
    llvm::CodeModel::Model codeModel, llvm::CodeGenOpt::Level codeGenOptLevel,
    bool noFramePointerElim, bool noLinkerStripDead)
{
    // Determine target triple. If the user didn't explicitly specify one, use
    // the one set at LLVM configure time.
    llvm::Triple triple;
    if (!targetTriple.empty())
    {
        triple = llvm::Triple(llvm::Triple::normalize(targetTriple));
    }
    else
    {
        triple = llvm::Triple(llvm::sys::getDefaultTargetTriple());

        // Handle -m32/-m64.
        if (sizeof(void*) == 4 && bitness == ExplicitBitness::M64)
            triple = triple.get64BitArchVariant();
        else if (sizeof(void*) == 8 && bitness == ExplicitBitness::M32)
            triple = triple.get32BitArchVariant();
    }

    // Look up the LLVM backend to use. This also updates triple with the
    // user-specified arch, if any.
    std::string errMsg;
    const llvm::Target *target = lookupTarget(arch, triple, errMsg);
    if (!target)
    {
        error(Loc(), "%s", errMsg.c_str());
        fatal();
    }

    // Package up features to be passed to target/subtarget.
    llvm::SubtargetFeatures features;
    features.getDefaultSubtargetFeatures(triple);
    if (cpu == "native")
    {
        // -mcpu=native: mirror each detected host CPU feature flag.
        llvm::StringMap<bool> hostFeatures;
        if (llvm::sys::getHostCPUFeatures(hostFeatures))
        {
            for (llvm::StringMapConstIterator<bool> it = hostFeatures.begin();
                 it != hostFeatures.end(); ++it)
            {
#if LDC_LLVM_VER >= 305
                std::string prefixed(it->second ? "+" : "-");
                prefixed.append(it->first());
                features.AddFeature(prefixed);
#else
                features.AddFeature(it->first(), it->second);
#endif
            }
        }
    }

    // MIPS targets need their ABI set up based on the triple (addMipsABI).
    const llvm::Triple::ArchType archType = triple.getArch();
    if (archType == llvm::Triple::mips || archType == llvm::Triple::mipsel ||
        archType == llvm::Triple::mips64 || archType == llvm::Triple::mips64el)
    {
        addMipsABI(triple, attrs);
    }

    // Append any explicitly requested feature attributes.
    for (std::vector<std::string>::const_iterator it = attrs.begin();
         it != attrs.end(); ++it)
    {
        features.AddFeature(*it);
    }

    // With an empty CPU string, LLVM will default to the host CPU, which is
    // usually not what we want (expected behavior from other compilers is
    // to default to "generic").
    cpu = getTargetCPU(cpu, triple);

    if (Logger::enabled())
    {
        Logger::println("Targeting '%s' (CPU '%s' with features '%s')",
            triple.str().c_str(), cpu.c_str(), features.getString().c_str());
    }

    // OS X defaults to PIC (and as of 10.7.5/LLVM 3.1-3.3, TLS use leads
    // to crashes for non-PIC code). LLVM doesn't handle this.
    if (triple.isMacOSX() && relocModel == llvm::Reloc::Default)
        relocModel = llvm::Reloc::PIC_;

    // Resolve the default float ABI; only ARM/Thumb need a real decision.
    if (floatABI == FloatABI::Default)
    {
        switch (triple.getArch())
        {
        case llvm::Triple::arm:
        case llvm::Triple::thumb:
            floatABI = getARMFloatABI(triple, getLLVMArchSuffixForARM(cpu));
            break;
        default: // X86, ...
            floatABI = FloatABI::Hard;
            break;
        }
    }

#if LDC_LLVM_VER < 305
    if (triple.getArch() == llvm::Triple::arm && !triple.isOSDarwin())
    {
        // On ARM, we want to use EHABI exception handling, as we don't support
        // SJLJ EH in druntime. Unfortunately, it is still in a partly
        // experimental state, and the -arm-enable-ehabi-descriptors command
        // line option is not exposed via an internal API at all.
        const char *backendArgs[3] = {
            "ldc2", // Fake name, irrelevant.
            "-arm-enable-ehabi",
            "-arm-enable-ehabi-descriptors"
        };
        llvm::cl::ParseCommandLineOptions(3, backendArgs);
    }
#endif

    llvm::TargetOptions targetOptions;
    targetOptions.NoFramePointerElim = noFramePointerElim;

    switch (floatABI)
    {
    case FloatABI::Soft:
        targetOptions.UseSoftFloat = true;
        targetOptions.FloatABIType = llvm::FloatABI::Soft;
        break;
    case FloatABI::SoftFP:
        targetOptions.UseSoftFloat = false;
        targetOptions.FloatABIType = llvm::FloatABI::Soft;
        break;
    case FloatABI::Hard:
        targetOptions.UseSoftFloat = false;
        targetOptions.FloatABIType = llvm::FloatABI::Hard;
        break;
    default:
        llvm_unreachable("Floating point ABI type unknown.");
    }

    // Right now, we only support linker-level dead code elimination on Linux
    // using the GNU toolchain (based on ld's --gc-sections flag). The Apple ld
    // on OS X supports a similar flag (-dead_strip) that doesn't require
    // emitting the symbols into different sections. The MinGW ld doesn't seem
    // to support --gc-sections at all, and FreeBSD needs more investigation.
    if (!noLinkerStripDead && (triple.getOS() == llvm::Triple::Linux ||
                               triple.getOS() == llvm::Triple::Win32))
    {
#if LDC_LLVM_VER < 305
        llvm::TargetMachine::setDataSections(true);
        llvm::TargetMachine::setFunctionSections(true);
#else
        targetOptions.FunctionSections = true;
        targetOptions.DataSections = true;
#endif
    }

    return target->createTargetMachine(triple.str(), cpu, features.getString(),
        targetOptions, relocModel, codeModel, codeGenOptLevel);
}
/// Creates an llvm::TargetMachine for the given target selection.
///
/// Resolves the triple (defaulting to the LLVM host triple, adjusted for
/// -m32/-m64), looks up the backend, assembles the subtarget feature string,
/// picks the float ABI, and configures target options before delegating to
/// the LLVM target registry. Calls fatal() if the backend lookup fails.
/// genDebugInfo additionally disables frame-pointer elimination.
llvm::TargetMachine *createTargetMachine(
    std::string targetTriple, std::string arch, std::string cpu,
    std::vector<std::string> attrs, ExplicitBitness::Type bitness,
    FloatABI::Type floatABI, llvm::Reloc::Model relocModel,
    llvm::CodeModel::Model codeModel, llvm::CodeGenOpt::Level codeGenOptLevel,
    bool genDebugInfo)
{
    // Determine target triple. If the user didn't explicitly specify one, use
    // the one set at LLVM configure time.
    llvm::Triple triple;
    if (!targetTriple.empty())
    {
        triple = llvm::Triple(llvm::Triple::normalize(targetTriple));
    }
    else
    {
        triple = llvm::Triple(llvm::sys::getDefaultTargetTriple());

        // Handle -m32/-m64.
        if (sizeof(void*) == 4 && bitness == ExplicitBitness::M64)
            triple = triple.get64BitArchVariant();
        else if (sizeof(void*) == 8 && bitness == ExplicitBitness::M32)
            triple = triple.get32BitArchVariant();
    }

    // Look up the LLVM backend to use. This also updates triple with the
    // user-specified arch, if any.
    std::string errMsg;
    const llvm::Target *target = lookupTarget(arch, triple, errMsg);
    if (!target)
    {
        error("%s", errMsg.c_str());
        fatal();
    }

    // Package up features to be passed to target/subtarget.
    llvm::SubtargetFeatures features;
    features.getDefaultSubtargetFeatures(triple);
    if (cpu == "native")
    {
        // -mcpu=native: mirror each detected host CPU feature flag.
        llvm::StringMap<bool> hostFeatures;
        if (llvm::sys::getHostCPUFeatures(hostFeatures))
        {
            for (llvm::StringMapConstIterator<bool> it = hostFeatures.begin();
                 it != hostFeatures.end(); ++it)
            {
                features.AddFeature(it->first(), it->second);
            }
        }
    }

    // Append any explicitly requested feature attributes.
    for (std::vector<std::string>::const_iterator it = attrs.begin();
         it != attrs.end(); ++it)
    {
        features.AddFeature(*it);
    }

    // With an empty CPU string, LLVM will default to the host CPU, which is
    // usually not what we want (expected behavior from other compilers is
    // to default to "generic").
    cpu = getTargetCPU(cpu, triple);

    if (Logger::enabled())
    {
        Logger::println("Targeting '%s' (CPU '%s' with features '%s')",
            triple.str().c_str(), cpu.c_str(), features.getString().c_str());
    }

    // OS X defaults to PIC (and as of 10.7.5/LLVM 3.1-3.3, TLS use leads
    // to crashes for non-PIC code). LLVM doesn't handle this.
    if (triple.isMacOSX() && relocModel == llvm::Reloc::Default)
        relocModel = llvm::Reloc::PIC_;

    // Resolve the default float ABI. Note: this variant defaults Thumb to
    // soft-float instead of consulting getARMFloatABI.
    if (floatABI == FloatABI::Default)
    {
        switch (triple.getArch())
        {
        case llvm::Triple::arm:
            floatABI = getARMFloatABI(triple, getLLVMArchSuffixForARM(cpu));
            break;
        case llvm::Triple::thumb:
            floatABI = FloatABI::Soft;
            break;
        default: // X86, ...
            floatABI = FloatABI::Hard;
            break;
        }
    }

    if (triple.getArch() == llvm::Triple::arm)
    {
        // On ARM, we want to use EHABI exception handling, as we don't support
        // SJLJ EH in druntime. Unfortunately, it is still in a partly
        // experimental state, and the -arm-enable-ehabi-descriptors command
        // line option is not exposed via an internal API at all.
        const char *backendArgs[3] = {
            "ldc2", // Fake name, irrelevant.
            "-arm-enable-ehabi",
            "-arm-enable-ehabi-descriptors"
        };
        llvm::cl::ParseCommandLineOptions(3, backendArgs);
    }

    // Disable frame-pointer elimination whenever debug info is requested.
    llvm::TargetOptions targetOptions;
    targetOptions.NoFramePointerElim = genDebugInfo;

    switch (floatABI)
    {
    case FloatABI::Soft:
        targetOptions.UseSoftFloat = true;
        targetOptions.FloatABIType = llvm::FloatABI::Soft;
        break;
    case FloatABI::SoftFP:
        targetOptions.UseSoftFloat = false;
        targetOptions.FloatABIType = llvm::FloatABI::Soft;
        break;
    case FloatABI::Hard:
        targetOptions.UseSoftFloat = false;
        targetOptions.FloatABIType = llvm::FloatABI::Hard;
        break;
    default:
        llvm_unreachable("Floating point ABI type unknown.");
    }

    return target->createTargetMachine(triple.str(), cpu, features.getString(),
        targetOptions, relocModel, codeModel, codeGenOptLevel);
}