Example #1
LLVMBool
lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
                                        lp_generated_code **OutCode,
                                        LLVMModuleRef M,
                                        LLVMMCJITMemoryManagerRef CMM,
                                        unsigned OptLevel,
                                        int useMCJIT,
                                        char **OutError)
{
   using namespace llvm;

   std::string Error;
#if HAVE_LLVM >= 0x0306
   EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
#else
   EngineBuilder builder(unwrap(M));
#endif

   /**
    * LLVM 3.1+ no longer has "extern unsigned llvm::StackAlignmentOverride" and
    * friends for configuring code generation options such as stack alignment.
    */
   TargetOptions options;
#if defined(PIPE_ARCH_X86)
   options.StackAlignmentOverride = 4;
#if HAVE_LLVM < 0x0304
   options.RealignStack = true;
#endif
#endif

#if defined(DEBUG) && HAVE_LLVM < 0x0307
   options.JITEmitDebugInfo = true;
#endif

   /* XXX: Workaround http://llvm.org/PR21435 */
#if defined(DEBUG) || defined(PROFILE) || \
    (HAVE_LLVM >= 0x0303 && (defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)))
#if HAVE_LLVM < 0x0304
   options.NoFramePointerElimNonLeaf = true;
#endif
#if HAVE_LLVM < 0x0307
   options.NoFramePointerElim = true;
#endif
#endif

   builder.setEngineKind(EngineKind::JIT)
          .setErrorStr(&Error)
          .setTargetOptions(options)
          .setOptLevel((CodeGenOpt::Level)OptLevel);
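   /* Note: the unsigned OptLevel parameter maps directly onto CodeGenOpt::Level
    * (0 = None, 1 = Less, 2 = Default, 3 = Aggressive), hence the plain cast. */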

   if (useMCJIT) {
#if HAVE_LLVM < 0x0306
       builder.setUseMCJIT(true);
#endif
#ifdef _WIN32
       /*
        * MCJIT works on Windows, but currently only through ELF object format.
        *
        * XXX: We could use `LLVM_HOST_TRIPLE "-elf"` but LLVM_HOST_TRIPLE has
        * different strings for MinGW/MSVC, so better play it safe and be
        * explicit.
        */
#  ifdef _WIN64
       LLVMSetTarget(M, "x86_64-pc-win32-elf");
#  else
       LLVMSetTarget(M, "i686-pc-win32-elf");
#  endif
#endif
   }

   llvm::SmallVector<std::string, 16> MAttrs;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   /*
    * We need to unset attributes because sometimes LLVM mistakenly assumes
    * certain features are present given the processor name.
    *
    * https://bugs.freedesktop.org/show_bug.cgi?id=92214
    * http://llvm.org/PR25021
    * http://llvm.org/PR19429
    * http://llvm.org/PR16721
    */
   MAttrs.push_back(util_cpu_caps.has_sse    ? "+sse"    : "-sse"   );
   MAttrs.push_back(util_cpu_caps.has_sse2   ? "+sse2"   : "-sse2"  );
   MAttrs.push_back(util_cpu_caps.has_sse3   ? "+sse3"   : "-sse3"  );
   MAttrs.push_back(util_cpu_caps.has_ssse3  ? "+ssse3"  : "-ssse3" );
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back(util_cpu_caps.has_sse4_1 ? "+sse4.1" : "-sse4.1");
#else
   MAttrs.push_back(util_cpu_caps.has_sse4_1 ? "+sse41"  : "-sse41" );
#endif
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back(util_cpu_caps.has_sse4_2 ? "+sse4.2" : "-sse4.2");
#else
   MAttrs.push_back(util_cpu_caps.has_sse4_2 ? "+sse42"  : "-sse42" );
#endif
   /*
    * The AVX feature is not yet detected automatically from CPUID by the X86
    * target, because the old (but still default) JIT engine cannot emit the
    * opcodes. Newer LLVM versions can, and at least some of them (tested with
    * 3.3) will emit AVX opcodes even without this.
    */
   MAttrs.push_back(util_cpu_caps.has_avx  ? "+avx"  : "-avx");
   MAttrs.push_back(util_cpu_caps.has_f16c ? "+f16c" : "-f16c");
   if (HAVE_LLVM >= 0x0304) {
      MAttrs.push_back(util_cpu_caps.has_fma  ? "+fma"  : "-fma");
   } else {
      /*
       * The old JIT in LLVM 3.3 has a bug encoding llvm.fmuladd.f32 and
       * llvm.fmuladd.v2f32 intrinsics when FMA is available.
       */
      MAttrs.push_back("-fma");
   }
   MAttrs.push_back(util_cpu_caps.has_avx2 ? "+avx2" : "-avx2");
   /* disable avx512 and all subvariants */
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back("-avx512cd");
   MAttrs.push_back("-avx512er");
   MAttrs.push_back("-avx512f");
   MAttrs.push_back("-avx512pf");
#endif
#if HAVE_LLVM >= 0x0305
   MAttrs.push_back("-avx512bw");
   MAttrs.push_back("-avx512dq");
   MAttrs.push_back("-avx512vl");
#endif
#endif

#if defined(PIPE_ARCH_PPC)
   MAttrs.push_back(util_cpu_caps.has_altivec ? "+altivec" : "-altivec");
#if HAVE_LLVM >= 0x0304
   /*
    * Make sure VSX instructions are disabled
    * See LLVM bug https://llvm.org/bugs/show_bug.cgi?id=25503#c7
    */
   if (util_cpu_caps.has_altivec) {
      MAttrs.push_back("-vsx");
   }
#endif
#endif

   builder.setMAttrs(MAttrs);

#if HAVE_LLVM >= 0x0305
   StringRef MCPU = llvm::sys::getHostCPUName();
   /*
    * The cpu bits are no longer set automatically, so mcpu has to be set
    * manually. Note that the MAttrs set above will be largely ignored (we
    * should not set any that would not also be set by specifying the cpu).
    * That ought to be safe, since getHostCPUName() should include bits not
    * only from the cpu but from the environment as well (for instance,
    * whether it is safe to use avx instructions, which need OS support).
    * According to http://llvm.org/bugs/show_bug.cgi?id=19429, however, it may
    * be necessary to specify an older cpu (or disable MAttrs) when not using
    * MCJIT, so that no instructions are generated which the old JIT cannot
    * handle. It is not entirely clear whether anything needs to be done here yet.
    */
   builder.setMCPU(MCPU);
#endif

   ShaderMemoryManager *MM = NULL;
   if (useMCJIT) {
       BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
       MM = new ShaderMemoryManager(JMM);
       *OutCode = MM->getGeneratedCode();

#if HAVE_LLVM >= 0x0306
       builder.setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager>(MM));
       MM = NULL; // ownership taken by std::unique_ptr
#elif HAVE_LLVM > 0x0303
       builder.setMCJITMemoryManager(MM);
#else
       builder.setJITMemoryManager(MM);
#endif
   } else {
#if HAVE_LLVM < 0x0306
       BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
       MM = new ShaderMemoryManager(JMM);
       *OutCode = MM->getGeneratedCode();

       builder.setJITMemoryManager(MM);
#else
       assert(0);
#endif
   }

   ExecutionEngine *JIT;

   JIT = builder.create();
#if LLVM_USE_INTEL_JITEVENTS
   if (JIT) {
      JITEventListener *JEL = JITEventListener::createIntelJITEventListener();
      JIT->RegisterJITEventListener(JEL);
   }
#endif
   if (JIT) {
      *OutJIT = wrap(JIT);
      return 0;
   }
   lp_free_generated_code(*OutCode);
   *OutCode = 0;
   delete MM;
   *OutError = strdup(Error.c_str());
   return 1;
}
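A minimal, hypothetical caller sketch for the variant above follows; only the
function signature is taken from the example, while the helper name, its
arguments and the error handling are illustrative assumptions rather than the
actual Mesa call site:

/* Hypothetical caller sketch (not the real Mesa call site). Assumes the module
 * and the gallivm memory-manager handle were created elsewhere. */
static int
create_jit_engine(LLVMModuleRef module, LLVMMCJITMemoryManagerRef mem_mgr)
{
   LLVMExecutionEngineRef engine = NULL;
   lp_generated_code *code = NULL;
   char *error = NULL;

   /* A non-zero return signals failure; OutError then holds a strdup'ed message. */
   if (lp_build_create_jit_compiler_for_module(&engine, &code, module, mem_mgr,
                                               2 /* CodeGenOpt::Default */,
                                               1 /* useMCJIT */, &error)) {
      debug_printf("JIT creation failed: %s\n", error);
      free(error);
      return 1;
   }
   return 0;
}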
Example #2
LLVMBool
lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
                                        lp_generated_code **OutCode,
                                        LLVMModuleRef M,
                                        LLVMMCJITMemoryManagerRef CMM,
                                        unsigned OptLevel,
                                        int useMCJIT,
                                        char **OutError)
{
   using namespace llvm;

   std::string Error;
#if HAVE_LLVM >= 0x0306
   EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
#else
   EngineBuilder builder(unwrap(M));
#endif

   /**
    * LLVM 3.1+ no longer has "extern unsigned llvm::StackAlignmentOverride" and
    * friends for configuring code generation options such as stack alignment.
    */
   TargetOptions options;
#if defined(PIPE_ARCH_X86)
   options.StackAlignmentOverride = 4;
#if HAVE_LLVM < 0x0304
   options.RealignStack = true;
#endif
#endif

#if defined(DEBUG) && HAVE_LLVM < 0x0307
   options.JITEmitDebugInfo = true;
#endif

   /* XXX: Workaround http://llvm.org/PR21435 */
#if defined(DEBUG) || defined(PROFILE) || \
    (HAVE_LLVM >= 0x0303 && (defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)))
#if HAVE_LLVM < 0x0304
   options.NoFramePointerElimNonLeaf = true;
#endif
#if HAVE_LLVM < 0x0307
   options.NoFramePointerElim = true;
#endif
#endif

   builder.setEngineKind(EngineKind::JIT)
          .setErrorStr(&Error)
          .setTargetOptions(options)
          .setOptLevel((CodeGenOpt::Level)OptLevel);

   if (useMCJIT) {
#if HAVE_LLVM < 0x0306
       builder.setUseMCJIT(true);
#endif
#ifdef _WIN32
       /*
        * MCJIT works on Windows, but currently only through ELF object format.
        *
        * XXX: We could use `LLVM_HOST_TRIPLE "-elf"` but LLVM_HOST_TRIPLE has
        * different strings for MinGW/MSVC, so better play it safe and be
        * explicit.
        */
#  ifdef _WIN64
       LLVMSetTarget(M, "x86_64-pc-win32-elf");
#  else
       LLVMSetTarget(M, "i686-pc-win32-elf");
#  endif
#endif
   }

   llvm::SmallVector<std::string, 16> MAttrs;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
#if HAVE_LLVM >= 0x0400
   /* llvm-3.7+ implements sys::getHostCPUFeatures for x86,
    * which allows us to enable/disable code generation based
    * on the results of cpuid.
    */
   llvm::StringMap<bool> features;
   llvm::sys::getHostCPUFeatures(features);

   for (StringMapIterator<bool> f = features.begin();
        f != features.end();
        ++f) {
      MAttrs.push_back(((*f).second ? "+" : "-") + (*f).first().str());
   }
#else
   /*
    * We need to unset attributes because sometimes LLVM mistakenly assumes
    * certain features are present given the processor name.
    *
    * https://bugs.freedesktop.org/show_bug.cgi?id=92214
    * http://llvm.org/PR25021
    * http://llvm.org/PR19429
    * http://llvm.org/PR16721
    */
   MAttrs.push_back(util_cpu_caps.has_sse    ? "+sse"    : "-sse"   );
   MAttrs.push_back(util_cpu_caps.has_sse2   ? "+sse2"   : "-sse2"  );
   MAttrs.push_back(util_cpu_caps.has_sse3   ? "+sse3"   : "-sse3"  );
   MAttrs.push_back(util_cpu_caps.has_ssse3  ? "+ssse3"  : "-ssse3" );
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back(util_cpu_caps.has_sse4_1 ? "+sse4.1" : "-sse4.1");
#else
   MAttrs.push_back(util_cpu_caps.has_sse4_1 ? "+sse41"  : "-sse41" );
#endif
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back(util_cpu_caps.has_sse4_2 ? "+sse4.2" : "-sse4.2");
#else
   MAttrs.push_back(util_cpu_caps.has_sse4_2 ? "+sse42"  : "-sse42" );
#endif
   /*
    * The AVX feature is not yet detected automatically from CPUID by the X86
    * target, because the old (but still default) JIT engine cannot emit the
    * opcodes. Newer LLVM versions can, and at least some of them (tested with
    * 3.3) will emit AVX opcodes even without this.
    */
   MAttrs.push_back(util_cpu_caps.has_avx  ? "+avx"  : "-avx");
   MAttrs.push_back(util_cpu_caps.has_f16c ? "+f16c" : "-f16c");
   if (HAVE_LLVM >= 0x0304) {
      MAttrs.push_back(util_cpu_caps.has_fma  ? "+fma"  : "-fma");
   } else {
      /*
       * The old JIT in LLVM 3.3 has a bug encoding llvm.fmuladd.f32 and
       * llvm.fmuladd.v2f32 intrinsics when FMA is available.
       */
      MAttrs.push_back("-fma");
   }
   MAttrs.push_back(util_cpu_caps.has_avx2 ? "+avx2" : "-avx2");
   /* disable avx512 and all subvariants */
#if HAVE_LLVM >= 0x0304
   MAttrs.push_back("-avx512cd");
   MAttrs.push_back("-avx512er");
   MAttrs.push_back("-avx512f");
   MAttrs.push_back("-avx512pf");
#endif
#if HAVE_LLVM >= 0x0305
   MAttrs.push_back("-avx512bw");
   MAttrs.push_back("-avx512dq");
   MAttrs.push_back("-avx512vl");
#endif
#endif
#endif

#if defined(PIPE_ARCH_PPC)
   MAttrs.push_back(util_cpu_caps.has_altivec ? "+altivec" : "-altivec");
#if (HAVE_LLVM >= 0x0304)
#if (HAVE_LLVM < 0x0400)
   /*
    * Make sure VSX instructions are disabled
    * See LLVM bugs:
    * https://llvm.org/bugs/show_bug.cgi?id=25503#c7 (fixed in 3.8.1)
    * https://llvm.org/bugs/show_bug.cgi?id=26775 (fixed in 3.8.1)
    * https://llvm.org/bugs/show_bug.cgi?id=33531 (fixed in 4.0)
    * https://llvm.org/bugs/show_bug.cgi?id=34647 (llc performance on certain unusual shader IR; introduced in 4.0, still pending as of 5.0)
    */
   if (util_cpu_caps.has_altivec) {
      MAttrs.push_back("-vsx");
   }
#else
   /*
    * Bug 25503 is fixed by the same change that fixed bug 26775, in LLVM
    * versions later than 3.8 (starting with 3.8.1). BZ 33531 actually
    * comprises more than one bug, all of which are fixed in LLVM 4.0.
    *
    * With LLVM 4.0 or higher:
    * Make sure VSX instructions are ENABLED, unless
    * a) the entire -mattr option is overridden via GALLIVM_MATTRS, or
    * b) VSX instructions are explicitly enabled/disabled via GALLIVM_VSX=1 or 0.
    */
   if (util_cpu_caps.has_altivec) {
      char *env_mattrs = getenv("GALLIVM_MATTRS");
      if (env_mattrs) {
         MAttrs.push_back(env_mattrs);
      }
      else {
         boolean enable_vsx = true;
         char *env_vsx = getenv("GALLIVM_VSX");
         if (env_vsx && env_vsx[0] == '0') {
            enable_vsx = false;
         }
         if (enable_vsx)
            MAttrs.push_back("+vsx");
         else
            MAttrs.push_back("-vsx");
      }
   }
#endif
#endif
#endif

   builder.setMAttrs(MAttrs);

   if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
      int n = MAttrs.size();
      if (n > 0) {
         debug_printf("llc -mattr option(s): ");
         for (int i = 0; i < n; i++)
            debug_printf("%s%s", MAttrs[i].c_str(), (i < n - 1) ? "," : "");
         debug_printf("\n");
      }
   }

#if HAVE_LLVM >= 0x0305
   StringRef MCPU = llvm::sys::getHostCPUName();
   /*
    * The cpu bits are no longer set automatically, so mcpu has to be set
    * manually. Note that the MAttrs set above will be largely ignored (we
    * should not set any that would not also be set by specifying the cpu).
    * That ought to be safe, since getHostCPUName() should include bits not
    * only from the cpu but from the environment as well (for instance,
    * whether it is safe to use avx instructions, which need OS support).
    * According to http://llvm.org/bugs/show_bug.cgi?id=19429, however, it may
    * be necessary to specify an older cpu (or disable MAttrs) when not using
    * MCJIT, so that no instructions are generated which the old JIT cannot
    * handle. It is not entirely clear whether anything needs to be done here yet.
    */
#if defined(PIPE_ARCH_LITTLE_ENDIAN)  && defined(PIPE_ARCH_PPC_64)
   /*
    * Versions of LLVM prior to 4.0 lacked a table entry for "POWER8NVL",
    * resulting in (big-endian) "generic" being returned on
    * little-endian Power8NVL systems.  The result was that code that
    * attempted to load the least significant 32 bits of a 64-bit quantity
    * from memory loaded the wrong half.  This resulted in failures in some
    * Piglit tests, e.g.
    * .../arb_gpu_shader_fp64/execution/conversion/frag-conversion-explicit-double-uint
    */
   if (MCPU == "generic")
      MCPU = "pwr8";
#endif
   builder.setMCPU(MCPU);
   if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
      debug_printf("llc -mcpu option: %s\n", MCPU.str().c_str());
   }
#endif

   ShaderMemoryManager *MM = NULL;
   if (useMCJIT) {
       BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
       MM = new ShaderMemoryManager(JMM);
       *OutCode = MM->getGeneratedCode();

#if HAVE_LLVM >= 0x0306
       builder.setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager>(MM));
       MM = NULL; // ownership taken by std::unique_ptr
#elif HAVE_LLVM > 0x0303
       builder.setMCJITMemoryManager(MM);
#else
       builder.setJITMemoryManager(MM);
#endif
   } else {
#if HAVE_LLVM < 0x0306
       BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
       MM = new ShaderMemoryManager(JMM);
       *OutCode = MM->getGeneratedCode();

       builder.setJITMemoryManager(MM);
#else
       assert(0);
#endif
   }

   ExecutionEngine *JIT;

   JIT = builder.create();
#if LLVM_USE_INTEL_JITEVENTS
   if (JIT) {
      JITEventListener *JEL = JITEventListener::createIntelJITEventListener();
      JIT->RegisterJITEventListener(JEL);
   }
#endif
   if (JIT) {
      *OutJIT = wrap(JIT);
      return 0;
   }
   lp_free_generated_code(*OutCode);
   *OutCode = 0;
   delete MM;
   *OutError = strdup(Error.c_str());
   return 1;
}
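Compared with Example #1, the main addition is the LLVM 4.0+ path that builds
the -mattr list from sys::getHostCPUFeatures() instead of util_cpu_caps. A
standalone sketch of just that technique, assuming an LLVM release from this
code's era where getHostCPUFeatures(StringMap<bool> &) returns a bool:

// Standalone sketch: derive "+feature"/"-feature" strings from the host CPU,
// mirroring how the LLVM 4.0+ branch above populates MAttrs.
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Host.h"
#include <cstdio>
#include <string>
#include <vector>

int main()
{
   llvm::StringMap<bool> features;
   std::vector<std::string> mattrs;

   // getHostCPUFeatures() returns false if feature detection is not
   // implemented for this host; the list then simply stays empty.
   if (llvm::sys::getHostCPUFeatures(features)) {
      for (const auto &f : features)
         mattrs.push_back((f.second ? "+" : "-") + f.first().str());
   }

   for (const std::string &attr : mattrs)
      printf("%s\n", attr.c_str());
   return 0;
}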
Example #3
LLVMBool
lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
                                        lp_generated_code **OutCode,
                                        LLVMModuleRef M,
                                        unsigned OptLevel,
                                        int useMCJIT,
                                        char **OutError)
{
   using namespace llvm;

   std::string Error;
   EngineBuilder builder(unwrap(M));

   /**
    * LLVM 3.1+ no longer has "extern unsigned llvm::StackAlignmentOverride" and
    * friends for configuring code generation options such as stack alignment.
    */
   TargetOptions options;
#if defined(PIPE_ARCH_X86)
   options.StackAlignmentOverride = 4;
#if HAVE_LLVM < 0x0304
   options.RealignStack = true;
#endif
#endif

#if defined(DEBUG)
   options.JITEmitDebugInfo = true;
#endif

#if defined(DEBUG) || defined(PROFILE)
#if HAVE_LLVM < 0x0304
   options.NoFramePointerElimNonLeaf = true;
#endif
   options.NoFramePointerElim = true;
#endif

   builder.setEngineKind(EngineKind::JIT)
          .setErrorStr(&Error)
          .setTargetOptions(options)
          .setOptLevel((CodeGenOpt::Level)OptLevel);

   if (useMCJIT) {
       builder.setUseMCJIT(true);
#ifdef _WIN32
       /*
        * MCJIT works on Windows, but currently only through ELF object format.
        */
       std::string targetTriple = llvm::sys::getProcessTriple();
       targetTriple.append("-elf");
       unwrap(M)->setTargetTriple(targetTriple);
#endif
   }

   llvm::SmallVector<std::string, 1> MAttrs;
   if (util_cpu_caps.has_avx) {
      /*
       * The AVX feature is not yet detected automatically from CPUID by the
       * X86 target, because the old (but still default) JIT engine cannot
       * emit the opcodes. Newer LLVM versions can, and at least some of them
       * (tested with 3.3) will emit AVX opcodes even without this.
       */
      MAttrs.push_back("+avx");
      if (util_cpu_caps.has_f16c) {
         MAttrs.push_back("+f16c");
      }
      builder.setMAttrs(MAttrs);
   }

#if HAVE_LLVM >= 0x0305
   StringRef MCPU = llvm::sys::getHostCPUName();
   /*
    * The cpu bits are no longer set automatically, so mcpu has to be set
    * manually. Note that the MAttrs set above will be largely ignored (we
    * should not set any that would not also be set by specifying the cpu).
    * That ought to be safe, since getHostCPUName() should include bits not
    * only from the cpu but from the environment as well (for instance,
    * whether it is safe to use avx instructions, which need OS support).
    * According to http://llvm.org/bugs/show_bug.cgi?id=19429, however, it may
    * be necessary to specify an older cpu (or disable MAttrs) when not using
    * MCJIT, so that no instructions are generated which the old JIT cannot
    * handle. It is not entirely clear whether anything needs to be done here yet.
    */
   builder.setMCPU(MCPU);
#endif

   ShaderMemoryManager *MM = new ShaderMemoryManager();
   *OutCode = MM->getGeneratedCode();
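   /* The generated-code handle is passed back to the caller through OutCode;
    * on failure it is released below via lp_free_generated_code(). */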

   builder.setJITMemoryManager(MM);

   ExecutionEngine *JIT;

#if HAVE_LLVM >= 0x0302
   JIT = builder.create();
#else
   /*
    * Workaround http://llvm.org/PR12833
    */
   StringRef MArch = "";
   StringRef MCPU = "";
   Triple TT(unwrap(M)->getTargetTriple());
   JIT = builder.create(builder.selectTarget(TT, MArch, MCPU, MAttrs));
#endif
   if (JIT) {
      *OutJIT = wrap(JIT);
      return 0;
   }
   lp_free_generated_code(*OutCode);
   *OutCode = 0;
   delete MM;
   *OutError = strdup(Error.c_str());
   return 1;
}
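Unlike Examples #1 and #2, this older variant derives the Windows MCJIT target
triple from the process triple rather than hard-coding it. A small standalone
sketch of just that step (the triple computation only; engine setup omitted):

// Standalone sketch of the Windows triple handling in the MCJIT path above:
// MCJIT on Windows only supports the ELF object format, so an "-elf" suffix
// is appended to the process triple; elsewhere the triple is used unchanged.
#include "llvm/Support/Host.h"
#include <cstdio>
#include <string>

int main()
{
   std::string triple = llvm::sys::getProcessTriple();
#ifdef _WIN32
   triple += "-elf";
#endif
   printf("target triple for MCJIT: %s\n", triple.c_str());
   return 0;
}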