Example #1
/**
 * Match a display to a driver.  The matching is done by finding the first
 * driver that can initialize the display.
 */
_EGLDriver *
_eglMatchDriver(_EGLDisplay *dpy)
{
   _EGLDriver *best_drv;

   assert(!dpy->Initialized);

   /* set options */
   dpy->Options.UseFallback =
      env_var_as_boolean("LIBGL_ALWAYS_SOFTWARE", false);

   best_drv = _eglMatchAndInitialize(dpy);
   if (!best_drv) {
      dpy->Options.UseFallback = EGL_TRUE;
      best_drv = _eglMatchAndInitialize(dpy);
   }

   if (best_drv) {
      _eglLog(_EGL_DEBUG, "the best driver is %s",
            best_drv->Name);
      dpy->Driver = best_drv;
      dpy->Initialized = EGL_TRUE;
   }

   return best_drv;
}
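Every example below gates a code path on the same small utility. As a reference point, here is a minimal sketch of how such a helper can be implemented; the accepted spellings ("1", "true", "y", "yes") are an assumption, so check util/debug.c in the Mesa tree you are reading for the authoritative list.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Sketch only: read an environment variable and interpret it as a boolean,
 * returning default_value when the variable is unset.  The set of accepted
 * spellings is assumed, not copied from Mesa. */
bool
env_var_as_boolean(const char *var_name, bool default_value)
{
   const char *str = getenv(var_name);
   if (str == NULL)
      return default_value;

   return strcmp(str, "1") == 0 ||
          strcasecmp(str, "true") == 0 ||
          strcasecmp(str, "y") == 0 ||
          strcasecmp(str, "yes") == 0;
}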
Example #2
EGLBoolean
dri2_initialize_x11(_EGLDriver *drv, _EGLDisplay *disp)
{
   EGLBoolean initialized = EGL_FALSE;

   if (!disp->Options.ForceSoftware) {
#ifdef HAVE_DRI3
      if (!env_var_as_boolean("LIBGL_DRI3_DISABLE", false))
         initialized = dri2_initialize_x11_dri3(drv, disp);
#endif

      if (!initialized)
         initialized = dri2_initialize_x11_dri2(drv, disp);
   }

   if (!initialized)
      initialized = dri2_initialize_x11_swrast(drv, disp);

   return initialized;
}
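For context, dri2_initialize_x11() is reached through a per-platform switch in the common dri2 code. The sketch below is simplified and assumes the usual platform enum and helper names; the real egl_dri2.c dispatcher covers more platforms (Wayland, DRM, Android, surfaceless) and does extra bookkeeping.

/* Simplified, assumed sketch of the dispatcher that would call
 * dri2_initialize_x11() above; not a verbatim copy of egl_dri2.c. */
static EGLBoolean
dri2_initialize(_EGLDriver *drv, _EGLDisplay *disp)
{
   switch (disp->Platform) {
#ifdef HAVE_X11_PLATFORM
   case _EGL_PLATFORM_X11:
      return dri2_initialize_x11(drv, disp);
#endif
#ifdef HAVE_WAYLAND_PLATFORM
   case _EGL_PLATFORM_WAYLAND:
      return dri2_initialize_wayland(drv, disp);
#endif
   default:
      _eglLog(_EGL_WARNING, "No EGL platform enabled.");
      return EGL_FALSE;
   }
}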
Example #3
VkResult
__vk_errorf(VkResult error, const char *file, int line, const char *format, ...)
{
   va_list ap;
   char buffer[256];

   const char *error_str = vk_Result_to_str(error);

   if (format) {
      va_start(ap, format);
      vsnprintf(buffer, sizeof(buffer), format, ap);
      va_end(ap);

      fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
   } else {
      fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
   }

   if (error == VK_ERROR_DEVICE_LOST &&
       env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return error;
}
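Callers normally reach __vk_errorf() through thin macros that capture the call site. Below is a sketch of what such wrappers look like; the exact names and argument lists vary between anv versions, so treat these as illustrative.

/* Assumed convenience wrappers: capture __FILE__/__LINE__ at the call site.
 * Example use: return vk_errorf(VK_ERROR_DEVICE_LOST, "gpu hang on ring %d", ring);
 */
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
#define vk_errorf(error, format, ...) \
   __vk_errorf(error, __FILE__, __LINE__, format, ##__VA_ARGS__)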
Example #4
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct iris_bufmgr *
iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
{
   uint64_t gtt_size = iris_gtt_size(fd);
   if (gtt_size <= IRIS_MEMZONE_OTHER_START)
      return NULL;

   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
   const uint64_t _4GB = 1ull << 32;

   /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
   const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;

   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
                      PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
                      IRIS_MEMZONE_SURFACE_START,
                      _4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
                      IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
                      _4GB_minus_1 - IRIS_BORDER_COLOR_POOL_SIZE);

   /* Leave the last 4GB out of the high vma range, so that no state
    * base address + size can overflow 48 bits.
    */
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
                      IRIS_MEMZONE_OTHER_START,
                      (gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);

   // XXX: driconf
   bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}
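The two hash tables created at the end are keyed by 32-bit GEM names and handles. Here is a sketch of the hash and equality callbacks they expect, assuming the usual pattern built on _mesa_hash_data(); verify against the iris_bufmgr.c in your tree.

/* Assumed callbacks for the name/handle tables above, keyed by a uint32_t. */
static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, sizeof(uint32_t));
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((const unsigned *) a) == *((const unsigned *) b);
}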
Example #5
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   compiler->scalar_stage[MESA_SHADER_VERTEX] =
      devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS);
   compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", false);
   compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
   compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;
   /* To allow better CSE at the NIR level, we tell NIR to split all ffma
    * instructions during opt_algebraic and then re-combine them in a
    * later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;
   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    *
    * For the FS backend, it should be lowered away by the scalarizing pass so
    * we should never see fdot anyway.
    */
   nir_options->fdot_replicates = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
   }

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   compiler->glsl_compiler_options[MESA_SHADER_COMPUTE]
      .LowerShaderSharedVariables = true;

   return compiler;
}
Example #6
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   compiler->scalar_stage[MESA_SHADER_VERTEX] =
      devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS);
   compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
   compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
   compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
   compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
   compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerCombinedClipCullDistance = true;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;

      if (is_scalar) {
         compiler->glsl_compiler_options[i].NirOptions = &scalar_nir_options;
      } else {
         compiler->glsl_compiler_options[i].NirOptions =
            devinfo->gen < 6 ? &vector_nir_options : &vector_nir_options_gen6;
      }

      compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   compiler->glsl_compiler_options[MESA_SHADER_COMPUTE]
      .LowerShaderSharedVariables = true;

   return compiler;
}
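Compared with Example #5, this newer version points NirOptions at statically defined tables (scalar_nir_options, vector_nir_options, vector_nir_options_gen6) instead of rzalloc'ing a fresh structure per compiler. A rough sketch of such a table follows, using only the fields that Example #5 sets dynamically; the real tables in brw_compiler.c configure many more lowering flags.

/* Illustrative only: a static NIR options table built from the fields shown
 * in Example #5.  The real vector_nir_options sets many additional flags. */
static const nir_shader_compiler_options vector_nir_options = {
   .native_integers = true,
   .lower_ffma = true,      /* split ffma for better CSE, re-fused later */
   .lower_sub = true,
   .fdot_replicates = true, /* vec4 dpN replicates its result to all channels */
};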