Example #1
/**
 * Initialize the attribute groups in a GL context.
 *
 * \param ctx GL context.
 *
 * Initializes all the attributes, calling the respective <tt>init*</tt>
 * functions for the more complex data structures.
 */
static GLboolean
init_attrib_groups(GLcontext *ctx)
{
   assert(ctx);

   /* Constants */
   _mesa_init_constants( ctx );

   /* Extensions */
   _mesa_init_extensions( ctx );

   /* Attribute Groups */
   _mesa_init_accum( ctx );
   _mesa_init_attrib( ctx );
   _mesa_init_buffer_objects( ctx );
   _mesa_init_color( ctx );
   _mesa_init_colortables( ctx );
   _mesa_init_current( ctx );
   _mesa_init_depth( ctx );
   _mesa_init_debug( ctx );
   _mesa_init_display_list( ctx );
   _mesa_init_eval( ctx );
   _mesa_init_fbobjects( ctx );
   _mesa_init_feedback( ctx );
   _mesa_init_fog( ctx );
   _mesa_init_histogram( ctx );
   _mesa_init_hint( ctx );
   _mesa_init_line( ctx );
   _mesa_init_lighting( ctx );
   _mesa_init_matrix( ctx );
   _mesa_init_multisample( ctx );
   _mesa_init_pixel( ctx );
   _mesa_init_pixelstore( ctx );
   _mesa_init_point( ctx );
   _mesa_init_polygon( ctx );
   _mesa_init_program( ctx );
   _mesa_init_queryobj( ctx );
#if FEATURE_ARB_sync
   _mesa_init_sync( ctx );
#endif
   _mesa_init_rastpos( ctx );
   _mesa_init_scissor( ctx );
   _mesa_init_shader_state( ctx );
   _mesa_init_stencil( ctx );
   _mesa_init_transform( ctx );
   _mesa_init_varray( ctx );
   _mesa_init_viewport( ctx );

   if (!_mesa_init_texture( ctx ))
      return GL_FALSE;

   _mesa_init_texture_s3tc( ctx );

   /* Miscellaneous */
   ctx->NewState = _NEW_ALL;
   ctx->ErrorValue = (GLenum) GL_NO_ERROR;
   ctx->varying_vp_inputs = ~0;

   return GL_TRUE;
}
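A hedged sketch of the call-site shape for init_attrib_groups(): the function is static, so it is driven from context creation in the same source file. The wrapper name below is illustrative and the real caller does substantially more work; the point shown is that the GL_FALSE return (texture setup failure) must be propagated.

static GLboolean
initialize_context_sketch(GLcontext *ctx)
{
   if (!init_attrib_groups(ctx))
      return GL_FALSE;   /* texture setup failed, e.g. out of memory */

   /* ... device driver hooks, dispatch tables, etc. would follow ... */
   return GL_TRUE;
}
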
Example #2
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->private;
   int bo_reuse_mode;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL)
      return GL_FALSE;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->has_xrgb_textures = GL_TRUE;
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      intel->gen = 6;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_GEN5(intel->intelScreen->deviceID)) {
      intel->gen = 5;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_965(intel->intelScreen->deviceID)) {
      intel->gen = 4;
      if (IS_G4X(intel->intelScreen->deviceID)) {
	  intel->has_luminance_srgb = GL_TRUE;
	  intel->is_g4x = GL_TRUE;
      }
   } else if (IS_9XX(intel->intelScreen->deviceID)) {
      intel->gen = 3;
      if (IS_945(intel->intelScreen->deviceID)) {
	 intel->is_945 = GL_TRUE;
      }
   } else {
      intel->gen = 2;
      if (intel->intelScreen->deviceID == PCI_CHIP_I830_M ||
	  intel->intelScreen->deviceID == PCI_CHIP_845_G) {
	 intel->has_xrgb_textures = GL_FALSE;
      }
   }

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
		       (intel->gen >= 4) ? "i965" : "i915");
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
      if (value > 0) {
         intel->conformance_mode = value;
      }
      else {
         intel->conformance_mode = 1;
      }
   }

   if (intel->conformance_mode > 0) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* Reinitialize the context point state.
    * It depends on constants in __GLcontextRec::Const.
    */
   _mesa_init_point(ctx);

   meta_init_metaops(ctx, &intel->meta);
   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */
   if (intel->gen >= 4) {
      if (MAX_WIDTH > 8192)
	 ctx->Const.MaxRenderbufferSize = 8192;
   } else {
      if (MAX_WIDTH > 2048)
	 ctx->Const.MaxRenderbufferSize = 2048;
   }

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);
 
   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (intel->gen >= 4)
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }
   intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
					       "texture_tiling");
   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
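The INTEL_STRICT_CONFORMANCE handling above packs three cases into one block: an unset variable leaves the mode at 0, a positive number selects that mode, and anything else falls back to 1. A standalone sketch of the same pattern (the helper name and the main() harness are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static unsigned
parse_conformance_mode(void)
{
   const char *s = getenv("INTEL_STRICT_CONFORMANCE");
   if (!s)
      return 0;                  /* unset: strict conformance disabled */
   int value = atoi(s);
   return value > 0 ? (unsigned) value : 1;   /* "0" or junk still enables mode 1 */
}

int
main(void)
{
   printf("conformance mode: %u\n", parse_conformance_mode());
   return 0;
}
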
Example #3
bool
intelInitContext(struct intel_context *intel,
		 int api,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;
   struct gl_config visual;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL)
      return false;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
      intel->saved_viewport = functions->Viewport;
      functions->Viewport = intel_viewport;
   }

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions)) {
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return false;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->gen = intelScreen->gen;

   const int devID = intelScreen->deviceID;
   if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
      intel->gt = 1;
   else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
      intel->gt = 2;
   else
      intel->gt = 0;

   if (IS_HASWELL(devID)) {
      intel->is_haswell = true;
   } else if (IS_G4X(devID)) {
      intel->is_g4x = true;
   } else if (IS_945(devID)) {
      intel->is_945 = true;
   }

   if (intel->gen >= 5) {
      intel->needs_ff_sync = true;
   }

   intel->has_separate_stencil = intel->intelScreen->hw_has_separate_stencil;
   intel->must_use_separate_stencil = intel->intelScreen->hw_must_use_separate_stencil;
   intel->has_hiz = intel->gen >= 6 && !intel->is_haswell;
   intel->has_llc = intel->intelScreen->hw_has_llc;
   intel->has_swizzling = intel->intelScreen->hw_has_swizzling;

   memset(&ctx->TextureFormatSupported,
	  0, sizeof(ctx->TextureFormatSupported));

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
   if (intel->gen < 4)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = sizeof(intel->batch.map);

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.MaxSamples = 1.0;

   if (intel->gen >= 6)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* Reinitialize the context point state.
    * It depends on constants in struct gl_context::Const.
    */
   _mesa_init_point(ctx);

   if (intel->gen >= 4) {
      ctx->Const.MaxRenderbufferSize = 8192;
   } else {
      ctx->Const.MaxRenderbufferSize = 2048;
   }

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (intel->gen <= 3 || api != API_OPENGL_CORE) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (intel->gen >= 4)
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);
   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && intel->gen < 7) {
      fprintf(stderr,
              "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
   }

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
					       "texture_tiling");
   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   if (!driQueryOptionb(&intel->optionCache, "hiz")) {
       intel->has_hiz = false;
       /* On gen6, you can only do separate stencil with HIZ. */
       if (intel->gen == 6)
	  intel->has_separate_stencil = false;
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
#ifdef I915
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }
#endif

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   return true;
}
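The INTEL_DEBUG checks above use a gate-then-clear pattern: a requested debug flag is stripped from the mask again when the hardware generation cannot honor it, so downstream code only ever tests the mask. A minimal standalone sketch with hypothetical flag values:

#include <stdio.h>

#define DEBUG_BUFMGR      (1u << 0)   /* illustrative bit assignments */
#define DEBUG_SHADER_TIME (1u << 1)

static unsigned
filter_debug_flags(unsigned flags, int gen)
{
   if ((flags & DEBUG_SHADER_TIME) && gen < 7) {
      fprintf(stderr, "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      flags &= ~DEBUG_SHADER_TIME;
   }
   return flags;
}

int
main(void)
{
   /* On a gen6 part, the shader_time bit is dropped again. */
   printf("%#x\n", filter_debug_flags(DEBUG_BUFMGR | DEBUG_SHADER_TIME, 6));
   return 0;
}
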
Example #4
bool
brwCreateContext(int api,
	         const struct gl_config *mesaVis,
		 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 unsigned *error,
	         void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *screen = sPriv->driverPrivate;
   struct dd_function_table functions;

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   /* brwInitVtbl needs to know the chipset generation so that it can set the
    * right pointers.
    */
   brw->gen = screen->gen;

   brwInitVtbl( brw );

   brwInitDriverFunctions(screen, &functions);

   struct gl_context *ctx = &brw->ctx;

   if (!intelInitContext( brw, api, major_version, minor_version,
                          mesaVis, driContextPriv,
			  sharedContextPrivate, &functions,
			  error)) {
      ralloc_free(brw);
      return false;
   }

   brw_initialize_context_constants(brw);

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         ralloc_free(brw);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   /* Initialize swrast, tnl driver tables: */
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   if (tnl)
      tnl->Driver.RunPipeline = _tnl_run_pipeline;

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;

   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
      brw->has_surface_tile_offset = true;
      if (brw->gen < 6)
	  brw->has_compr4 = true;
      brw->has_aa_line_parameters = true;
      brw->has_pln = true;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   /* WM maximum threads is number of EUs times number of threads per EU. */
   assert(brw->gen <= 7);

   if (brw->is_haswell) {
      if (brw->gt == 1) {
	 brw->max_wm_threads = 102;
	 brw->max_vs_threads = 70;
	 brw->max_gs_threads = 70;
	 brw->urb.size = 128;
         brw->urb.min_vs_entries = 32;
	 brw->urb.max_vs_entries = 640;
	 brw->urb.max_gs_entries = 256;
      } else if (brw->gt == 2) {
	 brw->max_wm_threads = 204;
	 brw->max_vs_threads = 280;
	 brw->max_gs_threads = 256;
	 brw->urb.size = 256;
         brw->urb.min_vs_entries = 64;
	 brw->urb.max_vs_entries = 1664;
	 brw->urb.max_gs_entries = 640;
      } else if (brw->gt == 3) {
	 brw->max_wm_threads = 408;
	 brw->max_vs_threads = 280;
	 brw->max_gs_threads = 256;
	 brw->urb.size = 512;
         brw->urb.min_vs_entries = 64;
	 brw->urb.max_vs_entries = 1664;
	 brw->urb.max_gs_entries = 640;
      }
   } else if (brw->gen == 7) {
      if (brw->gt == 1) {
	 brw->max_wm_threads = 48;
	 brw->max_vs_threads = 36;
	 brw->max_gs_threads = 36;
	 brw->urb.size = 128;
         brw->urb.min_vs_entries = 32;
	 brw->urb.max_vs_entries = 512;
	 brw->urb.max_gs_entries = 192;
      } else if (brw->gt == 2) {
	 brw->max_wm_threads = 172;
	 brw->max_vs_threads = 128;
	 brw->max_gs_threads = 128;
	 brw->urb.size = 256;
         brw->urb.min_vs_entries = 32;
	 brw->urb.max_vs_entries = 704;
	 brw->urb.max_gs_entries = 320;
      } else {
	 assert(!"Unknown gen7 device.");
      }
   } else if (brw->gen == 6) {
      if (brw->gt == 2) {
	 brw->max_wm_threads = 80;
	 brw->max_vs_threads = 60;
	 brw->max_gs_threads = 60;
	 brw->urb.size = 64;            /* volume 5c.5 section 5.1 */
         brw->urb.min_vs_entries = 24;
	 brw->urb.max_vs_entries = 256; /* volume 2a (see 3DSTATE_URB) */
	 brw->urb.max_gs_entries = 256;
      } else {
	 brw->max_wm_threads = 40;
	 brw->max_vs_threads = 24;
	 brw->max_gs_threads = 21; /* conservative; 24 if rendering disabled */
	 brw->urb.size = 32;            /* volume 5c.5 section 5.1 */
         brw->urb.min_vs_entries = 24;
	 brw->urb.max_vs_entries = 256; /* volume 2a (see 3DSTATE_URB) */
	 brw->urb.max_gs_entries = 256;
      }
      brw->urb.gen6_gs_previously_active = false;
   } else if (brw->gen == 5) {
      brw->urb.size = 1024;
      brw->max_vs_threads = 72;
      brw->max_gs_threads = 32;
      brw->max_wm_threads = 12 * 6;
   } else if (brw->is_g4x) {
      brw->urb.size = 384;
      brw->max_vs_threads = 32;
      brw->max_gs_threads = 2;
      brw->max_wm_threads = 10 * 5;
   } else if (brw->gen < 6) {
      brw->urb.size = 256;
      brw->max_vs_threads = 16;
      brw->max_gs_threads = 2;
      brw->max_wm_threads = 8 * 4;
      brw->has_negative_rhw_bug = true;
   }

   if (brw->gen <= 7) {
      brw->needs_unlit_centroid_workaround = true;
   }

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;

   brw_init_state( brw );

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

   brw->state.dirty.mesa = ~0;
   brw->state.dirty.brw = ~0;

   /* Make sure that brw->state.dirty.brw has enough bits to hold all possible
    * dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->state.dirty.brw));

   brw->emit_state_always = 0;

   brw->batch.need_workaround_flush = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ContextFlags = 0;
   if ((flags & __DRI_CTX_FLAG_FORWARD_COMPATIBLE) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT;

   ctx->Debug.DebugOutput = GL_FALSE;
   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_DEBUG_BIT;
      ctx->Debug.DebugOutput = GL_TRUE;

      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   brw_fs_alloc_reg_sets(brw);
   brw_vec4_alloc_reg_set(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   return true;
}
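The per-GT thread and URB limits above are encoded as a nested if/else chain; Example #5 below moves them into a brw_device_info table instead. A reduced sketch of that table-driven idea, using illustrative struct and array names and the gen7 (Ivybridge) GT1/GT2 numbers from the chain above:

struct gen7_limits {
   int gt;
   int max_wm_threads;
   int max_vs_threads;
   int max_gs_threads;
   int urb_size;
};

static const struct gen7_limits gen7_limits_table[] = {
   { 1,  48,  36,  36, 128 },
   { 2, 172, 128, 128, 256 },
};
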
Example #5
GLboolean
brwCreateContext(gl_api api,
	         const struct gl_config *mesaVis,
		 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
	         void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      gen4_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
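brwCreateContext above rejects unknown context flags before any allocation happens: it builds a whitelist, widens it only when the kernel supports reset notification, and bails out with __DRI_CTX_ERROR_UNKNOWN_FLAG otherwise. A minimal standalone sketch of the same whitelist check, with hypothetical flag constants:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_DEBUG              (1u << 0)   /* illustrative values */
#define FLAG_FORWARD_COMPATIBLE (1u << 1)
#define FLAG_ROBUST_ACCESS      (1u << 2)

static bool
flags_allowed(uint32_t flags, bool has_reset_notification)
{
   uint32_t allowed = FLAG_DEBUG | FLAG_FORWARD_COMPATIBLE;

   if (has_reset_notification)
      allowed |= FLAG_ROBUST_ACCESS;

   return (flags & ~allowed) == 0;   /* any bit outside the whitelist fails */
}
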
bool
intelInitContext(struct intel_context *intel,
                 int api,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions,
                 unsigned *dri_ctx_error)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_noninvalidate_viewport;
   else
      functions->Viewport = intel_viewport;

   intel->intelScreen = intelScreen;

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __func__);
      return false;
   }

   driContextSetFlags(&intel->ctx, flags);

   driContextPriv->driverPrivate = intel;
   intel->driContext = driContextPriv;

   intel->gen = intelScreen->gen;

   const int devID = intelScreen->deviceID;

   intel->is_945 = IS_945(devID);

   intel->has_swizzling = intel->intelScreen->hw_has_swizzling;

   memset(&ctx->TextureFormatSupported,
	  0, sizeof(ctx->TextureFormatSupported));

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, "i915");
   intel->maxBatchSize = 4096;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;
   if (intel->gen == 2)
      gtt_size = 128 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   intel->max_gtt_map_object_size = gtt_size / 4;

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 7.0;
   ctx->Const.MaxLineWidthAA = 7.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* Reinitialize the context point state.
    * It depends on constants in struct gl_context::Const.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxRenderbufferSize = 2048;

   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   intel->hw_stipple = 1;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = parse_debug_string(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);
   if (INTEL_DEBUG & DEBUG_PERF)
      intel->perf_debug = true;

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      intel->disable_throttling = 1;
   }

   return true;
}
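The max_gtt_map_object_size heuristic above divides the assumed mappable aperture by four rather than two, to leave headroom for permanent GTT residents such as the framebuffer and ring buffer. A worked check of the resulting caps (aperture sizes are the assumptions stated in the comments):

#include <stdio.h>

int
main(void)
{
   unsigned gtt_size = 256u * 1024 * 1024;   /* typical mappable aperture */
   unsigned gen2_gtt = 128u * 1024 * 1024;   /* older gen2 hardware */

   printf("map cap: %u MiB (gen3+), %u MiB (gen2)\n",
          (gtt_size / 4) >> 20, (gen2_gtt / 4) >> 20);   /* 64 and 32 MiB */
   return 0;
}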