Example No. 1
static void
intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   (void) x;
   (void) y;
   (void) w;
   (void) h;

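   /* A viewport change on a window-system framebuffer typically follows a
    * window resize, so invalidate the DRI2 drawables to re-query the buffers
    * on the next draw.
    */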
   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
Example No. 2
/**
 * Check if the hardware's cut index support can handle the primitive
 * restart case.
 */
static bool
can_cut_index_handle_prims(struct gl_context *ctx,
                           const struct _mesa_prim *prim,
                           GLuint nr_prims,
                           const struct _mesa_index_buffer *ib)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->sol.counting_primitives_generated ||
       brw->sol.counting_primitives_written) {
      /* Counting primitives generated in hardware is not currently
       * supported, so take the software path. We need to investigate
       * the *_PRIMITIVES_COUNT registers to allow this to be handled
       * entirely in hardware.
       */
      return false;
   }

   if (!can_cut_index_handle_restart_index(ctx, ib)) {
      /* The primitive restart index can't be handled, so take
       * the software path
       */
      return false;
   }

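   /* Verify that every primitive in the draw uses a mode the cut index
    * hardware supports.
    */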
   for ( ; nr_prims > 0; nr_prims--, prim++) {
      switch (prim->mode) {
      case GL_POINTS:
      case GL_LINES:
      case GL_LINE_STRIP:
      case GL_TRIANGLES:
      case GL_TRIANGLE_STRIP:
         /* Cut index supports these primitive types */
         break;
      default:
         /* Cut index does not support these primitive types */
      //case GL_LINE_LOOP:
      //case GL_TRIANGLE_FAN:
      //case GL_QUADS:
      //case GL_QUAD_STRIP:
      //case GL_POLYGON:
         return false;
      }
   }

   return true;
}
Example No. 3
bool
brw_fs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_wm_prog_key key;

   struct gl_fragment_program *fp = (struct gl_fragment_program *) prog;
   struct brw_fragment_program *bfp = brw_fragment_program(fp);

   memset(&key, 0, sizeof(key));

   if (brw->gen < 6) {
      if (fp->UsesKill)
         key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
      key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.InputsRead &
                                         BRW_FS_VARYING_INPUT_MASK) > 16)
      key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;

   brw_setup_tex_for_precompile(brw, &key.tex, &fp->Base);

   key.nr_color_regions = _mesa_bitcount_64(fp->Base.OutputsWritten &
         ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
         BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));

   key.program_string_id = bfp->id;

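   /* Save the current WM program state so the precompile below doesn't
    * clobber what is bound; it is restored after code generation.
    */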
   uint32_t old_prog_offset = brw->wm.base.prog_offset;
   struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;

   bool success = brw_codegen_wm_prog(brw, shader_prog, bfp, &key);

   brw->wm.base.prog_offset = old_prog_offset;
   brw->wm.prog_data = old_prog_data;

   return success;
}
Example No. 4
static void
intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
			      struct gl_texture_object *texObj,
			      struct gl_texture_image *texImage,
			      GLeglImageOES image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIscreen *screen;
   __DRIimage *image;

   screen = brw->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
					      screen->loaderPrivate);
   if (image == NULL)
      return;

   /* We support external textures only for EGLImages created with
    * EGL_EXT_image_dma_buf_import. We may lift that restriction in the future.
    */
   if (target == GL_TEXTURE_EXTERNAL_OES && !image->dma_buf_imported) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetTexture2DOES(external target is enabled only "
               "for images created with EGL_EXT_image_dma_buf_import");
      return;
   }

   /* Disallow depth/stencil textures: we don't have a way to pass the
    * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
    */
   if (image->has_depthstencil) {
      _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
      return;
   }

   /* Disable creation of the texture's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   intel_set_texture_image_bo(ctx, texImage, image->bo,
                              target, image->internal_format,
                              image->format, image->offset,
                              image->width,  image->height,
                              image->pitch,
                              image->tile_x, image->tile_y,
                              MIPTREE_LAYOUT_DISABLE_AUX);
}
Example No. 5
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
					struct gl_renderbuffer *rb,
					void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = brw->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
					      screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_bo(brw,
                                         image->region->bo,
                                         image->format,
                                         image->offset,
                                         image->region->width,
                                         image->region->height,
                                         image->region->pitch,
                                         image->region->tiling);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
   rb->NeedsFinishRenderTexture = true;
}
Example No. 6
static GLboolean
intel_texture_view(struct gl_context *ctx,
                   struct gl_texture_object *texObj,
                   struct gl_texture_object *origTexObj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_tex = intel_texture_object(texObj);
   struct intel_texture_object *intel_orig_tex = intel_texture_object(origTexObj);

   assert(intel_orig_tex->mt);
   intel_miptree_reference(&intel_tex->mt, intel_orig_tex->mt);

   /* Since we can only make views of immutable-format textures,
    * we can assume that everything is in origTexObj's miptree.
    *
    * Mesa core has already made us a copy of all the teximage objects,
    * except it hasn't copied our mt pointers, etc.
    */
   const int numFaces = _mesa_num_tex_faces(texObj->Target);
   const int numLevels = texObj->NumLevels;

   int face;
   int level;

   for (face = 0; face < numFaces; face++) {
      for (level = 0; level < numLevels; level++) {
         struct gl_texture_image *image = texObj->Image[face][level];
         struct intel_texture_image *intel_image = intel_texture_image(image);

         intel_miptree_reference(&intel_image->mt, intel_orig_tex->mt);
      }
   }

   /* The miptree is in a validated state, so no need to check later. */
   intel_tex->needs_validate = false;
   intel_tex->validated_first_level = 0;
   intel_tex->validated_last_level = numLevels - 1;

   /* Set the validated texture format, with the same adjustments that
    * would have been applied to determine the underlying texture's
    * mt->format.
    */
   intel_tex->_Format = intel_depth_format_for_depthstencil_format(
         intel_lower_compressed_format(brw, texObj->Image[0][0]->TexFormat));

   return GL_TRUE;
}
Example No. 7
void brw_draw_prims( struct gl_context *ctx,
		     const struct _mesa_prim *prims,
		     GLuint nr_prims,
		     const struct _mesa_index_buffer *ib,
		     GLboolean index_bounds_valid,
		     GLuint min_index,
		     GLuint max_index,
		     struct gl_transform_feedback_object *tfb_vertcount )
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
      return;
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
}
Example No. 8
static void *brw_create_fs_state( struct pipe_context *pipe,
				  const struct pipe_shader_state *shader )
{
   struct brw_context *brw = brw_context(pipe);
   struct brw_fragment_shader *fs;
   int i;

   fs = CALLOC_STRUCT(brw_fragment_shader);
   if (fs == NULL)
      return NULL;

   /* Duplicate tokens, scan shader
    */
   fs->id = brw->program_id++;

   fs->tokens = tgsi_dup_tokens(shader->tokens);
   if (fs->tokens == NULL)
      goto fail;

   tgsi_scan_shader(fs->tokens, &fs->info);
   scan_immediates(fs->tokens, &fs->info, &fs->immediates);

   /* The scan results must be available before checking for flow control. */
   fs->has_flow_control = has_flow_control(&fs->info);

   fs->signature.nr_inputs = fs->info.num_inputs;
   for (i = 0; i < fs->info.num_inputs; i++) {
      fs->signature.input[i].interp = fs->info.input_interpolate[i];
      fs->signature.input[i].semantic = fs->info.input_semantic_name[i];
      fs->signature.input[i].semantic_index = fs->info.input_semantic_index[i];
   }

   for (i = 0; i < fs->info.num_inputs; i++)
      if (fs->info.input_semantic_name[i] == TGSI_SEMANTIC_POSITION)
	 fs->uses_depth = 1;

   if (fs->info.uses_kill)
      fs->iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

   if (fs->info.writes_z)
      fs->iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

   return (void *)fs;

fail:
   FREE(fs);
   return NULL;
}
Example No. 9
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.Enabled && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = true;
   }

   intel_prepare_render(intel);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
	 DBG("fast clear: depth\n");
	 mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
				 BUFFER_BIT_STENCIL |
				 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&intel->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
Example No. 10
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   /* Reading pixels won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render().
    */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

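   /* For PBO destinations, first try a GPU copy via blorp; only fall back to
    * a CPU mapping if that path can't handle the request.
    */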
   if (_mesa_is_bufferobj(pack->BufferObj)) {
      if (intel_readpixels_blorp(ctx, x, y, width, height,
                                 format, type, pixels, pack))
         return;

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
Example No. 11
static void meta_depth_replace( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   /* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_TRUE )
    * ctx->Driver.DepthMask( ctx, GL_TRUE )
    */
   brw->metaops.attribs.Depth->Test = GL_TRUE;
   brw->metaops.attribs.Depth->Mask = GL_TRUE;
   brw->state.dirty.mesa |= _NEW_DEPTH;

   /* ctx->Driver.DepthFunc( ctx, GL_ALWAYS )
    */
   brw->metaops.attribs.Depth->Func = GL_ALWAYS;

   brw->state.dirty.mesa |= _NEW_DEPTH;
}
Example No. 12
static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}
Example No. 13
/**
 * Check if the hardware's cut index support can handle the primitive
 * restart case.
 */
static bool
can_cut_index_handle_prims(struct gl_context *ctx,
                           const struct _mesa_prim *prim,
                           GLuint nr_prims,
                           const struct _mesa_index_buffer *ib)
{
   struct brw_context *brw = brw_context(ctx);

   /* Haswell and Gen8+ hardware can handle every case. */
   if (brw->gen >= 8 || brw->is_haswell)
      return true;

   if (!can_cut_index_handle_restart_index(ctx, ib)) {
      /* The primitive restart index can't be handled, so take
       * the software path
       */
      return false;
   }

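   /* Verify that every primitive in the draw uses a mode the cut index
    * hardware supports.
    */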
   for (int i = 0; i < nr_prims; i++) {
      switch (prim[i].mode) {
      case GL_POINTS:
      case GL_LINES:
      case GL_LINE_STRIP:
      case GL_TRIANGLES:
      case GL_TRIANGLE_STRIP:
      case GL_LINES_ADJACENCY:
      case GL_LINE_STRIP_ADJACENCY:
      case GL_TRIANGLES_ADJACENCY:
      case GL_TRIANGLE_STRIP_ADJACENCY:
         /* Cut index supports these primitive types */
         break;
      default:
         /* Cut index does not support these primitive types */
      //case GL_LINE_LOOP:
      //case GL_TRIANGLE_FAN:
      //case GL_QUADS:
      //case GL_QUAD_STRIP:
      //case GL_POLYGON:
         return false;
      }
   }

   return true;
}
Example No. 14
static
void brw_update_texture_surface( GLcontext *ctx, 
				 GLuint unit,
				 struct brw_surface_state *surf )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = brw->attribs.Texture->Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];

   memset(surf, 0, sizeof(*surf));

   surf->ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;   
   surf->ss0.surface_type = translate_tex_target(tObj->Target);
   surf->ss0.surface_format = translate_tex_format(firstImage->TexFormat->MesaFormat, tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
/*    surf->ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */

   /* BRW_NEW_LOCK */
   surf->ss1.base_addr = bmBufferOffset(intel,
					intelObj->mt->region->buffer);

   surf->ss2.mip_count = intelObj->lastLevel - intelObj->firstLevel;
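   /* SURFACE_STATE encodes width, height, depth and mip count as one less
    * than the actual value.
    */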
   surf->ss2.width = firstImage->Width - 1;
   surf->ss2.height = firstImage->Height - 1;

   surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
   surf->ss3.tiled_surface = intelObj->mt->region->tiled; /* always zero */
   surf->ss3.pitch = (intelObj->mt->pitch * intelObj->mt->cpp) - 1;
   surf->ss3.depth = firstImage->Depth - 1;

   surf->ss4.min_lod = 0;
 
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf->ss0.cube_pos_x = 1;
      surf->ss0.cube_pos_y = 1;
      surf->ss0.cube_pos_z = 1;
      surf->ss0.cube_neg_x = 1;
      surf->ss0.cube_neg_y = 1;
      surf->ss0.cube_neg_z = 1;
   }
}
Example No. 15
void
brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
			     struct gl_transform_feedback_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_shader_program *vs_prog =
      ctx->Shader.CurrentVertexProgram;
   const struct gl_transform_feedback_info *linked_xfb_info =
      &vs_prog->LinkedTransformFeedback;
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   assert(brw->gen == 6);

   /* Compute the maximum number of vertices that we can write without
    * overflowing any of the buffers currently being used for feedback.
    */
   unsigned max_index
      = _mesa_compute_max_transform_feedback_vertices(xfb_obj,
                                                      linked_xfb_info);

   /* 3DSTATE_GS_SVB_INDEX is non-pipelined. */
   intel_emit_post_sync_nonzero_flush(brw);

   /* Initialize the SVBI 0 register to zero and set the maximum index. */
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
   OUT_BATCH(0); /* SVBI 0 */
   OUT_BATCH(0); /* starting index */
   OUT_BATCH(max_index);
   ADVANCE_BATCH();

   /* Initialize the rest of the unused streams to sane values.  Otherwise,
    * they may indicate that there is no room to write data and prevent
    * anything from happening at all.
    */
   for (int i = 1; i < 4; i++) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
      OUT_BATCH(i << SVB_INDEX_SHIFT);
      OUT_BATCH(0); /* starting index */
      OUT_BATCH(0xffffffff);
      ADVANCE_BATCH();
   }
}
Example No. 16
/**
 * Query information about GPU resets observed by this context
 *
 * Called via \c dd_function_table::GetGraphicsResetStatus.
 */
GLenum
brw_get_graphics_reset_status(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   int err;
   uint32_t reset_count;
   uint32_t active;
   uint32_t pending;

   /* If hardware contexts are not being used (or
    * DRM_IOCTL_I915_GET_RESET_STATS is not supported), this function should
    * not be accessible.
    */
   assert(brw->hw_ctx != NULL);

#if 0
   /* This is waiting until the kernel code can be merged and a new libdrm
    * actually released.
    */
   err = drm_intel_get_reset_stats(brw->hw_ctx, &reset_count, &active,
                                   &pending);
   if (err)
      return GL_NO_ERROR;
#else
   return GL_NO_ERROR;
#endif

   /* A reset was observed while a batch from this context was executing.
    * Assume that this context was at fault.
    */
   if (active != 0)
      return GL_GUILTY_CONTEXT_RESET_ARB;

   /* A reset was observed while a batch from this context was in progress,
    * but the batch was not executing.  In this case, assume that the context
    * was not at fault.
    */
   if (pending != 0)
      return GL_INNOCENT_CONTEXT_RESET_ARB;

   /* FINISHME: Should we report anything if reset_count > brw->reset_count?
    */

   return GL_NO_ERROR;
}
Example No. 17
static void
intel_texture_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

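   /* Flush the depth and render caches and then invalidate the texture cache
    * so that subsequent texture fetches see the results of prior rendering.
    */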
   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }
}
Example No. 18
static void brw_bind_fs_state( struct pipe_context *pipe, void *prog )
{
   struct brw_fragment_shader *fs = (struct brw_fragment_shader *)prog;
   struct brw_context *brw = brw_context(pipe);
   
   if (brw->curr.fragment_shader == fs)
      return;

   if (brw->curr.fragment_shader == NULL ||
       fs == NULL ||
       memcmp(&brw->curr.fragment_shader->signature, &fs->signature,
              brw_fs_signature_size(&fs->signature)) != 0) {
      brw->state.dirty.mesa |= PIPE_NEW_FRAGMENT_SIGNATURE;
   }

   brw->curr.fragment_shader = fs;
   brw->state.dirty.mesa |= PIPE_NEW_FRAGMENT_SHADER;
}
Example No. 19
static void
brw_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                  GLenum condition, GLbitfield flags)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   /* brw_fence_insert_locked() assumes it must do a complete flush */
   assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);

   brw_fence_init(brw, &sync->fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, &sync->fence)) {
      /* FIXME: There exists no way to report a GL error here. If an error
       * occurs, continue silently and hope for the best.
       */
   }
}
Example No. 20
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

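   /* When coherent framebuffer fetch isn't exposed, the barrier must make
    * prior color writes visible: flush the render target and invalidate the
    * texture cache before the next read.
    */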
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (brw->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}
Example No. 21
static void
intelDRI2Flush(__DRIdrawable *drawable)
{
   GET_CURRENT_CONTEXT(ctx);
   struct brw_context *brw = brw_context(ctx);
   if (brw == NULL)
      return;

   intel_resolve_for_dri2_flush(brw, drawable);
   brw->need_throttle = true;

   if (brw->batch.used)
      intel_batchbuffer_flush(brw);

   if (INTEL_DEBUG & DEBUG_AUB) {
      aub_dump_bmp(ctx);
   }
}
Example No. 22
struct gl_transform_feedback_object *
brw_new_transform_feedback(struct gl_context *ctx, GLuint name)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      CALLOC_STRUCT(brw_transform_feedback_object);
   if (!brw_obj)
      return NULL;

   _mesa_init_transform_feedback_object(&brw_obj->base, name);

   brw_obj->offset_bo =
      drm_intel_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
   brw_obj->prim_count_bo =
      drm_intel_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);

   return &brw_obj->base;
}
Example No. 23
/**
 * called from intelDestroyContext()
 */
static void brw_destroy_context( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   brw_destroy_state(brw);
   brw_draw_destroy( brw );

   ralloc_free(brw->wm.compile_data);

   dri_bo_release(&brw->curbe.curbe_bo);
   dri_bo_release(&brw->vs.const_bo);
   dri_bo_release(&brw->wm.const_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(intel->hw_ctx);
}
Example No. 24
static struct gl_program *brwNewProgram( struct gl_context *ctx,
				      GLenum target,
				      GLuint id )
{
   struct brw_context *brw = brw_context(ctx);

   switch (target) {
   case GL_VERTEX_PROGRAM_ARB:
   case GL_TESS_CONTROL_PROGRAM_NV:
   case GL_TESS_EVALUATION_PROGRAM_NV:
   case GL_GEOMETRY_PROGRAM_NV:
   case GL_COMPUTE_PROGRAM_NV: {
      struct brw_program *prog = rzalloc(NULL, struct brw_program);
      if (prog) {
	 prog->id = get_new_program_id(brw->screen);

	 return _mesa_init_gl_program(&prog->program, target, id);
      }
      else
	 return NULL;
   }

   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *prog;
      if (brw->gen < 6) {
         struct gen4_fragment_program *g4_prog =
            rzalloc(NULL, struct gen4_fragment_program);
         prog = &g4_prog->base;
      } else {
         prog = rzalloc(NULL, struct brw_program);
      }

      if (prog) {
	 prog->id = get_new_program_id(brw->screen);

	 return _mesa_init_gl_program(&prog->program, target, id);
      }
      else
	 return NULL;
   }

   default:
      unreachable("Unsupported target in brwNewProgram()");
   }
}
Example No. 25
/**
 * called from intelFlushBatchLocked
 */
static void brw_new_batch( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   /* If the kernel supports hardware contexts, then most hardware state is
    * preserved between batches; we only need to re-emit state that is required
    * to be in every batch.  Otherwise we need to re-emit all the state that
    * would otherwise be stored in the context (which for all intents and
    * purposes means everything).
    */
   if (intel->hw_ctx == NULL)
      brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   brw->state.dirty.brw |= BRW_NEW_BATCH;

   /* Assume that the last command before the start of our batch was a
    * primitive, for safety.
    */
   intel->batch.need_workaround_flush = true;

   brw->state_batch_count = 0;

   /* Gen7 needs to track what the real transform feedback vertex count was at
    * the start of the batch, since the kernel will be resetting the offset to
    * 0.
    */
   brw->sol.offset_0_batch_start = brw->sol.svbi_0_starting_index;

   brw->ib.type = -1;

   /* Mark that the current program cache BO has been used by the GPU.
    * It will be reallocated if we need to put new programs in for the
    * next batch.
    */
   brw->cache.bo_used_by_gpu = true;

   /* We need to periodically reap the shader time results, because rollover
    * happens every few seconds.  We also want to see results every once in a
    * while, because many programs won't cleanly destroy our context, so the
    * end-of-run printout may not happen.
    */
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_collect_and_report_shader_time(brw);
}
Example No. 26
/* called from intelFlushBatchLocked
 */
static void brw_lost_hardware( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   /* Note that we effectively lose the context after this.
    * 
    * Setting this flag provokes a state buffer wrap and also flushes
    * the hardware caches.
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   /* Which means there shouldn't be any commands already queued:
    */
   assert(intel->batch->ptr == intel->batch->map + intel->batch->offset);

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;
}
Example No. 27
static void leave_meta_state( struct intel_context *intel )
{
   GLcontext *ctx = &intel->ctx;
   struct brw_context *brw = brw_context(ctx);

   restore_attribs(brw);

   ctx->DrawBuffer->_ColorDrawBufferMask[0] = brw->metaops.restore_draw_mask;
   ctx->FragmentProgram.Current = brw->metaops.restore_fp;

   brw->state.draw_region = brw->metaops.saved_draw_region;
   brw->state.depth_region = brw->metaops.saved_depth_region;
   brw->metaops.saved_draw_region = NULL;
   brw->metaops.saved_depth_region = NULL;
   brw->metaops.active = 0;

   brw->state.dirty.mesa |= _NEW_BUFFERS;
   brw->state.dirty.brw |= BRW_NEW_METAOPS;
}
Example No. 28
/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
			 struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
}
Example No. 29
static void
intel_get_tex_sub_image(struct gl_context *ctx,
                        GLint xoffset, GLint yoffset, GLint zoffset,
                        GLsizei width, GLsizei height, GLint depth,
                        GLenum format, GLenum type, GLvoid *pixels,
                        struct gl_texture_image *texImage)
{
   struct brw_context *brw = brw_context(ctx);
   bool ok;

   DBG("%s\n", __func__);

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      if (_mesa_meta_pbo_GetTexSubImage(ctx, 3, texImage,
                                        xoffset, yoffset, zoffset,
                                        width, height, depth, format, type,
                                        pixels, &ctx->Pack)) {
         /* Flush to guarantee coherency between the render cache and other
          * caches the PBO could potentially be bound to after this point.
          * See the related comment in intelReadPixels() for a more detailed
          * explanation.
          */
         brw_emit_mi_flush(brw);
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   ok = intel_gettexsubimage_tiled_memcpy(ctx, texImage, xoffset, yoffset,
                                          width, height,
                                          format, type, pixels, &ctx->Pack);

   if (ok)
      return;

   _mesa_meta_GetTexSubImage(ctx, xoffset, yoffset, zoffset,
                             width, height, depth,
                             format, type, pixels, texImage);

   DBG("%s - DONE\n", __func__);
}
Example No. 30
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}