Example #1
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   /* Reading pixels won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render().
    */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      /* Try a GPU (BLORP) blit straight into the PBO's buffer object first. */
      if (intel_readpixels_blorp(ctx, x, y, width, height,
                                 format, type, pixels, pack))
         return;

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   /* Fast path: map the BO and detile it with a CPU memcpy when the
    * format and layout allow it.
    */
   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
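
Neither example is reached directly by the application; glReadPixels() goes through Mesa core first, and the PBO branch above is only taken when a buffer is bound to GL_PIXEL_PACK_BUFFER on the client side. A minimal client-side sketch of that pattern, assuming a current desktop GL context whose headers expose the GL 2.1 buffer-object entry points (GL_GLEXT_PROTOTYPES with Mesa's headers); the names read_back_via_pbo, pbo, width and height are illustrative, not taken from the driver source:

#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>

static void
read_back_via_pbo(GLsizei width, GLsizei height)
{
   GLuint pbo;

   glGenBuffers(1, &pbo);
   glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
   glBufferData(GL_PIXEL_PACK_BUFFER, width * height * 4, NULL,
                GL_STREAM_READ);

   /* With a pack PBO bound, the pointer argument is an offset into the
    * buffer, so the driver can keep the readback on the GPU.
    */
   glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, (void *) 0);

   /* Mapping the buffer later gives the CPU access to the pixels. */
   const void *pixels = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
   (void) pixels;
   glUnmapBuffer(GL_PIXEL_PACK_BUFFER);

   glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
   glDeleteBuffers(1, &pbo);
}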
Example #2
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
                                        format, type, pixels, pack)) {
         /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
          * binding the user-provided BO as a fake framebuffer and rendering
          * to it.  This breaks the invariant of the GL that nothing is able
          * to render to a BO, causing nondeterministic corruption issues
          * because the render cache is not coherent with a number of other
          * caches that the BO could potentially be bound to afterwards.
          *
          * This could be solved in the same way that we guarantee texture
          * coherency after a texture is attached to a framebuffer and
          * rendered to, but that would involve checking *all* BOs bound to
          * the pipeline for the case we need to emit a cache flush due to
          * previous rendering to any of them -- Including vertex, index,
          * uniform, atomic counter, shader image, transform feedback,
          * indirect draw buffers, etc.
          *
          * That would increase the per-draw call overhead even though it's
          * very unlikely that any of the BOs bound to the pipeline has been
          * rendered to via a PBO at any point, so it seems better to just
          * flush here unconditionally.
          */
         brw_emit_mi_flush(brw);
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   /* Fast path: map the BO and detile it with a CPU memcpy when the
    * format and layout allow it.
    */
   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* glReadPixels() won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render(). */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
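
Both variants are wired up the same way: Mesa core dispatches glReadPixels() to the driver through the dd_function_table hook rather than calling intelReadPixels() by name. A minimal sketch of that wiring under the usual Mesa driver-init pattern, assuming it lives in the same translation unit as intelReadPixels() above; the wrapper name init_pixel_functions() is illustrative, not the driver's actual init function:

#include "main/dd.h"

static void
init_pixel_functions(struct dd_function_table *functions)
{
   /* glReadPixels() now lands in the driver entry point shown above. */
   functions->ReadPixels = intelReadPixels;
}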