Example #1
void
intelReadPixels(GLcontext * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intelFlush(ctx);
   intel_prepare_render(intel_context(ctx));

   if (do_blit_readpixels
       (ctx, x, y, width, height, format, type, pack, pixels))
      return;

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s: fallback to swrast\n", __FUNCTION__);

   /* Update Mesa state before calling down into _swrast_ReadPixels, as
    * the spans code requires the computed buffer states to be up to date,
    * but _swrast_ReadPixels only updates Mesa state after setting up
    * the spans code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
}
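
For context, the driver exposes this function through Mesa's dd_function_table. A minimal sketch of the registration, assuming the usual init-hook pattern (the surrounding init function is illustrative):

void
intelInitPixelFuncs(struct dd_function_table *functions)
{
   /* Hook the accelerated ReadPixels into core Mesa's driver dispatch
    * table.  Sketch only; the real init function sets other hooks too. */
   functions->ReadPixels = intelReadPixels;
}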
Example #2
static bool
intel_copy_texsubimage(struct intel_context *intel,
                       struct intel_texture_image *intelImage,
                       GLint dstx, GLint dsty, GLint slice,
                       struct intel_renderbuffer *irb,
                       GLint x, GLint y, GLsizei width, GLsizei height)
{
   const GLenum internalFormat = intelImage->base.Base.InternalFormat;

   intel_prepare_render(intel);

   if (!intelImage->mt || !irb || !irb->mt) {
      if (unlikely(INTEL_DEBUG & DEBUG_PERF))
	 fprintf(stderr, "%s fail %p %p (0x%08x)\n",
		 __FUNCTION__, intelImage->mt, irb, internalFormat);
      return false;
   }

   /* blit from src buffer to texture */
   if (!intel_miptree_blit(intel,
                           irb->mt, irb->mt_level, irb->mt_layer,
                           x, y, irb->Base.Base.Name == 0,
                           intelImage->mt, intelImage->base.Base.Level,
                           intelImage->base.Base.Face + slice,
                           dstx, dsty, false,
                           width, height, GL_COPY)) {
      return false;
   }

   return true;
}
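
A rough sketch of how a driver hook might wrap this helper, falling back to the meta path when the blit is refused. The wrapper itself is illustrative; _mesa_meta_CopyTexSubImage is the usual software-assisted fallback:

static void
intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
                     struct gl_texture_image *texImage,
                     GLint xoffset, GLint yoffset, GLint slice,
                     struct gl_renderbuffer *rb,
                     GLint x, GLint y, GLsizei width, GLsizei height)
{
   struct intel_context *intel = intel_context(ctx);

   /* Try the blitter first; fall back to the meta implementation
    * (ReadPixels + TexSubImage) when it cannot handle the copy. */
   if (!intel_copy_texsubimage(intel, intel_texture_image(texImage),
                               xoffset, yoffset, slice,
                               intel_renderbuffer(rb),
                               x, y, width, height)) {
      _mesa_meta_CopyTexSubImage(ctx, dims, texImage,
                                 xoffset, yoffset, slice,
                                 rb, x, y, width, height);
   }
}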
Example #3
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
	 DBG("fast clear: depth\n");
	 mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   /* BLORP is currently only supported on Gen6+. */
   if (brw->gen >= 6 && brw->gen < 8) {
      if (mask & BUFFER_BITS_COLOR) {
         if (brw_blorp_clear_color(brw, fb, mask, partial_clear)) {
            debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
            mask &= ~BUFFER_BITS_COLOR;
         }
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
				 BUFFER_BIT_STENCIL |
				 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&brw->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
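
debug_mask() above is a small static helper from the same file; a sketch of its likely shape (the exact output format is an assumption):

static void
debug_mask(const char *name, GLbitfield mask)
{
   /* Print which buffer bits a given clear path is handling. */
   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      fprintf(stderr, "%s clear:", name);
      for (int i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            fprintf(stderr, " buffer %d", i);
      }
      fprintf(stderr, "\n");
   }
}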
Example #4
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
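
brw_context() used above is the standard downcast accessor; since the gl_context is the first member of brw_context, it reduces to a cast:

static inline struct brw_context *
brw_context(struct gl_context *ctx)
{
   /* Valid because struct brw_context embeds its gl_context first. */
   return (struct brw_context *) ctx;
}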
Example #5
static bool
intel_copy_texsubimage(struct brw_context *brw,
                       struct intel_texture_image *intelImage,
                       GLint dstx, GLint dsty, GLint slice,
                       struct intel_renderbuffer *irb,
                       GLint x, GLint y, GLsizei width, GLsizei height)
{
   const GLenum internalFormat = intelImage->base.Base.InternalFormat;
   bool ret;

   /* No pixel transfer operations (zoom, bias, mapping), just a blit */
   if (brw->ctx._ImageTransferState)
      return false;

   intel_prepare_render(brw);

   /* glCopyTexSubImage() can be called on a multisampled renderbuffer (if
    * that renderbuffer is associated with the window system framebuffer),
    * however the hardware blitter can't handle this case, so fall back to
    * meta (which can, since it uses ReadPixels).
    */
   if (irb->Base.Base.NumSamples != 0)
      return false;

   /* glCopyTexSubImage() can't be called on a multisampled texture. */
   assert(intelImage->base.Base.NumSamples == 0);

   if (!intelImage->mt || !irb || !irb->mt) {
      if (unlikely(INTEL_DEBUG & DEBUG_PERF))
	 fprintf(stderr, "%s fail %p %p (0x%08x)\n",
		 __func__, intelImage->mt, irb, internalFormat);
      return false;
   }

   /* account for view parameters and face index */
   int dst_level = intelImage->base.Base.Level +
                   intelImage->base.Base.TexObject->MinLevel;
   int dst_slice = slice + intelImage->base.Base.Face +
                   intelImage->base.Base.TexObject->MinLayer;

   _mesa_unlock_texture(&brw->ctx, intelImage->base.Base.TexObject);

   /* blit from src buffer to texture */
   ret = intel_miptree_blit(brw,
                            irb->mt, irb->mt_level, irb->mt_layer,
                            x, y, irb->Base.Base.Name == 0,
                            intelImage->mt, dst_level, dst_slice,
                            dstx, dsty, false,
                            width, height, GL_COPY);

   _mesa_lock_texture(&brw->ctx, intelImage->base.Base.TexObject);

   return ret;
}
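
From the application side this path is reached via glCopyTexSubImage2D, for example:

/* Assume 'fbo' names a complete framebuffer and a 2D texture is bound.
 * Copies a 64x64 region of the read buffer into level 0 at (0, 0). */
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, 64, 64);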
Example #6
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct intel_context *intel;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      intel = (struct intel_context *) driContextPriv->driverPrivate;
   else
      intel = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (intel_context(curCtx) && intel_context(curCtx) != intel) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &intel->ctx;
      struct gl_framebuffer *fb, *readFb;
      
      if (driDrawPriv == NULL && driReadPriv == NULL) {
	 fb = _mesa_get_incomplete_framebuffer();
	 readFb = _mesa_get_incomplete_framebuffer();
      } else {
	 fb = driDrawPriv->driverPrivate;
	 readFb = driReadPriv->driverPrivate;
	 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
	 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      intel_prepare_render(intel);
      _mesa_make_current(ctx, fb, readFb);

      intel_gles3_srgb_workaround(intel, ctx->WinSysDrawBuffer);
      intel_gles3_srgb_workaround(intel, ctx->WinSysReadBuffer);

      /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
       * is NULL at that point.  We can't call _mesa_make_current()
       * first, since we need the buffer size for the initial
       * viewport.  So just call intel_draw_buffer() again here. */
      intel_draw_buffer(ctx);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
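
GET_CURRENT_CONTEXT() resolves the calling thread's current context; an approximate expansion (the real macro in Mesa's glapi differs in detail):

/* Approximate expansion; the real definition also consults a
 * fast-path global in single-threaded builds. */
#define GET_CURRENT_CONTEXT(C) \
   struct gl_context *C = (struct gl_context *) _glapi_get_context()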
Example #7
/**
 * Copy mipmap image between trees
 */
void
intel_miptree_image_copy(struct intel_context *intel,
                         struct intel_mipmap_tree *dst,
                         GLuint face, GLuint level,
                         struct intel_mipmap_tree *src)
{
    GLuint width = src->level[level].width;
    GLuint height = src->level[level].height;
    GLuint depth = src->level[level].depth;
    GLuint src_x, src_y, dst_x, dst_y;
    GLuint i;
    GLboolean success;

    if (dst->compressed) {
        GLuint align_w, align_h;

        intel_get_texture_alignment_unit(dst->internal_format,
                                         &align_w, &align_h);
        height = (height + 3) / 4;
        width = ALIGN(width, align_w);
    }

    intel_prepare_render(intel);

    for (i = 0; i < depth; i++) {
        intel_miptree_get_image_offset(src, level, face, i, &src_x, &src_y);
        intel_miptree_get_image_offset(dst, level, face, i, &dst_x, &dst_y);
        success = intel_region_copy(intel,
                                    dst->region, 0, dst_x, dst_y,
                                    src->region, 0, src_x, src_y,
                                    width, height, GL_FALSE,
                                    GL_COPY);
        if (!success) {
            GLubyte *src_ptr, *dst_ptr;

            src_ptr = intel_region_map(intel, src->region);
            dst_ptr = intel_region_map(intel, dst->region);

            _mesa_copy_rect(dst_ptr,
                            dst->cpp,
                            dst->region->pitch,
                            dst_x, dst_y, width, height,
                            src_ptr,
                            src->region->pitch,
                            src_x, src_y);
            intel_region_unmap(intel, src->region);
            intel_region_unmap(intel, dst->region);
        }
    }
}
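
ALIGN() rounds a value up to a power-of-two alignment; the common definition assumed here is:

/* Round value up to the next multiple of alignment; alignment must be
 * a power of two for the mask trick to be correct. */
#define ALIGN(value, alignment) \
   (((value) + (alignment) - 1) & ~((alignment) - 1))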
Example #8
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for a vertically flipped rectangle, then return
    * a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
		     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
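
A caller of this hook has to honor the possibly negative stride when walking rows; a minimal sketch (the helper name is illustrative):

static void
read_rows(struct gl_context *ctx, struct gl_renderbuffer *rb,
          GLuint x, GLuint y, GLuint w, GLuint h)
{
   GLubyte *map;
   GLint stride;

   ctx->Driver.MapRenderbuffer(ctx, rb, x, y, w, h,
                               GL_MAP_READ_BIT, &map, &stride);
   /* With a window-system renderbuffer the stride is negative, so 'map'
    * points at the last row in memory and row advances walk backwards. */
   for (GLuint row = 0; row < h; row++) {
      const GLubyte *src = map + (GLint) row * stride;
      (void) src;   /* ... inspect or copy one row of pixels ... */
   }
   ctx->Driver.UnmapRenderbuffer(ctx, rb);
}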
Example #9
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;
      
      if (driDrawPriv == NULL && driReadPriv == NULL) {
	 fb = _mesa_get_incomplete_framebuffer();
	 readFb = _mesa_get_incomplete_framebuffer();
      } else {
	 fb = driDrawPriv->driverPrivate;
	 readFb = driReadPriv->driverPrivate;
	 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
	 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      intel_prepare_render(brw);
      _mesa_make_current(ctx, fb, readFb);

      intel_gles3_srgb_workaround(brw, ctx->WinSysDrawBuffer);
      intel_gles3_srgb_workaround(brw, ctx->WinSysReadBuffer);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
Example #10
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   /* Reading pixels won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render().
    */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      if (intel_readpixels_blorp(ctx, x, y, width, height,
                                 format, type, pixels, pack))
         return;

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
Example #11
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct intel_context *intel = intel_context(ctx);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = true;
   }

   intel_prepare_render(intel);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
	 DBG("fast clear: depth\n");
	 mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
				 BUFFER_BIT_STENCIL |
				 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&intel->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
Example #12
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __FUNCTION__);

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      /* Using PBOs, so try the BLT based path. */
      if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack,
                             pixels)) {
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __FUNCTION__);
   }

   /* glReadPixels() won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render(). */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
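
_mesa_is_bufferobj() distinguishes a bound pixel buffer object from plain client memory; approximately:

/* True when a named buffer object is bound, i.e. the pixel data lives
 * in a PBO rather than client memory.  Approximate definition. */
static inline GLboolean
_mesa_is_bufferobj(const struct gl_buffer_object *obj)
{
   return obj != NULL && obj->Name != 0;
}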
Example #13
/**
 * Prepare for software rendering.  Map current read/draw framebuffers'
 * renderbuffers and all currently bound texture objects.
 *
 * Old note: Moved locking out to get reasonable span performance.
 */
void
intelSpanRenderStart(struct gl_context * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   intel_flush(&intel->ctx);
   intel_prepare_render(intel);

   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;

         intel_finalize_mipmap_tree(intel, i);
         intel_tex_map_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_framebuffer(intel, ctx->DrawBuffer, GL_TRUE);
   if (ctx->ReadBuffer != ctx->DrawBuffer)
      intel_map_unmap_framebuffer(intel, ctx->ReadBuffer, GL_TRUE);
}
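
A sketch of the matching teardown hook, which unmaps everything mapped above once software rendering finishes. It closely mirrors intelSpanRenderStart; details may differ from the real intelSpanRenderFinish:

void
intelSpanRenderFinish(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   /* Let swrast emit any deferred work before we pull the mappings. */
   _swrast_flush(ctx);

   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_unmap_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_framebuffer(intel, ctx->DrawBuffer, GL_FALSE);
   if (ctx->ReadBuffer != ctx->DrawBuffer)
      intel_map_unmap_framebuffer(intel, ctx->ReadBuffer, GL_FALSE);
}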
Example #14
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct intel_context *intel;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      intel = (struct intel_context *) driContextPriv->driverPrivate;
   else
      intel = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (intel_context(curCtx) && intel_context(curCtx) != intel) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
      struct gl_framebuffer *readFb = driReadPriv->driverPrivate;

      intel->driReadDrawable = driReadPriv;
      intel->driDrawable = driDrawPriv;
      driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      intel_prepare_render(intel);
      _mesa_make_current(&intel->ctx, fb, readFb);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
Example #15
bool
intel_copy_texsubimage(struct intel_context *intel,
                       struct intel_texture_image *intelImage,
                       GLint dstx, GLint dsty,
                       struct intel_renderbuffer *irb,
                       GLint x, GLint y, GLsizei width, GLsizei height)
{
    struct gl_context *ctx = &intel->ctx;
    struct intel_region *region;
    const GLenum internalFormat = intelImage->base.Base.InternalFormat;
    bool copy_supported = false;
    bool copy_supported_with_alpha_override = false;

    intel_prepare_render(intel);

    if (!intelImage->mt || !irb || !irb->mt) {
        if (unlikely(INTEL_DEBUG & DEBUG_PERF))
            fprintf(stderr, "%s fail %p %p (0x%08x)\n",
                    __FUNCTION__, intelImage->mt, irb, internalFormat);
        return false;
    } else {
        region = irb->mt->region;
        assert(region);
    }

    /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
     * Data Size Limitations):
     *
     *    The BLT engine is capable of transferring very large quantities of
     *    graphics data. Any graphics data read from and written to the
     *    destination is permitted to represent a number of pixels that
     *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
     *    at the destination. The maximum number of pixels that may be
     *    represented per scan line’s worth of graphics data depends on the
     *    color depth.
     *
     * Furthermore, intelEmitCopyBlit (which is called below) uses a signed
     * 16-bit integer to represent buffer pitch, so it can only handle buffer
     * pitches < 32k.
     *
     * As a result of these two limitations, we can only use the blitter to do
     * this copy when the region's pitch is less than 32k.
     */
    if (region->pitch >= 32768)
        return false;

    if (intelImage->base.Base.TexObject->Target == GL_TEXTURE_1D_ARRAY ||
            intelImage->base.Base.TexObject->Target == GL_TEXTURE_2D_ARRAY) {
        perf_debug("no support for array textures\n");
    }

    copy_supported = intelImage->base.Base.TexFormat == intel_rb_format(irb);

    /* Converting ARGB8888 to XRGB8888 is trivial: ignore the alpha bits */
    if (intel_rb_format(irb) == MESA_FORMAT_ARGB8888 &&
            intelImage->base.Base.TexFormat == MESA_FORMAT_XRGB8888) {
        copy_supported = true;
    }

    /* Converting XRGB8888 to ARGB8888 requires setting the alpha bits to 1.0 */
    if (intel_rb_format(irb) == MESA_FORMAT_XRGB8888 &&
            intelImage->base.Base.TexFormat == MESA_FORMAT_ARGB8888) {
        copy_supported_with_alpha_override = true;
    }

    if (!copy_supported && !copy_supported_with_alpha_override) {
        if (unlikely(INTEL_DEBUG & DEBUG_PERF))
            fprintf(stderr, "%s mismatched formats %s, %s\n",
                    __FUNCTION__,
                    _mesa_get_format_name(intelImage->base.Base.TexFormat),
                    _mesa_get_format_name(intel_rb_format(irb)));
        return false;
    }

    {
        GLuint image_x, image_y;
        GLshort src_pitch;

        /* get dest x/y in destination texture */
        intel_miptree_get_image_offset(intelImage->mt,
                                       intelImage->base.Base.Level,
                                       intelImage->base.Base.Face,
                                       &image_x, &image_y);

        /* The blitter can't handle Y-tiled buffers. */
        if (intelImage->mt->region->tiling == I915_TILING_Y) {
            return false;
        }

        if (_mesa_is_winsys_fbo(ctx->ReadBuffer)) {
            /* Flip vertical orientation for system framebuffers */
            y = ctx->ReadBuffer->Height - (y + height);
            src_pitch = -region->pitch;
        } else {
            /* reading from a FBO, y is already oriented the way we like */
            src_pitch = region->pitch;
        }

        /* blit from src buffer to texture */
        if (!intelEmitCopyBlit(intel,
                               intelImage->mt->cpp,
                               src_pitch,
                               region->bo,
                               0,
                               region->tiling,
                               intelImage->mt->region->pitch,
                               intelImage->mt->region->bo,
                               0,
                               intelImage->mt->region->tiling,
                               irb->draw_x + x, irb->draw_y + y,
                               image_x + dstx, image_y + dsty,
                               width, height,
                               GL_COPY)) {
            return false;
        }
    }

    if (copy_supported_with_alpha_override)
        intel_set_teximage_alpha_to_one(ctx, intelImage);

    return true;
}
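
The 32768 cutoff above follows directly from intelEmitCopyBlit packing the pitch into a signed 16-bit value; as an illustration (this helper is hypothetical):

#include <assert.h>
#include <limits.h>

/* Illustration of the limit discussed above: a GLshort pitch field
 * tops out at SHRT_MAX (32767), so any pitch >= 32768 would overflow. */
static void
check_blit_pitch(int pitch)
{
   assert(pitch <= SHRT_MAX);
}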
Example #16
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_try_draw_prims(struct gl_context *ctx,
                   const struct gl_client_array *arrays[],
                   const struct _mesa_prim *prims,
                   GLuint nr_prims,
                   const struct _mesa_index_buffer *ib,
                   GLuint min_index,
                   GLuint max_index,
                   struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs(brw, arrays);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;
      const int sampler_state_size = 16;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += BRW_MAX_TEX_UNIT *
         (sampler_state_size + sizeof(struct gen5_sampler_default_color));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
      intel_batchbuffer_save_state(brw);

      if (brw->num_instances != prims[i].num_instances ||
          brw->basevertex != prims[i].basevertex) {
         brw->num_instances = prims[i].num_instances;
         brw->basevertex = prims[i].basevertex;
         if (i > 0) { /* For i == 0 we just did this before the loop */
            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
            brw_merge_inputs(brw, arrays);
         }
      }

      brw->draw.gl_basevertex =
         prims[i].indexed ? prims[i].basevertex : prims[i].start;

      drm_intel_bo_unreference(brw->draw.draw_params_bo);

      if (prims[i].is_indirect) {
         /* Point draw_params_bo at the indirect buffer. */
         brw->draw.draw_params_bo =
            intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
         drm_intel_bo_reference(brw->draw.draw_params_bo);
         brw->draw.draw_params_offset =
            prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
      } else {
         /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
          * has to upload gl_BaseVertex and such if they're needed.
          */
         brw->draw.draw_params_bo = NULL;
         brw->draw.draw_params_offset = 0;
      }

      if (brw->gen < 6)
	 brw_set_prim(brw, &prims[i]);
      else
	 gen6_set_prim(brw, &prims[i]);

retry:

      /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->ctx.NewDriverState.
       */
      if (brw->ctx.NewDriverState) {
	 brw->no_batch_wrap = true;
	 brw_upload_render_state(brw);
      }

      brw_emit_prim(brw, &prims[i], brw->primitive);

      brw->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
	 if (!fail_next) {
	    intel_batchbuffer_reset_to_saved(brw);
	    intel_batchbuffer_flush(brw);
	    fail_next = true;
	    goto retry;
	 } else {
            int ret = intel_batchbuffer_flush(brw);
            WARN_ONCE(ret == -ENOSPC,
                      "i965: Single primitive emit exceeded "
                      "available aperture space\n");
	 }
      }

      /* Now that we know we haven't run out of aperture space, we can safely
       * reset the dirty bits.
       */
      if (brw->ctx.NewDriverState)
         brw_render_state_finished(brw);
   }

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return;
}
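
_mesa_fls() ("find last set") returns the 1-based index of the highest set bit, which is why it yields a sampler count rather than a popcount; approximately:

/* Find-last-set: 0 for 0, otherwise the 1-based position of the most
 * significant set bit.  Approximate definition using a GCC builtin. */
static inline unsigned
_mesa_fls(unsigned n)
{
   return n == 0 ? 0 : 32 - __builtin_clz(n);
}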
Example #17
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
				     const struct gl_client_array *arrays[],
				     const struct _mesa_prim *prim,
				     GLuint nr_prims,
				     const struct _mesa_index_buffer *ib,
				     GLuint min_index,
				     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the 
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Resolves must occur after updating state and finalizing textures but
    * before setting up any hardware state for this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.  
    * 
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
				  (sizeof(struct brw_sampler_state) +
				   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (intel->gen < 6)
	 brw_set_prim(brw, &prim[i]);
      else
	 gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
	 intel->no_batch_wrap = true;
	 brw_upload_state(brw);

	 if (unlikely(brw->intel.Fallback)) {
	    intel->no_batch_wrap = false;
	    retval = false;
	    goto out;
	 }
      }

      if (intel->gen >= 7)
	 gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
	 brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
	 if (!fail_next) {
	    intel_batchbuffer_reset_to_saved(intel);
	    intel_batchbuffer_flush(intel);
	    fail_next = true;
	    goto retry;
	 } else {
	    if (intel_batchbuffer_flush(intel) == -ENOSPC) {
	       static bool warned = false;

	       if (!warned) {
		  fprintf(stderr, "i965: Single primitive emit exceeded"
			  "available aperture space\n");
		  warned = true;
	       }

	       retval = false;
	    }
	 }
      }

      if (!_mesa_meta_in_progress(ctx))
         brw_update_primitive_count(brw, &prim[i]);
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}
Example #18
static void
i915_render_start(struct intel_context *intel)
{
   intel_prepare_render(intel);
}
Example #19
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   bool ok;

   struct brw_context *brw = brw_context(ctx);
   bool dirty;

   DBG("%s\n", __func__);

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
                                        format, type, pixels, pack)) {
         /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
          * binding the user-provided BO as a fake framebuffer and rendering
          * to it.  This breaks the invariant of the GL that nothing is able
          * to render to a BO, causing nondeterministic corruption issues
          * because the render cache is not coherent with a number of other
          * caches that the BO could potentially be bound to afterwards.
          *
          * This could be solved in the same way that we guarantee texture
          * coherency after a texture is attached to a framebuffer and
          * rendered to, but that would involve checking *all* BOs bound to
          * the pipeline for the case we need to emit a cache flush due to
          * previous rendering to any of them -- Including vertex, index,
          * uniform, atomic counter, shader image, transform feedback,
          * indirect draw buffers, etc.
          *
          * That would increase the per-draw call overhead even though it's
          * very unlikely that any of the BOs bound to the pipeline has been
          * rendered to via a PBO at any point, so it seems better to just
          * flush here unconditionally.
          */
         brw_emit_mi_flush(brw);
         return;
      }

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
   }

   ok = intel_readpixels_tiled_memcpy(ctx, x, y, width, height,
                                      format, type, pixels, pack);
   if (ok)
      return;

   /* glReadPixels() won't dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render(). */
   dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = dirty;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = dirty;
}
Example #20
/**
 * Called by ctx->Driver.Clear.
 */
static void
intelClear(struct gl_context *ctx, GLbitfield mask)
{
   struct intel_context *intel = intel_context(ctx);
   const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask[0]);
   GLbitfield tri_mask = 0;
   GLbitfield blit_mask = 0;
   GLbitfield swrast_mask = 0;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *irb;
   int i;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = GL_TRUE;
   }

   if (0)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Get SW clears out of the way: Anything without an intel_renderbuffer */
   for (i = 0; i < BUFFER_COUNT; i++) {
      if (!(mask & (1 << i)))
	 continue;

      irb = intel_get_renderbuffer(fb, i);
      if (unlikely(!irb)) {
	 swrast_mask |= (1 << i);
	 mask &= ~(1 << i);
      }
   }
   if (unlikely(swrast_mask)) {
      debug_mask("swrast", swrast_mask);
      _swrast_Clear(ctx, swrast_mask);
   }

   /* HW color buffers (front, back, aux, generic FBO, etc) */
   if (intel->gen < 6 && colorMask == ~0) {
      /* clear all R,G,B,A */
      blit_mask |= (mask & BUFFER_BITS_COLOR);
   }
   else {
      /* glColorMask in effect */
      tri_mask |= (mask & BUFFER_BITS_COLOR);
   }

   /* Make sure we have up to date buffers before we start looking at
    * the tiling bits to determine how to clear. */
   intel_prepare_render(intel);

   /* HW stencil */
   if (mask & BUFFER_BIT_STENCIL) {
      const struct intel_region *stencilRegion
         = intel_get_rb_region(fb, BUFFER_STENCIL);
      if (stencilRegion) {
         /* have hw stencil */
         if (stencilRegion->tiling == I915_TILING_Y ||
	     (ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
	    /* We have to use the 3D engine if we're clearing a partial mask
	     * of the stencil buffer, or if we're on a 965 which has a tiled
	     * depth/stencil buffer in a layout we can't blit to.
	     */
            tri_mask |= BUFFER_BIT_STENCIL;
         }
	 else if (intel->has_separate_stencil &&
	       stencilRegion->tiling == I915_TILING_NONE) {
	    /* The stencil buffer is actually W tiled, which the hardware
	     * cannot blit to. */
	    tri_mask |= BUFFER_BIT_STENCIL;
	 }
         else {
            /* clearing all stencil bits, use blitting */
            blit_mask |= BUFFER_BIT_STENCIL;
         }
      }
   }

   /* HW depth */
   if (mask & BUFFER_BIT_DEPTH) {
      const struct intel_region *depthRegion =
         intel_get_rb_region(fb, BUFFER_DEPTH);

      /* clear depth with whatever method is used for stencil (see above) */
      if (depthRegion->tiling == I915_TILING_Y ||
          (tri_mask & BUFFER_BIT_STENCIL))
         tri_mask |= BUFFER_BIT_DEPTH;
      else
         blit_mask |= BUFFER_BIT_DEPTH;
   }

   /* If we're doing a tri pass for depth/stencil, include a likely color
    * buffer with it.
    */
   if (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL)) {
      int color_bit = _mesa_ffs(mask & BUFFER_BITS_COLOR);
      if (color_bit != 0) {
	 tri_mask |= blit_mask & (1 << (color_bit - 1));
	 blit_mask &= ~(1 << (color_bit - 1));
      }
   }

   /* Anything left, just use tris */
   tri_mask |= mask & ~blit_mask;

   if (blit_mask) {
      debug_mask("blit", blit_mask);
      tri_mask |= intelClearWithBlit(ctx, blit_mask);
   }

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      if (ctx->Extensions.ARB_fragment_shader)
	 _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      else
	 _mesa_meta_Clear(&intel->ctx, tri_mask);
   }
}
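
On the API side, a single glClear call can fan out across all three paths above, e.g.:

/* One call may be split by intelClear() into blitter, meta/tri and
 * swrast work depending on masks, tiling and renderbuffer types. */
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClearStencil(0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);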
Example #21
bool
intel_copy_texsubimage(struct intel_context *intel,
                       struct intel_texture_image *intelImage,
                       GLint dstx, GLint dsty,
                       struct intel_renderbuffer *irb,
                       GLint x, GLint y, GLsizei width, GLsizei height)
{
   struct gl_context *ctx = &intel->ctx;
   struct intel_region *region;
   const GLenum internalFormat = intelImage->base.Base.InternalFormat;
   bool copy_supported = false;
   bool copy_supported_with_alpha_override = false;

   intel_prepare_render(intel);

   if (!intelImage->mt || !irb || !irb->mt) {
      if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS))
	 fprintf(stderr, "%s fail %p %p (0x%08x)\n",
		 __FUNCTION__, intelImage->mt, irb, internalFormat);
      return false;
   } else {
      region = irb->mt->region;
      assert(region);
   }

   copy_supported = intelImage->base.Base.TexFormat == intel_rb_format(irb);

   /* Converting ARGB8888 to XRGB8888 is trivial: ignore the alpha bits */
   if (intel_rb_format(irb) == MESA_FORMAT_ARGB8888 &&
       intelImage->base.Base.TexFormat == MESA_FORMAT_XRGB8888) {
      copy_supported = true;
   }

   /* Converting XRGB8888 to ARGB8888 requires setting the alpha bits to 1.0 */
   if (intel_rb_format(irb) == MESA_FORMAT_XRGB8888 &&
       intelImage->base.Base.TexFormat == MESA_FORMAT_ARGB8888) {
      copy_supported_with_alpha_override = true;
   }

   if (!copy_supported && !copy_supported_with_alpha_override) {
      if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS))
	 fprintf(stderr, "%s mismatched formats %s, %s\n",
		 __FUNCTION__,
		 _mesa_get_format_name(intelImage->base.Base.TexFormat),
		 _mesa_get_format_name(intel_rb_format(irb)));
      return false;
   }

   {
      GLuint image_x, image_y;
      GLshort src_pitch;

      /* get dest x/y in destination texture */
      intel_miptree_get_image_offset(intelImage->mt,
				     intelImage->base.Base.Level,
				     intelImage->base.Base.Face,
				     0,
				     &image_x, &image_y);

      /* The blitter can't handle Y-tiled buffers. */
      if (intelImage->mt->region->tiling == I915_TILING_Y) {
	 return false;
      }

      if (ctx->ReadBuffer->Name == 0) {
	 /* Flip vertical orientation for system framebuffers */
	 y = ctx->ReadBuffer->Height - (y + height);
	 src_pitch = -region->pitch;
      } else {
	 /* reading from a FBO, y is already oriented the way we like */
	 src_pitch = region->pitch;
      }

      /* blit from src buffer to texture */
      if (!intelEmitCopyBlit(intel,
			     intelImage->mt->cpp,
			     src_pitch,
			     region->bo,
			     0,
			     region->tiling,
			     intelImage->mt->region->pitch,
			     intelImage->mt->region->bo,
			     0,
			     intelImage->mt->region->tiling,
			     irb->draw_x + x, irb->draw_y + y,
			     image_x + dstx, image_y + dsty,
			     width, height,
			     GL_COPY)) {
	 return false;
      }
   }

   if (copy_supported_with_alpha_override)
      intel_set_teximage_alpha_to_one(ctx, intelImage);

   return true;
}
Example #22
/**
 * CopyPixels with the blitter.  Don't support zooming, pixel transfer, etc.
 */
static GLboolean
do_blit_copypixels(struct gl_context * ctx,
                   GLint srcx, GLint srcy,
                   GLsizei width, GLsizei height,
                   GLint dstx, GLint dsty, GLenum type)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct gl_framebuffer *read_fb = ctx->ReadBuffer;
   GLint orig_dstx;
   GLint orig_dsty;
   GLint orig_srcx;
   GLint orig_srcy;
   GLboolean flip = GL_FALSE;
   struct intel_renderbuffer *draw_irb = NULL;
   struct intel_renderbuffer *read_irb = NULL;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   switch (type) {
   case GL_COLOR:
      if (fb->_NumColorDrawBuffers != 1) {
	 fallback_debug("glCopyPixels() fallback: MRT\n");
	 return GL_FALSE;
      }

      draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
      read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
      break;
   case GL_DEPTH_STENCIL_EXT:
      draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      read_irb =
	 intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      break;
   case GL_DEPTH:
      fallback_debug("glCopyPixels() fallback: GL_DEPTH\n");
      return GL_FALSE;
   case GL_STENCIL:
      fallback_debug("glCopyPixels() fallback: GL_STENCIL\n");
      return GL_FALSE;
   default:
      fallback_debug("glCopyPixels(): Unknown type\n");
      return GL_FALSE;
   }

   if (!draw_irb) {
      fallback_debug("glCopyPixels() fallback: missing draw buffer\n");
      return GL_FALSE;
   }

   if (!read_irb) {
      fallback_debug("glCopyPixels() fallback: missing read buffer\n");
      return GL_FALSE;
   }

   if (draw_irb->Base.Format != read_irb->Base.Format &&
       !(draw_irb->Base.Format == MESA_FORMAT_XRGB8888 &&
	 read_irb->Base.Format == MESA_FORMAT_ARGB8888)) {
      fallback_debug("glCopyPixels() fallback: mismatched formats (%s -> %s\n",
		     _mesa_get_format_name(read_irb->Base.Format),
		     _mesa_get_format_name(draw_irb->Base.Format));
      return GL_FALSE;
   }

   /* Copypixels can be more than a straight copy.  Ensure all the
    * extra operations are disabled:
    */
   if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
       ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
      return GL_FALSE;

   intel_prepare_render(intel);

   intel_flush(&intel->ctx);

   /* Clip to destination buffer. */
   orig_dstx = dstx;
   orig_dsty = dsty;
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;
   /* Adjust src coords for our post-clipped destination origin */
   srcx += dstx - orig_dstx;
   srcy += dsty - orig_dsty;

   /* Clip to source buffer. */
   orig_srcx = srcx;
   orig_srcy = srcy;
   if (!_mesa_clip_to_region(0, 0,
			     read_fb->Width, read_fb->Height,
			     &srcx, &srcy, &width, &height))
      goto out;
   /* Adjust dst coords for our post-clipped source origin */
   dstx += srcx - orig_srcx;
   dsty += srcy - orig_srcy;

   /* Flip dest Y if it's a window system framebuffer. */
   if (fb->Name == 0) {
      /* copypixels to a window system framebuffer */
      dsty = fb->Height - dsty - height;
      flip = !flip;
   }

   /* Flip source Y if it's a window system framebuffer. */
   if (read_fb->Name == 0) {
      srcy = read_fb->Height - srcy - height;
      flip = !flip;
   }

   srcx += read_irb->draw_x;
   srcy += read_irb->draw_y;
   dstx += draw_irb->draw_x;
   dsty += draw_irb->draw_y;

   if (!intel_region_copy(intel,
			  draw_irb->region, 0, dstx, dsty,
			  read_irb->region, 0, srcx, srcy,
			  width, height, flip,
			  ctx->Color.ColorLogicOpEnabled ?
			  ctx->Color.LogicOp : GL_COPY)) {
      DBG("%s: blit failure\n", __FUNCTION__);
      return GL_FALSE;
   }

out:
   intel_check_front_buffer_rendering(intel);

   DBG("%s: success\n", __FUNCTION__);
   return GL_TRUE;
}
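
The blit path above serves plain glCopyPixels calls with zoom and transfer ops disabled, e.g.:

/* Copy a 100x50 block of color data; no zoom or pixel transfer ops are
 * enabled, so the blitter path can take it. */
glPixelZoom(1.0f, 1.0f);
glRasterPos2i(10, 10);          /* destination origin */
glCopyPixels(0, 0, 100, 50, GL_COLOR);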
Example #23
/*
 * Render a bitmap.
 */
static bool
do_blit_bitmap( struct gl_context *ctx, 
		GLint dstx, GLint dsty,
		GLsizei width, GLsizei height,
		const struct gl_pixelstore_attrib *unpack,
		const GLubyte *bitmap )
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *irb;
   GLfloat tmpColor[4];
   GLubyte ubcolor[4];
   GLuint color;
   GLsizei bitmap_width = width;
   GLsizei bitmap_height = height;
   GLint px, py;
   GLuint stipple[32];
   GLint orig_dstx = dstx;
   GLint orig_dsty = dsty;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   if (ctx->Depth.Test) {
      /* The blit path produces incorrect results when depth testing is on.
       * It seems the blit Z coord is always 1.0 (the far plane) so fragments
       * will likely be obscured by other, closer geometry.
       */
      return false;
   }

   intel_prepare_render(intel);

   if (fb->_NumColorDrawBuffers != 1) {
      perf_debug("accelerated glBitmap() only supports rendering to a "
                 "single color buffer\n");
      return false;
   }

   irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);

   if (_mesa_is_bufferobj(unpack->BufferObj)) {
      bitmap = map_pbo(ctx, width, height, unpack, bitmap);
      if (bitmap == NULL)
	 return true;	/* even though this is an error, we're done */
   }

   COPY_4V(tmpColor, ctx->Current.RasterColor);

   if (_mesa_need_secondary_color(ctx)) {
       ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
   }

   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);

   switch (irb->mt->format) {
   case MESA_FORMAT_B8G8R8A8_UNORM:
   case MESA_FORMAT_B8G8R8X8_UNORM:
      color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
      break;
   case MESA_FORMAT_B5G6R5_UNORM:
      color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
      break;
   default:
      perf_debug("Unsupported format %s in accelerated glBitmap()\n",
                 _mesa_get_format_name(irb->mt->format));
      return false;
   }

   if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
      return false;

   /* Clip to buffer bounds and scissor. */
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;

   dsty = y_flip(fb, dsty, height);

#define DY 32
#define DX 32

   /* Chop it all into chunks that can be digested by hardware: */
   for (py = 0; py < height; py += DY) {
      for (px = 0; px < width; px += DX) {
	 int h = MIN2(DY, height - py);
	 int w = MIN2(DX, width - px);
	 GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
	 GLenum logic_op = ctx->Color.ColorLogicOpEnabled ?
	    ctx->Color.LogicOp : GL_COPY;

	 assert(sz <= sizeof(stipple));
	 memset(stipple, 0, sz);

	 /* May need to adjust this when padding has been introduced in
	  * sz above:
	  *
	  * Have to translate destination coordinates back into source
	  * coordinates.
	  */
         int count = get_bitmap_rect(bitmap_width, bitmap_height, unpack,
                                     bitmap,
                                     -orig_dstx + (dstx + px),
                                     -orig_dsty + y_flip(fb, dsty + py, h),
                                     w, h,
                                     (GLubyte *)stipple,
                                     8,
                                     _mesa_is_winsys_fbo(fb));
         if (count == 0)
	    continue;

	 if (!intelEmitImmediateColorExpandBlit(intel,
						irb->mt->cpp,
						(GLubyte *)stipple,
						sz,
						color,
						irb->mt->region->pitch,
						irb->mt->region->bo,
						0,
						irb->mt->region->tiling,
						dstx + px,
						dsty + py,
						w, h,
						logic_op)) {
	    return false;
	 }

         if (ctx->Query.CurrentOcclusionObject)
            ctx->Query.CurrentOcclusionObject->Result += count;
      }
   }
out:

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
      intel_batchbuffer_flush(intel);

   if (_mesa_is_bufferobj(unpack->BufferObj)) {
      /* done with PBO so unmap it now */
      ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
   }

   intel_check_front_buffer_rendering(intel);

   return true;
}
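
y_flip() used above converts between GL's bottom-left origin and the top-left layout of window-system buffers; a sketch consistent with its uses here:

/* Sketch: user FBOs already match GL's orientation; window-system
 * framebuffers need the row range flipped. */
static GLint
y_flip(struct gl_framebuffer *fb, GLint y, GLint height)
{
   if (_mesa_is_user_fbo(fb))
      return y;
   else
      return fb->Height - y - height;
}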
Example #24
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( struct gl_context *ctx,
				     const struct gl_client_array *arrays[],
				     const struct _mesa_prim *prim,
				     GLuint nr_prims,
				     const struct _mesa_index_buffer *ib,
				     GLuint min_index,
				     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLuint i;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the 
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.  
    * 
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
				  (sizeof(struct brw_sampler_state) +
				   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);

      hw_prim = brw_set_prim(brw, &prim[i]);
      if (brw->state.dirty.brw) {
	 brw_validate_state(brw);

	 /* Various fallback checks:  */
	 if (brw->intel.Fallback)
	    goto out;

	 /* Check that we can fit our state in with our existing batchbuffer, or
	  * flush otherwise.
	  */
	 if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
					     brw->state.validated_bo_count)) {
	    static GLboolean warned;
	    intel_batchbuffer_flush(intel);

	    /* Validate the state after we flushed the batch (which would have
	     * changed the set of dirty state).  If we still fail to
	     * check_aperture, warn of what's happening, but attempt to continue
	     * on since it may succeed anyway, and the user would probably rather
	     * see a failure and a warning than a fallback.
	     */
	    brw_validate_state(brw);
	    if (!warned &&
		dri_bufmgr_check_aperture_space(brw->state.validated_bos,
						brw->state.validated_bo_count)) {
	       warn = GL_TRUE;
	       warned = GL_TRUE;
	    }
	 }

	 intel->no_batch_wrap = GL_TRUE;
	 brw_upload_state(brw);
      }

      if (intel->gen >= 7)
	 gen7_emit_prim(brw, &prim[i], hw_prim);
      else
	 brw_emit_prim(brw, &prim[i], hw_prim);

      intel->no_batch_wrap = GL_FALSE;

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
	      "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
Example #25
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
				     const struct gl_client_array *arrays[],
				     const struct _mesa_prim *prims,
				     GLuint nr_prims,
				     const struct _mesa_index_buffer *ib,
				     GLuint min_index,
				     GLuint max_index,
				     struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the 
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_state() because it
    * may flush the batchbuffer for a blit, affecting the state flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
				  (sizeof(struct brw_sampler_state) +
				   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
      intel_batchbuffer_save_state(brw);

      if (brw->num_instances != prims[i].num_instances) {
         brw->num_instances = prims[i].num_instances;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw, arrays);
      }
      if (brw->basevertex != prims[i].basevertex) {
         brw->basevertex = prims[i].basevertex;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw, arrays);
      }
      if (brw->gen < 6)
	 brw_set_prim(brw, &prims[i]);
      else
	 gen6_set_prim(brw, &prims[i]);

retry:
      /* Note that brw->state.dirty.brw was nonzero before the loop, and the
       * only state updated inside the loop but outside this block is in
       * *_set_prim or intel_batchbuffer_flush(), which only affects
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
	 brw->no_batch_wrap = true;
	 brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prims[i], brw->primitive);

      brw->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
	 if (!fail_next) {
	    intel_batchbuffer_reset_to_saved(brw);
	    intel_batchbuffer_flush(brw);
	    fail_next = true;
	    goto retry;
	 } else {
	    if (intel_batchbuffer_flush(brw) == -ENOSPC) {
	       static bool warned = false;

	       if (!warned) {
		  fprintf(stderr, "i965: Single primitive emit exceeded"
			  "available aperture space\n");
		  warned = true;
	       }

	       retval = false;
	    }
	 }
      }
   }

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}
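
The retry path above relies on intel_batchbuffer_save_state()/intel_batchbuffer_reset_to_saved(): the partial primitive is rewound out of the batch, the batch is flushed, and the same primitive is emitted once more into fresh space; only a second overflow reaches the error path. A minimal sketch of that control flow with simulated batch bookkeeping (every name is a hypothetical stand-in):

#include <stdbool.h>

/* Simulated batch bookkeeping; hypothetical stand-ins for the
 * intel_batchbuffer_* calls used above.
 */
static unsigned batch_used, batch_saved;
static void save_state(void)     { batch_saved = batch_used; }
static void reset_to_saved(void) { batch_used = batch_saved; }
static void flush(void)          { batch_used = 0; }
static bool over_aperture(void)  { return batch_used > 4096; }
static void emit_prim(void)      { batch_used += 512; }

static bool
draw_one_prim(void)
{
   bool fail_next = false;

   save_state();
retry:
   emit_prim();
   if (over_aperture()) {
      if (!fail_next) {
         reset_to_saved();   /* rewind the partial primitive */
         flush();            /* start a fresh, empty batch */
         fail_next = true;
         goto retry;         /* emit the same primitive once more */
      }
      return false;          /* still too big on its own: give up */
   }
   return true;
}
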
Example No. 26
static GLboolean
do_blit_readpixels(GLcontext * ctx,
                   GLint x, GLint y, GLsizei width, GLsizei height,
                   GLenum format, GLenum type,
                   const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_region *src = intel_readbuf_region(intel);
   struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
   GLuint dst_offset;
   GLuint rowLength;
   drm_intel_bo *dst_buffer;
   GLboolean all;
   GLint dst_x, dst_y;

   if (INTEL_DEBUG & DEBUG_PIXEL)
      printf("%s\n", __FUNCTION__);

   if (!src)
      return GL_FALSE;

   if (!_mesa_is_bufferobj(pack->BufferObj)) {
      /* PBO only for now:
       */
      if (INTEL_DEBUG & DEBUG_PIXEL)
         printf("%s - not PBO\n", __FUNCTION__);
      return GL_FALSE;
   }


   if (ctx->_ImageTransferState ||
       !intel_check_blit_format(src, format, type)) {
      if (INTEL_DEBUG & DEBUG_PIXEL)
         printf("%s - bad format for blit\n", __FUNCTION__);
      return GL_FALSE;
   }

   if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
      if (INTEL_DEBUG & DEBUG_PIXEL)
         printf("%s: bad packing params\n", __FUNCTION__);
      return GL_FALSE;
   }

   if (pack->RowLength > 0)
      rowLength = pack->RowLength;
   else
      rowLength = width;

   if (pack->Invert) {
      if (INTEL_DEBUG & DEBUG_PIXEL)
         printf("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
      return GL_FALSE;
   }
   else {
      if (ctx->ReadBuffer->Name == 0)
	 rowLength = -rowLength;
   }

   dst_offset = (GLintptr) _mesa_image_address(2, pack, pixels, width, height,
					       format, type, 0, 0, 0);

   if (!_mesa_clip_copytexsubimage(ctx,
				   &dst_x, &dst_y,
				   &x, &y,
				   &width, &height)) {
      return GL_TRUE;
   }

   intel_prepare_render(intel);

   all = (width * height * src->cpp == dst->Base.Size &&
	  x == 0 && dst_offset == 0);

   dst_x = 0;
   dst_y = 0;

   dst_buffer = intel_bufferobj_buffer(intel, dst,
					       all ? INTEL_WRITE_FULL :
					       INTEL_WRITE_PART);

   if (ctx->ReadBuffer->Name == 0)
      y = ctx->ReadBuffer->Height - (y + height);

   if (!intelEmitCopyBlit(intel,
			  src->cpp,
			  src->pitch, src->buffer, 0, src->tiling,
			  rowLength, dst_buffer, dst_offset, GL_FALSE,
			  x, y,
			  dst_x, dst_y,
			  width, height,
			  GL_COPY)) {
      return GL_FALSE;
   }

   if (INTEL_DEBUG & DEBUG_PIXEL)
      printf("%s - DONE\n", __FUNCTION__);

   return GL_TRUE;
}
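
The negative rowLength plus the y flip above make the blitter write the bottom-up window-system rows into the PBO in top-down order. A small standalone illustration of the same coordinate math on a plain byte buffer (names hypothetical, not the driver call):

#include <string.h>

/* Copy `height` rows of `row_bytes` each out of a bottom-up source buffer
 * so that the destination ends up in top-down order, mimicking the
 * y flip plus negative rowLength used above.
 */
static void
copy_flipped(const unsigned char *src, int src_height,
             int y, int height, int row_bytes, unsigned char *dst)
{
   /* Same flip as the driver: address the bottom-up buffer from the top. */
   int flipped_y = src_height - (y + height);

   for (int row = 0; row < height; row++) {
      const unsigned char *s = src + (flipped_y + row) * row_bytes;
      /* Negative effective stride: fill dst from its last row upwards. */
      unsigned char *d = dst + (height - 1 - row) * row_bytes;
      memcpy(d, s, row_bytes);
   }
}
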
Example No. 27
/**
 * CopyPixels with the blitter.  Doesn't support zooming, pixel transfer, etc.
 */
static bool
do_blit_copypixels(struct gl_context * ctx,
                   GLint srcx, GLint srcy,
                   GLsizei width, GLsizei height,
                   GLint dstx, GLint dsty, GLenum type)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct gl_framebuffer *read_fb = ctx->ReadBuffer;
   GLint orig_dstx;
   GLint orig_dsty;
   GLint orig_srcx;
   GLint orig_srcy;
   struct intel_renderbuffer *draw_irb = NULL;
   struct intel_renderbuffer *read_irb = NULL;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   intel_prepare_render(brw);

   switch (type) {
   case GL_COLOR:
      if (fb->_NumColorDrawBuffers != 1) {
	 perf_debug("glCopyPixels() fallback: MRT\n");
	 return false;
      }

      draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
      read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
      break;
   case GL_DEPTH_STENCIL_EXT:
      draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      read_irb =
	 intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      break;
   case GL_DEPTH:
      perf_debug("glCopyPixels() fallback: GL_DEPTH\n");
      return false;
   case GL_STENCIL:
      perf_debug("glCopyPixels() fallback: GL_STENCIL\n");
      return false;
   default:
      perf_debug("glCopyPixels(): Unknown type\n");
      return false;
   }

   if (!draw_irb) {
      perf_debug("glCopyPixels() fallback: missing draw buffer\n");
      return false;
   }

   if (!read_irb) {
      perf_debug("glCopyPixels() fallback: missing read buffer\n");
      return false;
   }

   if (draw_irb->mt->num_samples > 1 || read_irb->mt->num_samples > 1) {
      perf_debug("glCopyPixels() fallback: multisampled buffers\n");
      return false;
   }

   if (ctx->_ImageTransferState) {
      perf_debug("glCopyPixels(): Unsupported image transfer state\n");
      return false;
   }

   if (ctx->Depth.Test) {
      perf_debug("glCopyPixels(): Unsupported depth test state\n");
      return false;
   }

   if (ctx->Stencil._Enabled) {
      perf_debug("glCopyPixels(): Unsupported stencil test state\n");
      return false;
   }

   if (ctx->Fog.Enabled ||
       ctx->Texture._MaxEnabledTexImageUnit != -1 ||
       ctx->FragmentProgram._Enabled) {
      perf_debug("glCopyPixels(): Unsupported fragment shader state\n");
      return false;
   }

   if (ctx->Color.AlphaEnabled ||
       ctx->Color.BlendEnabled) {
      perf_debug("glCopyPixels(): Unsupported blend state\n");
      return false;
   }

   if (!ctx->Color.ColorMask[0][0] ||
       !ctx->Color.ColorMask[0][1] ||
       !ctx->Color.ColorMask[0][2] ||
       !ctx->Color.ColorMask[0][3]) {
      perf_debug("glCopyPixels(): Unsupported color mask state\n");
      return false;
   }

   if (ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F) {
      perf_debug("glCopyPixles(): Unsupported pixel zoom\n");
      return false;
   }

   intel_batchbuffer_flush(brw);

   /* Clip to destination buffer. */
   orig_dstx = dstx;
   orig_dsty = dsty;
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;
   /* Adjust src coords for our post-clipped destination origin */
   srcx += dstx - orig_dstx;
   srcy += dsty - orig_dsty;

   /* Clip to source buffer. */
   orig_srcx = srcx;
   orig_srcy = srcy;
   if (!_mesa_clip_to_region(0, 0,
			     read_fb->Width, read_fb->Height,
			     &srcx, &srcy, &width, &height))
      goto out;
   /* Adjust dst coords for our post-clipped source origin */
   dstx += srcx - orig_srcx;
   dsty += srcy - orig_srcy;

   if (!intel_miptree_blit(brw,
                           read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
                           srcx, srcy, _mesa_is_winsys_fbo(read_fb),
                           draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
                           dstx, dsty, _mesa_is_winsys_fbo(fb),
                           width, height,
                           (ctx->Color.ColorLogicOpEnabled ?
                            ctx->Color.LogicOp : GL_COPY))) {
      DBG("%s: blit failure\n", __FUNCTION__);
      return false;
   }

   if (ctx->Query.CurrentOcclusionObject)
      ctx->Query.CurrentOcclusionObject->Result += width * height;

out:

   DBG("%s: success\n", __FUNCTION__);
   return true;
}
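
The two clip passes above are symmetric: each clamps one rectangle and then shifts the other rectangle's origin by the amount the clamp moved. A self-contained sketch of one pass, with clip_to_region() as a simplified stand-in for _mesa_clip_to_region():

#include <stdbool.h>

/* Simplified stand-in for _mesa_clip_to_region(): clamp (x, y, w, h) to
 * [xmin, xmax) x [ymin, ymax) and report whether anything is left.
 */
static bool
clip_to_region(int xmin, int ymin, int xmax, int ymax,
               int *x, int *y, int *w, int *h)
{
   if (*x < xmin) { *w -= xmin - *x; *x = xmin; }
   if (*y < ymin) { *h -= ymin - *y; *y = ymin; }
   if (*x + *w > xmax) *w = xmax - *x;
   if (*y + *h > ymax) *h = ymax - *y;
   return *w > 0 && *h > 0;
}

/* One clip pass: clamp the destination, then shift the source origin by
 * however much the destination moved.  The source pass is symmetric.
 */
static bool
clip_copy_rect(int fb_w, int fb_h,
               int *srcx, int *srcy, int *dstx, int *dsty, int *w, int *h)
{
   int orig_dstx = *dstx, orig_dsty = *dsty;

   if (!clip_to_region(0, 0, fb_w, fb_h, dstx, dsty, w, h))
      return false;

   *srcx += *dstx - orig_dstx;
   *srcy += *dsty - orig_dsty;
   return true;
}
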
Example No. 28
/**
 * Try to do a glBlitFramebuffer using the hardware blitter.
 * We can do this when the blit is an unscaled, unmirrored 1:1 copy, the
 * rectangles lie within the buffer bounds, and there is no scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      GLint i;
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !ctx->Scissor.Enabled)) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer.  "
                       "Falling back to software rendering.\n");
            return mask;
         }

         gl_format src_format = _mesa_get_srgb_format_linear(src_rb->Format);
         gl_format dst_format = _mesa_get_srgb_format_linear(dst_rb->Format);
         if (src_format != dst_format) {
            perf_debug("glBlitFramebuffer(): unsupported blit from %s to %s.  "
                       "Falling back to software rendering.\n",
                       _mesa_get_format_name(src_format),
                       _mesa_get_format_name(dst_format));
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure.  "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}
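
The in-bounds/no-scaling predicate above is easier to read factored out. A sketch of the same conditions as a helper (parameter names are local to the sketch):

#include <stdbool.h>

/* True when a framebuffer blit is a plain 1:1 copy the 2D blitter can do:
 * equal extents, no axis mirroring, both rectangles in bounds, no scissor.
 */
static bool
is_one_to_one_blit(int srcX0, int srcY0, int srcX1, int srcY1,
                   int dstX0, int dstY0, int dstX1, int dstY1,
                   int read_w, int read_h, int draw_w, int draw_h,
                   bool scissor_enabled)
{
   return srcX1 - srcX0 == dstX1 - dstX0 &&   /* no horizontal scaling */
          srcY1 - srcY0 == dstY1 - dstY0 &&   /* no vertical scaling */
          srcX1 >= srcX0 && srcY1 >= srcY0 && /* no mirroring */
          srcX0 >= 0 && srcX1 <= read_w &&
          srcY0 >= 0 && srcY1 <= read_h &&
          dstX0 >= 0 && dstX1 <= draw_w &&
          dstY0 >= 0 && dstY1 <= draw_h &&
          !scissor_enabled;
}
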
Example No. 29
bool
brw_blorp_copytexsubimage(struct brw_context *brw,
                          struct gl_renderbuffer *src_rb,
                          struct gl_texture_image *dst_image,
                          int slice,
                          int srcX0, int srcY0,
                          int dstX0, int dstY0,
                          int width, int height)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
   struct intel_texture_image *intel_image = intel_texture_image(dst_image);

   /* No pixel transfer operations (zoom, bias, mapping), just a blit */
   if (brw->ctx._ImageTransferState)
      return false;

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking at the src renderbuffer's miptree.
    */
   intel_prepare_render(brw);

   struct intel_mipmap_tree *src_mt = src_irb->mt;
   struct intel_mipmap_tree *dst_mt = intel_image->mt;

   /* Only up to eight samples are supported. */
   if (src_mt->num_samples > 8 || dst_mt->num_samples > 8)
      return false;

   /* BLORP is only supported from Gen6 onwards. */
   if (brw->gen < 6)
      return false;

   if (_mesa_get_format_base_format(src_rb->Format) !=
       _mesa_get_format_base_format(dst_image->TexFormat)) {
      return false;
   }

   /* We can't handle format conversions between Z24 and other formats since
    * we have to lie about the surface format.  See the comments in
    * brw_blorp_surface_info::set().
    */
   if ((src_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT) !=
       (dst_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT)) {
      return false;
   }

   if (!brw->format_supported_as_render_target[dst_image->TexFormat])
      return false;

   /* Source clipping shouldn't be necessary, since copytexsubimage (in
    * src/mesa/main/teximage.c) calls _mesa_clip_copytexsubimage() which
    * takes care of it.
    *
    * Destination clipping shouldn't be necessary since the restrictions on
    * glCopyTexSubImage prevent the user from specifying a destination rectangle
    * that falls outside the bounds of the destination texture.
    * See error_check_subtexture_dimensions().
    */

   int srcY1 = srcY0 + height;
   int srcX1 = srcX0 + width;
   int dstX1 = dstX0 + width;
   int dstY1 = dstY0 + height;

   /* Account for the fact that in the system framebuffer, the origin is at
    * the lower left.
    */
   bool mirror_y = false;
   if (_mesa_is_winsys_fbo(ctx->ReadBuffer)) {
      GLint tmp = src_rb->Height - srcY0;
      srcY0 = src_rb->Height - srcY1;
      srcY1 = tmp;
      mirror_y = true;
   }

   /* Account for face selection and texture view MinLayer */
   int dst_slice = slice + dst_image->TexObject->MinLayer + dst_image->Face;
   int dst_level = dst_image->Level + dst_image->TexObject->MinLevel;

   brw_blorp_blit_miptrees(brw,
                           src_mt, src_irb->mt_level, src_irb->mt_layer,
                           src_rb->Format, blorp_get_texture_swizzle(src_irb),
                           dst_mt, dst_level, dst_slice,
                           dst_image->TexFormat,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           GL_NEAREST, false, mirror_y,
                           false, false);

   /* If we're copying to a packed depth stencil texture and the source
    * framebuffer has separate stencil, we need to also copy the stencil data
    * over.
    */
   src_rb = ctx->ReadBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;
   if (_mesa_get_format_bits(dst_image->TexFormat, GL_STENCIL_BITS) > 0 &&
       src_rb != NULL) {
      src_irb = intel_renderbuffer(src_rb);
      src_mt = src_irb->mt;

      if (src_mt->stencil_mt)
         src_mt = src_mt->stencil_mt;
      if (dst_mt->stencil_mt)
         dst_mt = dst_mt->stencil_mt;

      if (src_mt != dst_mt) {
         brw_blorp_blit_miptrees(brw,
                                 src_mt, src_irb->mt_level, src_irb->mt_layer,
                                 src_mt->format,
                                 blorp_get_texture_swizzle(src_irb),
                                 dst_mt, dst_level, dst_slice,
                                 dst_mt->format,
                                 srcX0, srcY0, srcX1, srcY1,
                                 dstX0, dstY0, dstX1, dstY1,
                                 GL_NEAREST, false, mirror_y,
                                 false, false);
      }
   }

   return true;
}
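
The y-mirror above swaps srcY0 and srcY1 around the buffer height so the span stays well-ordered while mirror_y records the flip. The arithmetic in isolation (hypothetical helper):

/* Mirror a [y0, y1) span vertically inside a buffer of the given height.
 * E.g. with height 100, the span [10, 30) becomes [70, 90).
 */
static void
mirror_span_y(int height, int *y0, int *y1)
{
   int tmp = height - *y0;   /* the old bottom edge becomes the new top */
   *y0 = height - *y1;
   *y1 = tmp;
}
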
static bool
do_blit_drawpixels(struct gl_context * ctx,
		   GLint x, GLint y, GLsizei width, GLsizei height,
		   GLenum format, GLenum type,
		   const struct gl_pixelstore_attrib *unpack,
		   const GLvoid * pixels)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset;
   drm_intel_bo *src_buffer;

   DBG("%s\n", __FUNCTION__);

   if (!intel_check_blit_fragment_ops(ctx, false))
      return false;

   if (ctx->DrawBuffer->_NumColorDrawBuffers != 1) {
      DBG("%s: fallback due to MRT\n", __FUNCTION__);
      return false;
   }

   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   if (!_mesa_format_matches_format_and_type(irb->mt->format, format, type,
                                             false)) {
      DBG("%s: bad format for blit\n", __FUNCTION__);
      return false;
   }

   if (unpack->SwapBytes || unpack->LsbFirst ||
       unpack->SkipPixels || unpack->SkipRows) {
      DBG("%s: bad packing params\n", __FUNCTION__);
      return false;
   }

   int src_stride = _mesa_image_row_stride(unpack, width, format, type);
   bool src_flip = false;
   /* Mesa flips the src_stride for unpack->Invert, but we want our mt to have
    * a normal src_stride.
    */
   if (unpack->Invert) {
      src_stride = -src_stride;
      src_flip = true;
   }

   src_offset = (GLintptr)pixels;
   src_offset += _mesa_image_offset(2, unpack, width, height,
				    format, type, 0, 0, 0);

   intel_prepare_render(brw);

   src_buffer = intel_bufferobj_buffer(brw, src,
				       src_offset, width * height *
                                       irb->mt->cpp);

   struct intel_mipmap_tree *pbo_mt =
      intel_miptree_create_for_bo(brw,
                                  src_buffer,
                                  irb->mt->format,
                                  src_offset,
                                  width, height,
                                  src_stride, I915_TILING_NONE);
   if (!pbo_mt)
      return false;

   if (!intel_miptree_blit(brw,
                           pbo_mt, 0, 0,
                           0, 0, src_flip,
                           irb->mt, irb->mt_level, irb->mt_layer,
                           x, y, _mesa_is_winsys_fbo(ctx->DrawBuffer),
                           width, height, GL_COPY)) {
      DBG("%s: blit failed\n", __FUNCTION__);
      intel_miptree_release(&pbo_mt);
      return false;
   }

   intel_miptree_release(&pbo_mt);

   if (ctx->Query.CurrentOcclusionObject)
      ctx->Query.CurrentOcclusionObject->Result += width * height;

   intel_check_front_buffer_rendering(brw);

   DBG("%s: success\n", __FUNCTION__);
   return true;
}
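
_mesa_image_row_stride() already returns a negative stride when unpack->Invert is set; the code above restores a positive stride for the miptree and lets src_flip do the vertical mirror in the blit. What that mirror amounts to on a plain buffer, as a hypothetical standalone helper:

#include <stdbool.h>
#include <string.h>

/* Copy `height` rows of `width_bytes` each; with invert set, walk the
 * source from its last row upwards, which is the effect src_flip asks
 * intel_miptree_blit() to produce.
 */
static void
copy_maybe_inverted(const unsigned char *src, unsigned char *dst,
                    int width_bytes, int height, bool invert)
{
   int stride = width_bytes;
   const unsigned char *row = src;

   if (invert) {
      row = src + (height - 1) * width_bytes;   /* start at the last row */
      stride = -stride;                         /* and walk upwards */
   }

   for (int i = 0; i < height; i++, row += stride, dst += width_bytes)
      memcpy(dst, row, width_bytes);
}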