Example #1
bool
do_single_blorp_clear(struct brw_context *brw, struct gl_framebuffer *fb,
                      struct gl_renderbuffer *rb, unsigned buf,
                      bool partial_clear, unsigned layer)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   brw_blorp_clear_params params(brw, fb, rb, ctx->Color.ColorMask[buf],
                                 partial_clear, layer);

   bool is_fast_clear =
      (params.fast_clear_op == GEN7_FAST_CLEAR_OP_FAST_CLEAR);
   if (is_fast_clear) {
      /* Record the clear color in the miptree so that it will be
       * programmed in SURFACE_STATE by later rendering and resolve
       * operations.
       */
      uint32_t new_color_value =
         compute_fast_clear_color_bits(&ctx->Color.ClearColor);
      if (irb->mt->fast_clear_color_value != new_color_value) {
         irb->mt->fast_clear_color_value = new_color_value;
         brw->state.dirty.brw |= BRW_NEW_SURFACES;
      }

      /* If the buffer is already in INTEL_FAST_CLEAR_STATE_CLEAR, the clear
       * is redundant and can be skipped.
       */
      if (irb->mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_CLEAR)
         return true;

      /* If the MCS buffer hasn't been allocated yet, we need to allocate
       * it now.
       */
      if (!irb->mt->mcs_mt) {
         if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
            /* MCS allocation failed; this will probably only happen in
             * out-of-memory conditions.  In any case, try to recover by
             * falling back to a non-blorp clear technique.
             */
            return false;
         }
         brw->state.dirty.brw |= BRW_NEW_SURFACES;
      }
   }

   const char *clear_type;
   if (is_fast_clear)
      clear_type = "fast";
   else if (params.wm_prog_key.use_simd16_replicated_data)
      clear_type = "replicated";
   else
      clear_type = "slow";

   DBG("%s (%s) to mt %p level %d layer %d\n", __FUNCTION__, clear_type,
       irb->mt, irb->mt_level, irb->mt_layer);

   brw_blorp_exec(brw, &params);

   if (is_fast_clear) {
      /* Now that the fast clear has occurred, put the buffer in
       * INTEL_FAST_CLEAR_STATE_CLEAR so that we won't waste time doing
       * redundant clears.
       */
      irb->mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
   }

   return true;
}
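
Example #1 relies on compute_fast_clear_color_bits to pack the clear color
into the form SURFACE_STATE expects.  That helper is not part of the excerpt;
below is a minimal sketch of what it could look like, assuming the
gl_color_union type from Mesa's mtypes.h and a hypothetical
GEN7_SURFACE_CLEAR_COLOR_SHIFT define for where the four bits live in the
SURFACE_STATE dword.

/* Hypothetical sketch, not the driver's actual implementation.  Each
 * channel collapses to a single bit because fast clear only supports
 * component values of exactly 0 or 1.
 */
static uint32_t
compute_fast_clear_color_bits(const union gl_color_union *color)
{
   uint32_t bits = 0;
   for (int i = 0; i < 4; i++) {
      /* Checking the raw bits for non-zero works for integer and float
       * encodings alike, since only exact 0/1 values reach this point.
       */
      if (color->ui[i] != 0)
         bits |= 1 << (GEN7_SURFACE_CLEAR_COLOR_SHIFT + (3 - i));
   }
   return bits;
}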
Example #2
bool
brw_meta_fast_clear(struct brw_context *brw, struct gl_framebuffer *fb,
                    GLbitfield buffers, bool partial_clear)
{
   struct gl_context *ctx = &brw->ctx;
   mesa_format format;
   enum { FAST_CLEAR, REP_CLEAR, PLAIN_CLEAR } clear_type;
   GLbitfield plain_clear_buffers, meta_save, rep_clear_buffers, fast_clear_buffers;
   struct rect fast_clear_rect, clear_rect;
   int layers;

   fast_clear_buffers = rep_clear_buffers = plain_clear_buffers = 0;

   /* First we loop through the color draw buffers and determine which ones
    * can be fast cleared, which ones can use the replicated write and which
    * ones have to fall back to regular color clear.
    */
   for (unsigned buf = 0; buf < fb->_NumColorDrawBuffers; buf++) {
      struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[buf];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);
      int index = fb->_ColorDrawBufferIndexes[buf];

      /* Only clear the buffers present in the provided mask */
      if (((1 << index) & buffers) == 0)
         continue;

      /* If this is an ES2 context or GL_ARB_ES2_compatibility is supported,
       * the framebuffer can be complete with some attachments missing.  In
       * this case the _ColorDrawBuffers pointer will be NULL.
       */
      if (rb == NULL)
         continue;

      clear_type = FAST_CLEAR;

      /* We don't have fast clear until gen7. */
      if (brw->gen < 7)
         clear_type = REP_CLEAR;

      if (irb->mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_NO_MCS)
         clear_type = REP_CLEAR;

      /* We can't do scissored fast clears because of the restrictions on the
       * fast clear rectangle size.
       */
      if (partial_clear)
         clear_type = REP_CLEAR;

      /* Fast clear is only supported for colors where all components are
       * either 0 or 1.
       */
      format = _mesa_get_render_format(ctx, irb->mt->format);
      if (!is_color_fast_clear_compatible(brw, format, &ctx->Color.ClearColor))
         clear_type = REP_CLEAR;

      /* From the SNB PRM (Vol4_Part1):
       *
       *     "Replicated data (Message Type = 111) is only supported when
       *      accessing tiled memory.  Using this Message Type to access
       *      linear (untiled) memory is UNDEFINED."
       */
      if (irb->mt->tiling == I915_TILING_NONE) {
         perf_debug("falling back to plain clear because buffers are untiled\n");
         clear_type = PLAIN_CLEAR;
      }

      /* Constant color writes ignore everything in blend and color calculator
       * state.  This is not documented.
       */
      GLubyte *color_mask = ctx->Color.ColorMask[buf];
      for (int i = 0; i < 4; i++) {
         if (_mesa_format_has_color_component(irb->mt->format, i) &&
             !color_mask[i]) {
            perf_debug("falling back to plain clear because of color mask\n");
            clear_type = PLAIN_CLEAR;
         }
      }

      /* Allocate the MCS for non-MSRT surfaces now if we're doing a fast
       * clear and we don't have the MCS yet.  On failure, fall back to a
       * replicated clear.
       */
      if (clear_type == FAST_CLEAR && irb->mt->mcs_mt == NULL)
         if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt))
            clear_type = REP_CLEAR;

      switch (clear_type) {
      case FAST_CLEAR:
         irb->mt->fast_clear_color_value =
            compute_fast_clear_color_bits(&ctx->Color.ClearColor);
         irb->need_downsample = true;

         /* If the buffer is already in INTEL_FAST_CLEAR_STATE_CLEAR, the
          * clear is redundant and can be skipped.  Only skip after we've
          * updated the fast clear color above though.
          */
         if (irb->mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_CLEAR)
            continue;

         /* Set fast_clear_state to RESOLVED so we don't try to resolve the
          * mt when we draw, in case it is also bound as a texture.
          */
         irb->mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
         fast_clear_buffers |= 1 << index;
         get_fast_clear_rect(brw, fb, irb, &fast_clear_rect);
         break;

      case REP_CLEAR:
         rep_clear_buffers |= 1 << index;
         get_buffer_rect(brw, fb, irb, &clear_rect);
         break;

      case PLAIN_CLEAR:
         plain_clear_buffers |= 1 << index;
         get_buffer_rect(brw, fb, irb, &clear_rect);
         continue;
      }
   }

   if (!(fast_clear_buffers | rep_clear_buffers)) {
      if (plain_clear_buffers)
         /* If we only have plain clears, skip the meta save/restore. */
         goto out;
      else
         /* Nothing left to do.  This happens when every buffer to be
          * cleared hit the redundant fast clear case above.
          */
         return true;
   }

   meta_save =
      MESA_META_ALPHA_TEST |
      MESA_META_BLEND |
      MESA_META_DEPTH_TEST |
      MESA_META_RASTERIZATION |
      MESA_META_SHADER |
      MESA_META_STENCIL_TEST |
      MESA_META_VERTEX |
      MESA_META_VIEWPORT |
      MESA_META_CLIP |
      MESA_META_CLAMP_FRAGMENT_COLOR |
      MESA_META_MULTISAMPLE |
      MESA_META_OCCLUSION_QUERY |
      MESA_META_DRAW_BUFFERS;

   _mesa_meta_begin(ctx, meta_save);

   if (!brw_fast_clear_init(brw)) {
      /* This is going to be hard to recover from, most likely out of memory.
       * Bail and let meta try and (probably) fail for us.
       */
      plain_clear_buffers = buffers;
      goto bail_to_meta;
   }

   /* Clears never have the color clamped. */
   if (ctx->Extensions.ARB_color_buffer_float)
      _mesa_ClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE);

   _mesa_set_enable(ctx, GL_DEPTH_TEST, GL_FALSE);
   _mesa_DepthMask(GL_FALSE);
   _mesa_set_enable(ctx, GL_STENCIL_TEST, GL_FALSE);

   use_rectlist(brw, true);

   layers = MAX2(1, fb->MaxNumLayers);
   if (fast_clear_buffers) {
      _mesa_meta_drawbuffers_from_bitfield(fast_clear_buffers);
      brw_bind_rep_write_shader(brw, (float *) fast_clear_color);
      set_fast_clear_op(brw, GEN7_PS_RENDER_TARGET_FAST_CLEAR_ENABLE);
      brw_draw_rectlist(ctx, &fast_clear_rect, layers);
      set_fast_clear_op(brw, 0);
   }

   if (rep_clear_buffers) {
      _mesa_meta_drawbuffers_from_bitfield(rep_clear_buffers);
      brw_bind_rep_write_shader(brw, ctx->Color.ClearColor.f);
      brw_draw_rectlist(ctx, &clear_rect, layers);
   }

   /* Now set the mts we cleared to INTEL_FAST_CLEAR_STATE_CLEAR so we'll
    * resolve them eventually.
    */
   for (unsigned buf = 0; buf < fb->_NumColorDrawBuffers; buf++) {
      struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[buf];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);
      int index = fb->_ColorDrawBufferIndexes[buf];

      if ((1 << index) & fast_clear_buffers)
         irb->mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
   }

 bail_to_meta:
   /* Dirty _NEW_BUFFERS so we reemit SURFACE_STATE which sets the fast clear
    * color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
    * we render to it.
    */
   brw->NewGLState |= _NEW_BUFFERS;

   /* Set the custom state back to normal and dirty the same bits as above */
   use_rectlist(brw, false);

   _mesa_meta_end(ctx);

   /* From BSpec: Render Target Fast Clear:
    *
    *     After Render target fast clear, pipe-control with color cache
    *     write-flush must be issued before sending any DRAW commands on that
    *     render target.
    */
   intel_batchbuffer_emit_mi_flush(brw);

   /* If we had to fall back to plain clear for any buffers, clear those now
    * by calling into meta.
    */
 out:
   if (plain_clear_buffers)
      _mesa_meta_glsl_Clear(&brw->ctx, plain_clear_buffers);

   return true;
}
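
Example #2 gates the fast path on is_color_fast_clear_compatible, which
enforces the "all components either 0 or 1" rule from the comment above.
The helper is not shown in the excerpt; a minimal sketch under that stated
rule might look as follows (rejecting integer formats outright is an
assumption here, not something the excerpt confirms).

/* Hypothetical sketch, not the driver's actual implementation. */
static bool
is_color_fast_clear_compatible(struct brw_context *brw, mesa_format format,
                               const union gl_color_union *color)
{
   /* Assumption: integer render formats take the replicated path. */
   if (_mesa_is_format_integer_color(format))
      return false;

   /* Fast clear only supports component values of exactly 0 or 1. */
   for (int i = 0; i < 4; i++) {
      if (color->f[i] != 0.0f && color->f[i] != 1.0f)
         return false;
   }

   return true;
}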
Example #3
bool
brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
                      bool partial_clear)
{
   struct gl_context *ctx = &brw->ctx;

   /* The constant color clear code doesn't work for multisampled surfaces, so
    * we need to support falling back to other clear mechanisms.
    * Unfortunately, our clear code is based on a bitmask that doesn't
    * distinguish individual color attachments, so we walk the attachments to
    * see if any require fallback, and fall back for all if any of them need
    * to.
    */
   for (unsigned buf = 0; buf < ctx->DrawBuffer->_NumColorDrawBuffers; buf++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[buf];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);

      if (irb && irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_NONE)
         return false;
   }

   for (unsigned buf = 0; buf < ctx->DrawBuffer->_NumColorDrawBuffers; buf++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[buf];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);

      /* If this is an ES2 context or GL_ARB_ES2_compatibility is supported,
       * the framebuffer can be complete with some attachments missing.  In
       * this case the _ColorDrawBuffers pointer will be NULL.
       */
      if (rb == NULL)
         continue;

      brw_blorp_clear_params params(brw, fb, rb, ctx->Color.ColorMask[buf],
                                    partial_clear);

      bool is_fast_clear =
         (params.fast_clear_op == GEN7_FAST_CLEAR_OP_FAST_CLEAR);
      if (is_fast_clear) {
         /* Record the clear color in the miptree so that it will be
          * programmed in SURFACE_STATE by later rendering and resolve
          * operations.
          */
         uint32_t new_color_value =
            compute_fast_clear_color_bits(&ctx->Color.ClearColor);
         if (irb->mt->fast_clear_color_value != new_color_value) {
            irb->mt->fast_clear_color_value = new_color_value;
            brw->state.dirty.brw |= BRW_NEW_SURFACES;
         }

         /* If the buffer is already in INTEL_MCS_STATE_CLEAR, the clear is
          * redundant and can be skipped.
          */
         if (irb->mt->mcs_state == INTEL_MCS_STATE_CLEAR)
            continue;

         /* If the MCS buffer hasn't been allocated yet, we need to allocate
          * it now.
          */
         if (!irb->mt->mcs_mt) {
            if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
               /* MCS allocation failed; this will probably only happen in
                * out-of-memory conditions.  In any case, try to recover by
                * falling back to a non-blorp clear technique.
                */
               return false;
            }
            brw->state.dirty.brw |= BRW_NEW_SURFACES;
         }
      }

      DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
          irb->mt, irb->mt_level, irb->mt_layer);

      brw_blorp_exec(brw, &params);

      if (is_fast_clear) {
         /* Now that the fast clear has occurred, put the buffer in
          * INTEL_MCS_STATE_CLEAR so that we won't waste time doing redundant
          * clears.
          */
         irb->mt->mcs_state = INTEL_MCS_STATE_CLEAR;
      }
   }

   return true;
}
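
All three examples report failure by returning false so the caller can fall
back to a non-blorp clear.  A hypothetical caller sketch (the mask handling
and the _mesa_meta_Clear fallback are assumptions about the surrounding
driver code, not part of the excerpt) could look like this:

/* Hypothetical sketch: clear color buffers through blorp when possible and
 * hand anything blorp declined to the generic meta clear path.
 */
static void
clear_with_blorp_fallback(struct brw_context *brw, GLbitfield mask,
                          bool partial_clear)
{
   struct gl_context *ctx = &brw->ctx;

   if (mask & BUFFER_BITS_COLOR) {
      /* brw_blorp_clear_color returns false when any attachment needs a
       * fallback (multisampled surfaces, MCS allocation failure, ...),
       * in which case the color bits stay in the mask.
       */
      if (brw_blorp_clear_color(brw, ctx->DrawBuffer, partial_clear))
         mask &= ~BUFFER_BITS_COLOR;
   }

   /* Whatever is left over goes through the generic meta path. */
   if (mask)
      _mesa_meta_Clear(ctx, mask);
}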