Example #1
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
	 DBG("fast clear: depth\n");
	 mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   /* BLORP color clears are currently only taken on Gen6-7. */
   if (brw->gen >= 6 && brw->gen < 8) {
      if (mask & BUFFER_BITS_COLOR) {
         if (brw_blorp_clear_color(brw, fb, mask, partial_clear)) {
            debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
            mask &= ~BUFFER_BITS_COLOR;
         }
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&brw->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
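
For context, the noop_scissor() helper referenced above reports whether the scissor rectangle covers the whole framebuffer, in which case scissoring is a no-op and the clear can still be treated as a full-buffer clear. A minimal sketch of that check, assuming the standard gl_scissor_attrib fields -- not necessarily the exact upstream implementation:

/* Sketch (assumed behavior): the scissor only restricts the clear if it
 * covers less than the full framebuffer.
 */
static bool
noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   return ctx->Scissor.ScissorArray[0].X <= 0 &&
          ctx->Scissor.ScissorArray[0].Y <= 0 &&
          ctx->Scissor.ScissorArray[0].Width >= fb->Width &&
          ctx->Scissor.ScissorArray[0].Height >= fb->Height;
}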
Example #2
/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;

   if (intel->gen < 6)
      return false;

   if (!mt->hiz_mt)
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if (ctx->Scissor.Enabled && !noop_scissor(ctx, fb)) {
      perf_debug("Failed to fast clear depth due to scissor being enabled.  "
                 "Possible 5%% performance win if avoided.\n");
      return false;
   }

   /* The rendered area has to be 8x4 samples, not resolved pixels, so we look
    * at the miptree slice dimensions instead of renderbuffer size.
    */
   if (mt->level[depth_irb->mt_level].width % 8 != 0 ||
       mt->level[depth_irb->mt_level].height % 4 != 0) {
      perf_debug("Failed to fast clear depth due to width/height %d,%d not "
                 "being aligned to 8,4.  Possible 5%% performance win if "
                 "avoided\n",
                 mt->level[depth_irb->mt_level].width,
                 mt->level[depth_irb->mt_level].height);
      return false;
   }

   uint32_t depth_clear_value;
   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_X24S8:
   case MESA_FORMAT_S8_Z24:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z32_FLOAT:
      depth_clear_value = float_as_int(ctx->Depth.Clear);
      break;

   case MESA_FORMAT_Z16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (intel->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0)
         return false;
      /* FALLTHROUGH */

   default:
      depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
      break;
   }

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->depth_clear_value != depth_clear_value) {
      intel_miptree_all_slices_resolve_depth(intel, mt);
      mt->depth_clear_value = depth_clear_value;
   }

   /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
    *      must be issued before the rectangle primitive used for the depth
    *      buffer clear operation.
    */
   intel_batchbuffer_emit_mi_flush(intel);

   intel_hiz_exec(intel, mt, depth_irb->mt_level, depth_irb->mt_layer,
                  GEN6_HIZ_OP_DEPTH_CLEAR);

   if (intel->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
       *      by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
       *      followed by Depth FLUSH'
       */
      intel_batchbuffer_emit_mi_flush(intel);
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_renderbuffer_set_needs_depth_resolve(depth_irb);

   return true;
}
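
The float_as_int() helper used for MESA_FORMAT_Z32_FLOAT reinterprets the float clear value as its raw 32-bit pattern, which is what gets programmed into the hardware for a float depth buffer. A self-contained sketch of such a bit-cast (the upstream helper may be implemented differently, e.g. via a union):

#include <stdint.h>
#include <string.h>

/* Sketch of a float_as_int() bit-cast (assumed implementation).  memcpy is
 * the strict-aliasing-safe way to reinterpret the bits of a float.
 */
static inline uint32_t
float_as_int(float f)
{
   uint32_t bits;
   memcpy(&bits, &f, sizeof(bits));
   return bits;
}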
Example #3
/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (brw->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(ctx, fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors.  "
                 "Possible 5%% performance win if avoided.\n",
                 mt->logical_width0, mt->logical_height0);
      return false;
   }

   uint32_t depth_clear_value;
   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_FLOAT32:
      depth_clear_value = float_as_int(ctx->Depth.Clear);
      break;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (brw->gen == 6 &&
          (minify(mt->physical_width0,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      /* FALLTHROUGH */

   default:
      if (brw->gen >= 8)
         depth_clear_value = float_as_int(ctx->Depth.Clear);
      else
         depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
      break;
   }

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->depth_clear_value != depth_clear_value) {
      intel_miptree_all_slices_resolve_depth(brw, mt);
      mt->depth_clear_value = depth_clear_value;
   }

   /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
    *      must be issued before the rectangle primitive used for the depth
    *      buffer clear operation.
    */
   brw_emit_mi_flush(brw);

   if (fb->MaxNumLayers > 0) {
      for (unsigned layer = 0; layer < depth_irb->layer_count; layer++) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + layer,
                        GEN6_HIZ_OP_DEPTH_CLEAR);
      }
   } else {
      intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                     GEN6_HIZ_OP_DEPTH_CLEAR);
   }

   if (brw->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
       *      by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
       *      followed by Depth FLUSH'
       */
      brw_emit_mi_flush(brw);
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_renderbuffer_att_set_needs_depth_resolve(depth_att);

   return true;
}
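
The minify() used in the MESA_FORMAT_Z_UNORM16 workaround derives a mip level's size from the base level: each level halves the dimension, clamped to 1. A sketch of that behavior (assumed to match the helper used above):

/* Sketch of minify(): size of a mip level 'levels' steps below the base,
 * never smaller than 1.  Assumed semantics, matching common miptree code.
 */
static inline unsigned
minify(unsigned base_size, unsigned levels)
{
   unsigned size = base_size >> levels;
   return size == 0 ? 1 : size;
}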
Example #4
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
	 DBG("fast clear: depth\n");
	 mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *stencil_irb =
         intel_get_renderbuffer(fb, BUFFER_STENCIL);
      struct intel_mipmap_tree *mt = stencil_irb->mt;
      if (mt && mt->stencil_mt)
         mt->stencil_mt->r8stencil_needs_update = true;
   }

   if (mask & BUFFER_BITS_COLOR) {
      brw_blorp_clear_color(brw, fb, mask, partial_clear,
                            ctx->Color.sRGBEnabled);
      debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
      mask &= ~BUFFER_BITS_COLOR;
   }

   if (devinfo->gen >= 6 && (mask & BUFFER_BITS_DEPTH_STENCIL)) {
      brw_blorp_clear_depth_stencil(brw, fb, mask, partial_clear);
      debug_mask("blorp depth/stencil", mask & BUFFER_BITS_DEPTH_STENCIL);
      mask &= ~BUFFER_BITS_DEPTH_STENCIL;
   }

   GLbitfield tri_mask = mask & (BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;
      _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
   }

   /* Any strange buffers get passed off to swrast.  The only thing that
    * should be left at this point is the accumulation buffer.
    */
   assert((mask & ~BUFFER_BIT_ACCUM) == 0);
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
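
The debug_mask() calls in these clear paths log which buffer bits each path consumed, which makes it easy to see how a single glClear() fans out across fast clear, BLORP, meta, and swrast. A hypothetical sketch (the real helper prints proper buffer names; the gating and output format here are assumptions):

/* Hypothetical sketch of debug_mask(): log the buffer bits a clear path
 * handled.  Prints raw bit indices instead of a buffer-name table.
 */
static void
debug_mask(const char *name, GLbitfield mask)
{
   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      DBG("%s clear:", name);
      for (unsigned i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1u << i))
            DBG(" bit %u", i);
      }
      DBG("\n");
   }
}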
Example #5
/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors.  "
                 "Possible 5%% performance win if avoided.\n",
                 mt->surf.logical_level0_px.width,
                 mt->surf.logical_level0_px.height);
      return false;
   }

   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (devinfo->gen == 6 &&
          (minify(mt->surf.phys_level0_sa.width,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      break;

   default:
      break;
   }

   /* Quantize the clear value to what can be stored in the actual depth
    * buffer.  This makes the following check more accurate because it now
    * checks if the actual depth bits will match.  It also prevents us from
    * getting a too-accurate depth value during depth testing or when sampling
    * with HiZ enabled.
    */
   float clear_value =
      mt->format == MESA_FORMAT_Z_FLOAT32 ? ctx->Depth.Clear :
      (unsigned)(ctx->Depth.Clear * fb->_DepthMax) / (float)fb->_DepthMax;
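
   /* Worked example with a hypothetical 16-bit depth buffer: for
    * ctx->Depth.Clear = 0.3 and _DepthMax = 65535, the buffer stores
    * (unsigned)(0.3 * 65535) = 19660, so the quantized clear_value becomes
    * 19660 / 65535.0f ~= 0.299992 -- the value depth tests will actually
    * see, rather than the requested 0.3.
    */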

   const uint32_t num_layers = depth_att->Layered ? depth_irb->layer_count : 1;

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->fast_clear_color.f32[0] != clear_value) {
      for (uint32_t level = mt->first_level; level <= mt->last_level; level++) {
         if (!intel_miptree_level_has_hiz(mt, level))
            continue;

         const unsigned level_layers = brw_get_num_logical_layers(mt, level);

         for (uint32_t layer = 0; layer < level_layers; layer++) {
            if (level == depth_irb->mt_level &&
                layer >= depth_irb->mt_layer &&
                layer < depth_irb->mt_layer + num_layers) {
               /* We're going to clear this layer anyway.  Leave it alone. */
               continue;
            }

            enum isl_aux_state aux_state =
               intel_miptree_get_aux_state(mt, level, layer);

            if (aux_state != ISL_AUX_STATE_CLEAR &&
                aux_state != ISL_AUX_STATE_COMPRESSED_CLEAR) {
               /* This slice doesn't have any fast-cleared bits. */
               continue;
            }

            /* If we got here, then the level may have fast-clear bits that
             * use the old clear value.  We need to do a depth resolve to get
             * rid of their use of the clear value before we can change it.
             * Fortunately, few applications ever change their depth clear
             * value so this shouldn't happen often.
             */
            intel_hiz_exec(brw, mt, level, layer, 1,
                           ISL_AUX_OP_FULL_RESOLVE);
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_RESOLVED);
         }
      }

      intel_miptree_set_depth_clear_value(ctx, mt, clear_value);
   }

   bool need_clear = false;
   for (unsigned a = 0; a < num_layers; a++) {
      enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(mt, depth_irb->mt_level,
                                     depth_irb->mt_layer + a);

      if (aux_state != ISL_AUX_STATE_CLEAR) {
         need_clear = true;
         break;
      }
   }

   if (!need_clear) {
      /* If all of the layers we intend to clear are already in the clear
       * state then simply updating the miptree fast clear value is sufficient
       * to change their clear value.
       */
      return true;
   }

   for (unsigned a = 0; a < num_layers; a++) {
      enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(mt, depth_irb->mt_level,
                                     depth_irb->mt_layer + a);

      if (aux_state != ISL_AUX_STATE_CLEAR) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + a, 1,
                        ISL_AUX_OP_FAST_CLEAR);
      }
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_miptree_set_aux_state(brw, mt, depth_irb->mt_level,
                               depth_irb->mt_layer, num_layers,
                               ISL_AUX_STATE_CLEAR);

   return true;
}
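
Note the structure of this later variant compared to Examples #2 and #3: instead of a single depth_clear_value on the miptree, the driver tracks per-(level, layer) aux state via ISL. A clear-value change therefore only forces resolves on slices that still reference the old value, and a redundant clear of already-cleared layers reduces to updating the stored clear value.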