Example #1
void
brw_upload_invariant_state(struct brw_context *brw)
{
   /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   /* Select the 3D pipeline (as opposed to media) */
   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0);
   ADVANCE_BATCH();

   if (brw->gen < 6) {
      /* Disable depth offset clamping. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(0.0);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(2);
   OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_VF_STATISTICS << 16 |
	     (unlikely(INTEL_DEBUG & DEBUG_STATS) ? 1 : 0));
   ADVANCE_BATCH();
}
Example #2
/* 3DSTATE_VS
 *
 * Disable vertex shader.
 */
void
gen6_blorp_emit_vs_disable(struct brw_context *brw,
                           const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 6) {
      /* From the BSpec, Volume 2a, Part 3 "Vertex Shader", Section
       * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
       *
       *   [DevSNB] A pipeline flush must be programmed prior to a
       *   3DSTATE_VS command that causes the VS Function Enable to
       *   toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
       *   command with CS stall bit set and a post sync operation.
       */
      intel_emit_post_sync_nonzero_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
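All of these call sites lean on the same helper, whose body never appears in the listings. The BSpec language quoted above pins down its shape, though: a PIPE_CONTROL with the CS stall bit set, followed by a PIPE_CONTROL with a non-zero post-sync operation. Below is a minimal sketch, assuming Mesa-style brw_emit_pipe_control_flush()/brw_emit_pipe_control_write() helpers and a scratch workaround_bo for the dummy write; those three names are assumptions, not taken from the listings.

/* Sketch only: the two-step SNB workaround inferred from the PIPE_CONTROL
 * requirements quoted in the comment above. Stall the command streamer
 * first, then issue a harmless post-sync write so that the post-sync
 * operation field is non-zero.
 */
void
intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
   /* CS stall: wait for the command streamer to drain first. */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_STALL_AT_SCOREBOARD);

   /* Non-zero post-sync op: write an immediate dword into a scratch
    * buffer (brw->workaround_bo here, an assumed field) that nothing
    * ever reads back.
    */
   brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->workaround_bo, 0, 0, 0);
}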
static void upload_polygon_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(33);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      for (i = 0; i < 32; i++)
	  OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
	 OUT_BATCH(ctx->PolygonStipple[i]);
   }
   CACHED_BATCH();
}
static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));

   /* _NEW_BUFFERS
    *
    * If we're drawing to a system window we have to invert the Y axis
    * in order to match the OpenGL pixel coordinate system, and our
    * offset must be matched to the window position.  If we're drawing
    * to a user-created FBO then our native pixel coordinate system
    * works just fine, and there's no window system to worry about.
    */
   if (_mesa_is_winsys_fbo(brw->intel.ctx.DrawBuffer))
      OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
   else
      OUT_BATCH(0);
   CACHED_BATCH();
}
Example #5
static void
gen6_blorp_emit_depth_disable(struct brw_context *brw,
                              const brw_blorp_params *params)
{
   intel_emit_post_sync_nonzero_flush(brw);
   intel_emit_depth_stall_flushes(brw);

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
   OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
             (BRW_SURFACE_NULL << 29));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_HIER_DEPTH_BUFFER << 16 | (3 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_STENCIL_BUFFER << 16 | (3 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
Example #6
/* 3DSTATE_VS
 *
 * Disable vertex shader.
 */
void
gen6_blorp_emit_vs_disable(struct brw_context *brw,
                           const brw_blorp_params *params)
{
   if (brw->gen == 6) {
      /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
       * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
       *
       *   [DevSNB] A pipeline flush must be programmed prior to a
       *   3DSTATE_VS command that causes the VS Function Enable to
       *   toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
       *   command with CS stall bit set and a post sync operation.
       */
      intel_emit_post_sync_nonzero_flush(brw);
   }

   /* Disable the push constant buffers. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 | (5 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
Example #7
static void upload_line_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);

   if (brw->gen >= 7) {
      /* in U1.16 */
      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
      tmpi = tmp * (1<<16);
      OUT_BATCH(tmpi << 15 | ctx->Line.StippleFactor);
   }
   else {
      /* in U1.13 */
      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
      tmpi = tmp * (1<<13);
      OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   }

   ADVANCE_BATCH();
}
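The fixed-point conversion above is worth a worked example: the hardware consumes the reciprocal of the stipple repeat factor as U1.16 (gen7+) or U1.13 (gen6 and earlier) fixed point, packed next to the factor itself. Here is a stand-alone sketch of the same packing; the helper name is hypothetical.

/* Hypothetical helper mirroring the packing in upload_line_stipple():
 * the inverse of the stipple factor is converted to U1.16 (gen7+,
 * placed at bit 15) or U1.13 (gen6 and earlier, placed at bit 16).
 */
#include <stdint.h>

static uint32_t
pack_line_stipple_dword(int gen, uint16_t stipple_factor)
{
   float inverse = 1.0f / (float) stipple_factor;

   if (gen >= 7) {
      uint32_t u1_16 = (uint32_t) (inverse * (1 << 16)); /* U1.16 */
      return u1_16 << 15 | stipple_factor;
   } else {
      uint32_t u1_13 = (uint32_t) (inverse * (1 << 13)); /* U1.13 */
      return u1_13 << 16 | stipple_factor;
   }
}

/* For StippleFactor = 3: gen6 computes 8192/3 = 2730 (0x0aaa), giving
 * 0x0aaa0003; gen7 computes 65536/3 = 21845 (0x5555), giving 0x2aaa8003.
 */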
Example #8
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   /* Recommended optimization for Victim Cache eviction in pixel backend. */
   if (brw->gen >= 9) {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(GEN7_CACHE_MODE_1);
      OUT_BATCH((GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC << 16) |
                GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
      ADVANCE_BATCH();
   }

   if (brw->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);
   }
}
void
brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
			     struct gl_transform_feedback_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_shader_program *shaderprog;
   const struct gl_transform_feedback_info *linked_xfb_info;
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   assert(brw->gen == 6);

   if (brw->geometry_program) {
      /* BRW_NEW_GEOMETRY_PROGRAM */
      shaderprog =
         ctx->_Shader->CurrentProgram[MESA_SHADER_GEOMETRY];
   } else {
      /* BRW_NEW_VERTEX_PROGRAM */
      shaderprog =
         ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX];
   }
   linked_xfb_info = &shaderprog->LinkedTransformFeedback;

   /* Compute the maximum number of vertices that we can write without
    * overflowing any of the buffers currently being used for feedback.
    */
   unsigned max_index
      = _mesa_compute_max_transform_feedback_vertices(xfb_obj,
                                                      linked_xfb_info);

   /* 3DSTATE_GS_SVB_INDEX is non-pipelined. */
   intel_emit_post_sync_nonzero_flush(brw);

   /* Initialize the SVBI 0 register to zero and set the maximum index. */
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
   OUT_BATCH(0); /* SVBI 0 */
   OUT_BATCH(0); /* starting index */
   OUT_BATCH(max_index);
   ADVANCE_BATCH();

   /* Initialize the rest of the unused streams to sane values.  Otherwise,
    * they may indicate that there is no room to write data and prevent
    * anything from happening at all.
    */
   for (int i = 1; i < 4; i++) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
      OUT_BATCH(i << SVB_INDEX_SHIFT);
      OUT_BATCH(0); /* starting index */
      OUT_BATCH(0xffffffff);
      ADVANCE_BATCH();
   }
}
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
    u32 flags = 0;
    struct pipe_control *pc = ring->private;
    u32 scratch_addr = pc->gtt_offset + 128;
    int ret;

    /* Force SNB workarounds for PIPE_CONTROL flushes */
    ret = intel_emit_post_sync_nonzero_flush(ring);
    if (ret)
        return ret;

    /* Just flush everything.  Experiments have shown that reducing the
     * number of bits based on the write domains has little performance
     * impact.
     */
    if (flush_domains) {
        flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
        flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        /*
         * Ensure that any following seqno writes only happen
         * when the render cache is indeed flushed.
         */
        flags |= PIPE_CONTROL_CS_STALL;
    }
    if (invalidate_domains) {
        flags |= PIPE_CONTROL_TLB_INVALIDATE;
        flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
        /*
         * TLB invalidate requires a post-sync write.
         */
        flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
    }

    ret = intel_ring_begin(ring, 4);
    if (ret)
        return ret;

    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
    intel_ring_emit(ring, flags);
    intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
    intel_ring_emit(ring, 0);
    intel_ring_advance(ring);

    return 0;
}
Example #11
/* 3DSTATE_DRAWING_RECTANGLE */
void
gen6_blorp_emit_drawing_rectangle(struct brw_context *brw,
                                  const brw_blorp_params *params)
{
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(((params->x1 - 1) & 0xffff) |
             ((params->y1 - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
Example #12
/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
   /* use legacy aa line coverage computation */
   OUT_BATCH(0);
   OUT_BATCH(0);
   CACHED_BATCH();
}
Example #13
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
	    ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
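DWord 2 of 3DSTATE_DRAWING_RECTANGLE packs the inclusive maxima: xmax - 1 in bits 15:0 and ymax - 1 starting at bit 16, mirroring the (params->x1 - 1)/(params->y1 - 1) packing in Example #11. An illustrative check of that arithmetic follows; the helper is hypothetical and not from the listings.

/* Illustrative only: verify the DWord-2 packing used above for a
 * 1920x1080 drawbuffer. 1919 = 0x77f lands in bits 15:0 and
 * 1079 = 0x437 at bit 16, giving 0x0437077f.
 */
#include <assert.h>

static void
check_drawing_rect_packing(void)
{
   assert((((1920 - 1) & 0xffff) | ((1080 - 1) << 16)) == 0x0437077f);
}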
Example #14
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const bool is_965 = brw->gen == 4 && !brw->is_g4x;

   /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   /* Select the 3D pipeline (as opposed to media) */
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 | 0);
   ADVANCE_BATCH();

   if (brw->gen < 6) {
      /* Disable depth offset clamping. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(0.0);
      ADVANCE_BATCH();
   }

   if (brw->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 |
	     (unlikely(INTEL_DEBUG & DEBUG_STATS) ? 1 : 0));
   ADVANCE_BATCH();
}
static void upload_invariant_state( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   /* Select the 3D pipeline (as opposed to media) */
   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0);
   ADVANCE_BATCH();

   if (intel->gen < 6) {
      /* Disable depth offset clamping. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(0.0);
      ADVANCE_BATCH();
   }

   if (intel->gen == 6) {
      int i;

      for (i = 0; i < 4; i++) {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
         OUT_BATCH(i << SVB_INDEX_SHIFT);
         OUT_BATCH(0);
         OUT_BATCH(0xffffffff);
         ADVANCE_BATCH();
      }
   }

   BEGIN_BATCH(2);
   OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_VF_STATISTICS << 16 |
	     (unlikely(INTEL_DEBUG & DEBUG_STATS) ? 1 : 0));
   ADVANCE_BATCH();
}
Example #16
void intel_batch_emit_flush(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	int flags;

	assert (!intel->in_batch_atomic);

	/* Big hammer, look to the pipelined flushes in future. */
	if ((INTEL_INFO(intel)->gen >= 060)) {
		if (intel->current_batch == BLT_BATCH) {
			BEGIN_BATCH_BLT(4);
			OUT_BATCH(MI_FLUSH_DW | 2);
			OUT_BATCH(0);
			OUT_BATCH(0);
			OUT_BATCH(0);
			ADVANCE_BATCH();
		} else  {
			if ((INTEL_INFO(intel)->gen == 060)) {
				/* HW workaround for Sandybridge */
				intel_emit_post_sync_nonzero_flush(scrn);
			} else {
				BEGIN_BATCH(4);
				OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
				OUT_BATCH(BRW_PIPE_CONTROL_WC_FLUSH |
					  BRW_PIPE_CONTROL_TC_FLUSH |
					  BRW_PIPE_CONTROL_NOWRITE);
				OUT_BATCH(0); /* write address */
				OUT_BATCH(0); /* write data */
				ADVANCE_BATCH();
			}
		}
	} else {
		flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
		if (INTEL_INFO(intel)->gen >= 040)
			flags = 0;

		BEGIN_BATCH(1);
		OUT_BATCH(MI_FLUSH | flags);
		ADVANCE_BATCH();
	}
	intel_batch_do_flush(scrn);
}
static void upload_line_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);
   OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   CACHED_BATCH();
}
Example #18
/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.SmoothFlag)
      return;

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (brw->gen == 4 && !brw->is_g4x)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
   /* use legacy aa line coverage computation */
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
Example #19
static void
gen6_blorp_emit_depth_stencil_config(struct brw_context *brw,
                                     const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   uint32_t draw_x = params->depth.x_offset;
   uint32_t draw_y = params->depth.y_offset;
   uint32_t tile_mask_x, tile_mask_y;

   brw_get_depthstencil_tile_masks(params->depth.mt,
                                   params->depth.level,
                                   params->depth.layer,
                                   NULL,
                                   &tile_mask_x, &tile_mask_y);

   /* 3DSTATE_DEPTH_BUFFER */
   {
      uint32_t tile_x = draw_x & tile_mask_x;
      uint32_t tile_y = draw_y & tile_mask_y;
      uint32_t offset =
         intel_region_get_aligned_offset(params->depth.mt->region,
                                         draw_x & ~tile_mask_x,
                                         draw_y & ~tile_mask_y, false);

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      WARN_ONCE((tile_x & 7) || (tile_y & 7),
                "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
                "Truncating offset, bad rendering may occur.\n");
      tile_x &= ~7;
      tile_y &= ~7;

      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);

      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
      OUT_BATCH((params->depth.mt->region->pitch - 1) |
                params->depth_format << 18 |
                1 << 21 | /* separate stencil enable */
                1 << 22 | /* hiz enable */
                BRW_TILEWALK_YMAJOR << 26 |
                1 << 27 | /* y-tiled */
                BRW_SURFACE_2D << 29);
      OUT_RELOC(params->depth.mt->region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH(BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1 |
                (params->depth.width + tile_x - 1) << 6 |
                (params->depth.height + tile_y - 1) << 19);
      OUT_BATCH(0);
      OUT_BATCH(tile_x |
                tile_y << 16);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_HIER_DEPTH_BUFFER */
   {
      struct intel_region *hiz_region = params->depth.mt->hiz_mt->region;
      uint32_t hiz_offset =
         intel_region_get_aligned_offset(hiz_region,
                                         draw_x & ~tile_mask_x,
                                         (draw_y & ~tile_mask_y) / 2, false);

      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
      OUT_BATCH(hiz_region->pitch - 1);
      OUT_RELOC(hiz_region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                hiz_offset);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_STENCIL_BUFFER */
   {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
Example #20
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *stencil_mt = NULL;
   struct intel_region *hiz_region = NULL;
   unsigned int len;
   bool separate_stencil = false;

   /* Amount by which drawing should be offset in order to draw to the
    * appropriate miplevel/zoffset/cubeface.  We will extract these values
    * from depth_irb or stencil_irb once we determine which is present.
    */
   uint32_t draw_x = 0, draw_y = 0;

   /* Masks used to determine how much of the draw_x and draw_y offsets should
    * be performed using the fine adjustment of "depth coordinate offset X/Y"
    * (dw5 of 3DSTATE_DEPTH_BUFFER).  Any remaining coarse adjustment will be
    * performed by changing the base addresses of the buffers.
    *
    * Since the HiZ, depth, and stencil buffers all use the same "depth
    * coordinate offset X/Y" values, we need to make sure that the coarse
    * adjustment will be possible to apply to all three buffers.  Since coarse
    * adjustment can only be applied in multiples of the tile size, we will OR
    * together the tile masks of all the buffers to determine which offsets to
    * perform as fine adjustments.
    */
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   if (depth_irb) {
      intel_region_get_tile_masks(depth_irb->mt->region,
                                  &tile_mask_x, &tile_mask_y);
   }

   if (depth_irb &&
       depth_irb->mt &&
       depth_irb->mt->hiz_mt) {
      hiz_region = depth_irb->mt->hiz_mt->region;

      uint32_t hiz_tile_mask_x, hiz_tile_mask_y;
      intel_region_get_tile_masks(hiz_region,
                                  &hiz_tile_mask_x, &hiz_tile_mask_y);

      /* Each HiZ row represents 2 rows of pixels */
      hiz_tile_mask_y = hiz_tile_mask_y << 1 | 1;

      tile_mask_x |= hiz_tile_mask_x;
      tile_mask_y |= hiz_tile_mask_y;
   }

   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (intel->gen == 6) {
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
   }

   /* Find the real separate stencil mt if present. */
   if (stencil_irb) {
      stencil_mt = stencil_irb->mt;
      if (stencil_mt->stencil_mt)
	 stencil_mt = stencil_mt->stencil_mt;

      if (stencil_mt->format == MESA_FORMAT_S8) {
	 separate_stencil = true;

         /* Separate stencil buffer uses 64x64 tiles. */
         tile_mask_x |= 63;
         tile_mask_y |= 63;
      } else {
         uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
         intel_region_get_tile_masks(stencil_mt->region,
                                     &stencil_tile_mask_x,
                                     &stencil_tile_mask_y);

         tile_mask_x |= stencil_tile_mask_x;
         tile_mask_y |= stencil_tile_mask_y;
      }
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil)
      depth_irb = stencil_irb;

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (!depth_irb && !separate_stencil) {
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
		(BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();

   } else if (!depth_irb && separate_stencil) {
      uint32_t tile_x, tile_y;

      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * Enable the hiz bit because it and the separate stencil bit must have
       * the same value. From Section 2.11.5.6.1.1 3DSTATE_DEPTH_BUFFER, Bit
       * 1.21 "Separate Stencil Enable":
       *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
       *     Enable must also be enabled.
       *
       *     [DevGT]: This field must be set to the same value (enabled or
       *     disabled) as Hierarchical Depth Buffer Enable
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(intel->has_separate_stencil);

      draw_x = stencil_irb->draw_x;
      draw_y = stencil_irb->draw_y;
      tile_x = draw_x & tile_mask_x;
      tile_y = draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
	        (1 << 21) | /* separate stencil enable */
	        (1 << 22) | /* hiz enable */
	        (BRW_TILEWALK_YMAJOR << 26) |
	        (1 << 27) | /* tiled surface */
	        (BRW_SURFACE_2D << 29));
      OUT_BATCH(0);
      OUT_BATCH(((stencil_irb->Base.Base.Width + tile_x - 1) << 6) |
	         (stencil_irb->Base.Base.Height + tile_y - 1) << 19);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
	 assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();

   } else {
      struct intel_region *region = depth_irb->mt->region;
      uint32_t tile_x, tile_y, offset;

      /* If using separate stencil, hiz must be enabled. */
      assert(!separate_stencil || hiz_region);

      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);
      assert(!hiz_region || region->tiling == I915_TILING_Y);

      draw_x = depth_irb->draw_x;
      draw_y = depth_irb->draw_y;
      tile_x = draw_x & tile_mask_x;
      tile_y = draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      offset = intel_region_get_aligned_offset(region,
                                               draw_x & ~tile_mask_x,
                                               draw_y & ~tile_mask_y);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
		(brw_depthbuffer_format(brw) << 18) |
		((hiz_region ? 1 : 0) << 21) | /* separate stencil enable */
		((hiz_region ? 1 : 0) << 22) | /* hiz enable */
		(BRW_TILEWALK_YMAJOR << 26) |
		((region->tiling != I915_TILING_NONE) << 27) |
		(BRW_SURFACE_2D << 29));
      OUT_RELOC(region->bo,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		offset);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
		(((depth_irb->Base.Base.Width + tile_x) - 1) << 6) |
		(((depth_irb->Base.Base.Height + tile_y) - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
	 assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   if (hiz_region || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set. Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz_region) {
         uint32_t hiz_offset =
            intel_region_get_aligned_offset(hiz_region,
                                            draw_x & ~tile_mask_x,
                                            (draw_y & ~tile_mask_y) / 2);

	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
	 OUT_RELOC(hiz_region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   hiz_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
	 struct intel_region *region = stencil_mt->region;

         /* Note: we can't compute the stencil offset using
          * intel_region_get_aligned_offset(), because stencil_region claims
          * that the region is untiled; in fact it's W tiled.
          */
         uint32_t stencil_offset =
            (draw_y & ~tile_mask_y) * region->pitch +
            (draw_x & ~tile_mask_x) * 64;

	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
          *    The pitch must be set to 2x the value computed based on width, as
          *    the stencil buffer is stored with two rows interleaved.
          */
	 OUT_BATCH(2 * region->pitch * region->cpp - 1);
	 OUT_RELOC(region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   stencil_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
    *     when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (intel->gen >= 6 || hiz_region) {
      if (intel->gen == 6)
	 intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
		GEN5_DEPTH_CLEAR_VALID |
		(2 - 2));
      OUT_BATCH(depth_irb ? depth_irb->mt->depth_clear_value : 0);
      ADVANCE_BATCH();
   }
}
Example #21
static void
upload_vs_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_stage_state *stage_state = &brw->vs.base;
   uint32_t floating_point_mode = 0;

   /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
    * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
    *
    *   [DevSNB] A pipeline flush must be programmed prior to a 3DSTATE_VS
    *   command that causes the VS Function Enable to toggle. Pipeline
    *   flush can be executed by sending a PIPE_CONTROL command with CS
    *   stall bit set and a post sync operation.
    *
    * Although we don't disable the VS during normal drawing, BLORP sometimes
    * disables it.  To be safe, do the flush here just in case.
    */
   intel_emit_post_sync_nonzero_flush(brw);

   if (stage_state->push_const_size == 0) {
      /* Disable the push constant buffers. */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 |
		GEN6_CONSTANT_BUFFER_0_ENABLE |
		(5 - 2));
      /* Pointer to the VS constant buffer.  Covered by the set of
       * state flags from gen6_upload_vs_constants
       */
      OUT_BATCH(stage_state->push_const_offset +
		stage_state->push_const_size - 1);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Use ALT floating point mode for ARB vertex programs, because they
    * require 0^0 == 1.
    */
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] == NULL)
      floating_point_mode = GEN6_VS_FLOATING_POINT_MODE_ALT;

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
   OUT_BATCH(stage_state->prog_offset);
   OUT_BATCH(floating_point_mode |
	     ((ALIGN(stage_state->sampler_count, 4)/4) << GEN6_VS_SAMPLER_COUNT_SHIFT) |
             ((brw->vs.prog_data->base.base.binding_table.size_bytes / 4) <<
              GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));

   if (brw->vs.prog_data->base.base.total_scratch) {
      OUT_RELOC(stage_state->scratch_bo,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		ffs(brw->vs.prog_data->base.base.total_scratch) - 11);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH((brw->vs.prog_data->base.base.dispatch_grf_start_reg <<
              GEN6_VS_DISPATCH_START_GRF_SHIFT) |
	     (brw->vs.prog_data->base.urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
	     (0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT));

   OUT_BATCH(((brw->max_vs_threads - 1) << GEN6_VS_MAX_THREADS_SHIFT) |
	     GEN6_VS_STATISTICS_ENABLE |
	     GEN6_VS_ENABLE);
   ADVANCE_BATCH();

   /* Based on my reading of the simulator, the VS constants don't get
    * pulled into the VS FF unit until an appropriate pipeline flush
    * happens, and instead the 3DSTATE_CONSTANT_VS packet just adds
    * references to them into a little FIFO.  The flushes are common,
    * but don't reliably happen between this and a 3DPRIMITIVE, causing
    * the primitive to use the wrong constants.  Then the FIFO
    * containing the constant setup gets added to again on the next
    * constants change, and eventually when a flush does happen the
    * unit is overwhelmed by constant changes and dies.
    *
    * To avoid this, send a PIPE_CONTROL down the line that will
    * update the unit immediately loading the constants.  The flush
    * type bits here were those set by the STATE_BASE_ADDRESS whose
    * move in a82a43e8d99e1715dd11c9c091b5ab734079b6a6 triggered the
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   intel_emit_post_sync_nonzero_flush(brw);
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_INSTRUCTION_FLUSH |
                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
Example #22
static void
gen6_blorp_emit_depth_stencil_config(struct brw_context *brw,
                                     const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;
   uint32_t draw_x, draw_y;
   uint32_t tile_mask_x, tile_mask_y;

   gen6_blorp_compute_tile_masks(params, &tile_mask_x, &tile_mask_y);
   params->depth.get_draw_offsets(&draw_x, &draw_y);

   /* 3DSTATE_DEPTH_BUFFER */
   {
      uint32_t width, height;
      params->depth.get_miplevel_dims(&width, &height);

      uint32_t tile_x = draw_x & tile_mask_x;
      uint32_t tile_y = draw_y & tile_mask_y;
      uint32_t offset =
         intel_region_get_aligned_offset(params->depth.mt->region,
                                         draw_x & ~tile_mask_x,
                                         draw_y & ~tile_mask_y);

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);

      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
      uint32_t pitch_bytes =
         params->depth.mt->region->pitch * params->depth.mt->region->cpp;
      OUT_BATCH((pitch_bytes - 1) |
                params->depth_format << 18 |
                1 << 21 | /* separate stencil enable */
                1 << 22 | /* hiz enable */
                BRW_TILEWALK_YMAJOR << 26 |
                1 << 27 | /* y-tiled */
                BRW_SURFACE_2D << 29);
      OUT_RELOC(params->depth.mt->region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH(BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1 |
                (width + tile_x - 1) << 6 |
                (height + tile_y - 1) << 19);
      OUT_BATCH(0);
      OUT_BATCH(tile_x |
                tile_y << 16);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_HIER_DEPTH_BUFFER */
   {
      struct intel_region *hiz_region = params->depth.mt->hiz_mt->region;
      uint32_t hiz_offset =
         intel_region_get_aligned_offset(hiz_region,
                                         draw_x & ~tile_mask_x,
                                         (draw_y & ~tile_mask_y) / 2);

      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
      OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
      OUT_RELOC(hiz_region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                hiz_offset);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_STENCIL_BUFFER */
   {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           struct intel_mipmap_tree *hiz_mt,
                           bool separate_stencil, uint32_t width,
                           uint32_t height, uint32_t tile_x, uint32_t tile_y)
{
   struct intel_context *intel = &brw->intel;

   /* Enable the hiz bit if we're doing separate stencil, because it and the
    * separate stencil bit must have the same value. From Section 2.11.5.6.1.1
    * 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable":
    *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
    *     Enable must also be enabled.
    *
    *     [DevGT]: This field must be set to the same value (enabled or
    *     disabled) as Hierarchical Depth Buffer Enable
    */
   bool enable_hiz_ss = hiz_mt || separate_stencil;


   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (intel->gen == 6) {
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
   }

   unsigned int len;
   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->region->pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             ((enable_hiz_ss ? 1 : 0) << 21) | /* separate stencil enable */
             ((enable_hiz_ss ? 1 : 0) << 22) | /* hiz enable */
             (BRW_TILEWALK_YMAJOR << 26) |
             ((depth_mt ? depth_mt->region->tiling != I915_TILING_NONE : 1)
              << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->region->bo,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (intel->is_g4x || intel->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (intel->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();

   if (hiz_mt || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set. Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz_mt) {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(hiz_mt->region->pitch - 1);
	 OUT_RELOC(hiz_mt->region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   brw->depthstencil.hiz_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
	 struct intel_region *region = stencil_mt->region;

	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
          *    The pitch must be set to 2x the value computed based on width, as
          *    the stencil buffer is stored with two rows interleaved.
          */
	 OUT_BATCH(2 * region->pitch - 1);
	 OUT_RELOC(region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   brw->depthstencil.stencil_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
    *     when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (intel->gen >= 6 || hiz_mt) {
      if (intel->gen == 6)
	 intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
		GEN5_DEPTH_CLEAR_VALID |
		(2 - 2));
      OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
      ADVANCE_BATCH();
   }
}
/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (intel->gen >= 6) {
      if (intel->gen == 6)
	 intel_emit_post_sync_nonzero_flush(intel);

       BEGIN_BATCH(10);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
       /* General state base address: stateless DP read/write requests */
       OUT_BATCH(1);
       /* Surface state base address:
	* BINDING_TABLE_STATE
	* SURFACE_STATE
	*/
       OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
        /* Dynamic state base address:
	 * SAMPLER_STATE
	 * SAMPLER_BORDER_COLOR_STATE
	 * CLIP, SF, WM/CC viewport state
	 * COLOR_CALC_STATE
	 * DEPTH_STENCIL_STATE
	 * BLEND_STATE
	 * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
	 * Disable is clear, which we rely on)
	 */
       OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
				   I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

       OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
		 1); /* Instruction base address: shader kernels (incl. SIP) */

       OUT_BATCH(1); /* General state upper bound */
       /* Dynamic state upper bound.  Although the documentation says that
	* programming it to zero will cause it to be ignored, that is a lie.
	* If this isn't programmed to a real bound, the sampler border color
	* pointer is rejected, causing border color to mysteriously fail.
	*/
       OUT_BATCH(0xfffff001);
       OUT_BATCH(1); /* Indirect object upper bound */
       OUT_BATCH(1); /* Instruction access upper bound */
       ADVANCE_BATCH();
   } else if (intel->gen == 5) {
       BEGIN_BATCH(8);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
       OUT_BATCH(1); /* General state base address */
       OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
		 1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
		 1); /* Instruction base address */
       OUT_BATCH(0xfffff001); /* General state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */
       OUT_BATCH(1); /* Instruction access upper bound */
       ADVANCE_BATCH();
   } else {
       BEGIN_BATCH(6);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
       OUT_BATCH(1); /* General state base address */
       OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
		 1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_BATCH(1); /* General state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */
       ADVANCE_BATCH();
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
}
Example #25
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = brw->depthstencil.depth_mt;
   struct intel_mipmap_tree *stencil_mt = brw->depthstencil.stencil_mt;
   struct intel_mipmap_tree *hiz_mt = brw->depthstencil.hiz_mt;
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   unsigned int len;
   bool separate_stencil = false;

   if (stencil_mt && stencil_mt->format == MESA_FORMAT_S8)
      separate_stencil = true;

   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (intel->gen == 6) {
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (!depth_irb && !separate_stencil) {
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
		(BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();

   } else if (!depth_irb && separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * Enable the hiz bit because it and the separate stencil bit must have
       * the same value. From Section 2.11.5.6.1.1 3DSTATE_DEPTH_BUFFER, Bit
       * 1.21 "Separate Stencil Enable":
       *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
       *     Enable must also be enabled.
       *
       *     [DevGT]: This field must be set to the same value (enabled or
       *     disabled) as Hierarchical Depth Buffer Enable
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(intel->has_separate_stencil);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
	        (1 << 21) | /* separate stencil enable */
	        (1 << 22) | /* hiz enable */
	        (BRW_TILEWALK_YMAJOR << 26) |
	        (1 << 27) | /* tiled surface */
	        (BRW_SURFACE_2D << 29));
      OUT_BATCH(0);
      OUT_BATCH(((stencil_irb->Base.Base.Width + tile_x - 1) << 6) |
	         (stencil_irb->Base.Base.Height + tile_y - 1) << 19);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
	 assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();

   } else {
      struct intel_region *region = depth_mt->region;

      /* If using separate stencil, hiz must be enabled. */
      assert(!separate_stencil || hiz_mt);

      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);
      assert(!hiz_mt || region->tiling == I915_TILING_Y);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((region->pitch - 1) |
		(brw_depthbuffer_format(brw) << 18) |
		((hiz_mt ? 1 : 0) << 21) | /* separate stencil enable */
		((hiz_mt ? 1 : 0) << 22) | /* hiz enable */
		(BRW_TILEWALK_YMAJOR << 26) |
		((region->tiling != I915_TILING_NONE) << 27) |
		(BRW_SURFACE_2D << 29));
      OUT_RELOC(region->bo,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		brw->depthstencil.depth_offset);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
		(((depth_irb->Base.Base.Width + tile_x) - 1) << 6) |
		(((depth_irb->Base.Base.Height + tile_y) - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
	 assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   if (hiz_mt || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set. Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz_mt) {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(hiz_mt->region->pitch - 1);
	 OUT_RELOC(hiz_mt->region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   brw->depthstencil.hiz_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
	 struct intel_region *region = stencil_mt->region;

	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
          *    The pitch must be set to 2x the value computed based on width, as
          *    the stencil buffer is stored with two rows interleaved.
          */
	 OUT_BATCH(2 * region->pitch - 1);
	 OUT_RELOC(region->bo,
		   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		   brw->depthstencil.stencil_offset);
	 ADVANCE_BATCH();
      } else {
	 BEGIN_BATCH(3);
	 OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
    *     when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (intel->gen >= 6 || hiz_mt) {
      if (intel->gen == 6)
	 intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
		GEN5_DEPTH_CLEAR_VALID |
		(2 - 2));
      OUT_BATCH(depth_irb ? depth_irb->mt->depth_clear_value : 0);
      ADVANCE_BATCH();
   }
}
Example #26
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];

   brw_select_pipeline(brw, pipeline);

   if (0) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->fragment_program != ctx->FragmentProgram._Current) {
         brw->fragment_program = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->geometry_program != ctx->GeometryProgram._Current) {
         brw->geometry_program = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->vertex_program != ctx->VertexProgram._Current) {
         brw->vertex_program = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->compute_program != ctx->ComputeProgram._Current) {
      brw->compute_program = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = &atoms[i];
	 struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, &state);
	 assert(!check_state(&examined, &generated));
	 prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits);
	 brw_print_dirty_count(brw_bits);
	 fprintf(stderr, "\n");
      }
   }
}