Example 1
static void update_sample_shading( struct st_context *st )
{
   if (!st->fp)
      return;

   if (!st->ctx->Extensions.ARB_sample_shading)
      return;

   cso_set_min_samples(
      st->cso_context,
      _mesa_get_min_invocations_per_fragment(st->ctx, &st->fp->Base, false));
}
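
These examples all revolve around _mesa_get_min_invocations_per_fragment(). As a reading aid, here is a minimal, hypothetical sketch of the semantics implied by ARB_sample_shading and the call sites in the examples; the parameter names are illustrative and this is not Mesa's actual implementation or signature.

#include <math.h>
#include <stdbool.h>

/* Hypothetical sketch, not Mesa code: approximate behaviour of
 * _mesa_get_min_invocations_per_fragment() as the call sites suggest.
 */
static int
min_invocations_per_fragment_sketch(bool sample_shading_enabled,    /* GL_SAMPLE_SHADING */
                                    float min_sample_shading_value, /* glMinSampleShading() */
                                    int fb_samples,                 /* samples of the draw buffer */
                                    bool shader_reads_per_sample,   /* 'sample' qualifier, gl_SampleID, ... */
                                    bool ignore_sample_qualifier)
{
   /* Per-sample inputs force one invocation per covered sample, unless the
    * caller asked to ignore them (as brw_wm_populate_key() does below when
    * computing its persample_shading key bit).
    */
   if (shader_reads_per_sample && !ignore_sample_qualifier)
      return fb_samples > 1 ? fb_samples : 1;

   /* GL_SAMPLE_SHADING requests at least ceil(value * samples) invocations,
    * and never fewer than one.
    */
   if (sample_shading_enabled) {
      int n = (int) ceilf(min_sample_shading_value * (float) fb_samples);
      return n > 1 ? n : 1;
   }

   return 1;
}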
Example 2
static void
upload_wm_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      brw_fragment_program_const(brw->fragment_program);
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data = brw->wm.prog_data;

   /* _NEW_BUFFERS */
   const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

   /* For non-1x per-sample shading, only one of SIMD8 and SIMD16 should be
    * enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader is successfully
    * compiled; in the majority of cases that gives us better performance than
    * 'SIMD8 only' dispatch.
    */
   const int min_inv_per_frag = _mesa_get_min_invocations_per_fragment(
                                   ctx, brw->fragment_program, false);

   /* BRW_NEW_FS_PROG_DATA | _NEW_COLOR */
   const bool dual_src_blend_enable = prog_data->dual_src_blend &&
                                      (ctx->Color.BlendEnabled & 1) &&
                                      ctx->Color.Blend[0]._UsesDualSrc;

   /* _NEW_COLOR, _NEW_MULTISAMPLE */
   const bool kill_enable = prog_data->uses_kill || ctx->Color.AlphaEnabled ||
                            ctx->Multisample.SampleAlphaToCoverage ||
                            prog_data->uses_omask;

   /* Rendering against the gl-context is always taken into account. */
   const bool statistic_enable = true;

   /* _NEW_LINE | _NEW_POLYGON | _NEW_BUFFERS | _NEW_COLOR |
    * _NEW_MULTISAMPLE
    */
   gen6_upload_wm_state(brw, fp, prog_data, &brw->wm.base,
                        multisampled_fbo, min_inv_per_frag,
                        dual_src_blend_enable, kill_enable,
                        brw_color_buffer_write_enabled(brw),
                        ctx->Multisample.Enabled,
                        ctx->Line.StippleFlag, ctx->Polygon.StippleFlag,
                        statistic_enable);
}
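
The dual_src_blend_enable computation above only becomes true if the application binds a dual-source fragment output and selects SRC1 blend factors. A hypothetical application-side sketch of the GL 3.3 / ARB_blend_func_extended state that makes ctx->Color.Blend[0]._UsesDualSrc true; 'prog' and the output variable names are illustrative, and context/loader setup is omitted.

static void
enable_dual_source_blending(GLuint prog)
{
   /* Route the shader's two outputs to color index 0 and index 1. */
   glBindFragDataLocationIndexed(prog, 0, 0, "first_color");
   glBindFragDataLocationIndexed(prog, 0, 1, "second_color");
   glLinkProgram(prog);

   /* Blend enable on buffer 0 plus SRC1 factors -> dual-source blending. */
   glEnablei(GL_BLEND, 0);
   glBlendFunc(GL_SRC1_COLOR, GL_ONE_MINUS_SRC1_COLOR);
}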
Example 3
void
gen8_upload_ps_extra(struct brw_context *brw,
                     const struct gl_fragment_program *fp,
                     const struct brw_wm_prog_data *prog_data,
                     bool multisampled_fbo)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw1 = 0;

   dw1 |= GEN8_PSX_PIXEL_SHADER_VALID;
   dw1 |= prog_data->computed_depth_mode << GEN8_PSX_COMPUTED_DEPTH_MODE_SHIFT;

   if (prog_data->uses_kill)
      dw1 |= GEN8_PSX_KILL_ENABLE;

   if (prog_data->num_varying_inputs != 0)
      dw1 |= GEN8_PSX_ATTRIBUTE_ENABLE;

   if (fp->Base.InputsRead & VARYING_BIT_POS)
      dw1 |= GEN8_PSX_USES_SOURCE_DEPTH | GEN8_PSX_USES_SOURCE_W;

   if (multisampled_fbo &&
       _mesa_get_min_invocations_per_fragment(ctx, fp, false) > 1)
      dw1 |= GEN8_PSX_SHADER_IS_PER_SAMPLE;

   if (fp->Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN)
      dw1 |= GEN8_PSX_SHADER_USES_INPUT_COVERAGE_MASK;

   if (prog_data->uses_omask)
      dw1 |= GEN8_PSX_OMASK_TO_RENDER_TARGET;

   if (brw->gen >= 9 && prog_data->pulls_bary)
      dw1 |= GEN9_PSX_SHADER_PULLS_BARY;

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_PS_EXTRA << 16 | (2 - 2));
   OUT_BATCH(dw1);
   ADVANCE_BATCH();
}
Example 4
static void
brw_wm_populate_key(struct brw_context *brw, struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;
   const bool multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
	 lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
	 lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
	 lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
	 lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL | _NEW_BUFFERS */
      if (ctx->Stencil._Enabled) {
	 lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

	 if (ctx->Stencil.WriteMask[0] ||
	     ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
	    lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->reduced_primitive == GL_LINES) {
	 line_aa = AA_ALWAYS;
      }
      else if (brw->reduced_primitive == GL_TRIANGLES) {
	 if (ctx->Polygon.FrontMode == GL_LINE) {
	    line_aa = AA_SOMETIMES;

	    if (ctx->Polygon.BackMode == GL_LINE ||
		(ctx->Polygon.CullFlag &&
		 ctx->Polygon.CullFaceMode == GL_BACK))
	       line_aa = AA_ALWAYS;
	 }
	 else if (ctx->Polygon.BackMode == GL_LINE) {
	    line_aa = AA_SOMETIMES;

	    if ((ctx->Polygon.CullFlag &&
		 ctx->Polygon.CullFaceMode == GL_FRONT))
	       line_aa = AA_ALWAYS;
	 }
      }
   }

   key->line_aa = line_aa;

   /* _NEW_HINT */
   key->high_quality_derivatives =
      ctx->Hint.FragmentShaderDerivative == GL_NICEST;

   if (brw->gen < 6)
      key->stats_wm = brw->stats_wm;

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
                                      &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
      key->drawable_height = _mesa_geometric_height(ctx->DrawBuffer);
   }

   if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
   key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
      (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);

   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
   /* Ignore sample qualifier while computing this flag. */
   key->persample_shading =
      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
   if (key->persample_shading)
      key->persample_2x = _mesa_geometric_samples(ctx->DrawBuffer) == 2;

   key->compute_pos_offset =
      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
      fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS;

   key->compute_sample_id =
      multisample_fbo &&
      ctx->Multisample.Enabled &&
      (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID);

   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead &
                                         BRW_FS_VARYING_INPUT_MASK) > 16)
      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;


   /* _NEW_COLOR | _NEW_BUFFERS */
   /* Pre-gen6, the hardware alpha test always used each render
    * target's alpha to do alpha test, as opposed to render target 0's alpha
    * like GL requires.  Fix that by building the alpha test into the
    * shader, and we'll skip enabling the fixed function alpha test.
    */
   if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
       ctx->Color.AlphaEnabled) {
      key->alpha_test_func = ctx->Color.AlphaFunc;
      key->alpha_test_ref = ctx->Color.AlphaRef;
   }

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}
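
For completeness: the persample_shading key bit above is driven purely by the explicit GL_SAMPLE_SHADING state, since the call passes true for ignore_sample_qualifier (per the "Ignore sample qualifier" comment). A hypothetical application-side snippet that turns it on for a multisampled draw buffer; GL 4.0 / ARB_sample_shading context and framebuffer setup are omitted.

static void
request_per_sample_shading(void)
{
   glEnable(GL_SAMPLE_SHADING);   /* enable explicit sample shading */
   glMinSampleShading(1.0f);      /* request one invocation per covered sample */
}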
Example 5
static void
upload_wm_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_fragment_program *fp =
      brw_fragment_program_const(brw->fragment_program);
   uint32_t dw2, dw4, dw5, dw6, ksp0, ksp2;

   /* _NEW_BUFFERS */
   bool multisampled_fbo = ctx->DrawBuffer->Visual.samples > 1;

   /* CACHE_NEW_WM_PROG
    *
    * We can't fold this into gen6_upload_wm_push_constants(), because
    * according to the SNB PRM, vol 2 part 1 section 7.2.2
    * (3DSTATE_CONSTANT_PS [DevSNB]):
    *
    *     "[DevSNB]: This packet must be followed by WM_STATE."
    */
   if (brw->wm.prog_data->base.nr_params == 0) {
      /* Disable the push constant buffers. */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 |
		GEN6_CONSTANT_BUFFER_0_ENABLE |
		(5 - 2));
      /* Pointer to the WM constant buffer.  Covered by the set of
       * state flags from gen6_upload_wm_push_constants.
       */
      OUT_BATCH(brw->wm.base.push_const_offset +
		brw->wm.base.push_const_size - 1);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   dw2 = dw4 = dw5 = dw6 = ksp2 = 0;
   dw4 |= GEN6_WM_STATISTICS_ENABLE;
   dw5 |= GEN6_WM_LINE_AA_WIDTH_1_0;
   dw5 |= GEN6_WM_LINE_END_CAP_AA_WIDTH_0_5;

   /* Use ALT floating point mode for ARB fragment programs, because they
    * require 0^0 == 1.  Even though _CurrentFragmentProgram is used for
    * rendering, CurrentProgram[MESA_SHADER_FRAGMENT] is used for this check
    * to differentiate between the GLSL and non-GLSL cases.
    */
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_FRAGMENT] == NULL)
      dw2 |= GEN6_WM_FLOATING_POINT_MODE_ALT;

   dw2 |= (ALIGN(brw->wm.base.sampler_count, 4) / 4) <<
           GEN6_WM_SAMPLER_COUNT_SHIFT;

   /* CACHE_NEW_WM_PROG */
   dw2 |= ((brw->wm.prog_data->base.binding_table.size_bytes / 4) <<
           GEN6_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);

   dw5 |= (brw->max_wm_threads - 1) << GEN6_WM_MAX_THREADS_SHIFT;

   /* CACHE_NEW_WM_PROG */

   /* For non-1x per-sample shading, only one of SIMD8 and SIMD16 should be
    * enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader is successfully
    * compiled; in the majority of cases that gives us better performance than
    * 'SIMD8 only' dispatch.
    */
   int min_inv_per_frag =
      _mesa_get_min_invocations_per_fragment(ctx, brw->fragment_program, false);
   assert(min_inv_per_frag >= 1);

   if (brw->wm.prog_data->prog_offset_16) {
      dw5 |= GEN6_WM_16_DISPATCH_ENABLE;

      if (min_inv_per_frag == 1) {
         dw5 |= GEN6_WM_8_DISPATCH_ENABLE;
         dw4 |= (brw->wm.prog_data->base.dispatch_grf_start_reg <<
                 GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
         dw4 |= (brw->wm.prog_data->dispatch_grf_start_reg_16 <<
                 GEN6_WM_DISPATCH_START_GRF_SHIFT_2);
         ksp0 = brw->wm.base.prog_offset;
         ksp2 = brw->wm.base.prog_offset + brw->wm.prog_data->prog_offset_16;
      } else {
         dw4 |= (brw->wm.prog_data->dispatch_grf_start_reg_16 <<
                GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
         ksp0 = brw->wm.base.prog_offset + brw->wm.prog_data->prog_offset_16;
      }
   }
   else {
      dw5 |= GEN6_WM_8_DISPATCH_ENABLE;
      dw4 |= (brw->wm.prog_data->base.dispatch_grf_start_reg <<
              GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
      ksp0 = brw->wm.base.prog_offset;
   }

   /* CACHE_NEW_WM_PROG | _NEW_COLOR */
   if (brw->wm.prog_data->dual_src_blend &&
       (ctx->Color.BlendEnabled & 1) &&
       ctx->Color.Blend[0]._UsesDualSrc) {
      dw5 |= GEN6_WM_DUAL_SOURCE_BLEND_ENABLE;
   }

   /* _NEW_LINE */
   if (ctx->Line.StippleFlag)
      dw5 |= GEN6_WM_LINE_STIPPLE_ENABLE;

   /* _NEW_POLYGON */
   if (ctx->Polygon.StippleFlag)
      dw5 |= GEN6_WM_POLYGON_STIPPLE_ENABLE;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (fp->program.Base.InputsRead & VARYING_BIT_POS)
      dw5 |= GEN6_WM_USES_SOURCE_DEPTH | GEN6_WM_USES_SOURCE_W;
   if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
      dw5 |= GEN6_WM_COMPUTED_DEPTH;
   /* CACHE_NEW_WM_PROG */
   dw6 |= brw->wm.prog_data->barycentric_interp_modes <<
      GEN6_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT;

   /* _NEW_COLOR, _NEW_MULTISAMPLE */
   if (fp->program.UsesKill || ctx->Color.AlphaEnabled ||
       ctx->Multisample.SampleAlphaToCoverage ||
       brw->wm.prog_data->uses_omask)
      dw5 |= GEN6_WM_KILL_ENABLE;

   /* _NEW_BUFFERS | _NEW_COLOR */
   if (brw_color_buffer_write_enabled(brw) ||
       dw5 & (GEN6_WM_KILL_ENABLE | GEN6_WM_COMPUTED_DEPTH)) {
      dw5 |= GEN6_WM_DISPATCH_ENABLE;
   }

   /* From the SNB PRM, volume 2 part 1, page 278:
    * "This bit is inserted in the PS payload header and made available to
    * the DataPort (either via the message header or via header bypass) to
    * indicate that oMask data (one or two phases) is included in Render
    * Target Write messages. If present, the oMask data is used to mask off
    * samples."
    */
   if (brw->wm.prog_data->uses_omask)
      dw5 |= GEN6_WM_OMASK_TO_RENDER_TARGET;

   /* CACHE_NEW_WM_PROG */
   dw6 |= brw->wm.prog_data->num_varying_inputs <<
      GEN6_WM_NUM_SF_OUTPUTS_SHIFT;
   if (multisampled_fbo) {
      /* _NEW_MULTISAMPLE */
      if (ctx->Multisample.Enabled)
         dw6 |= GEN6_WM_MSRAST_ON_PATTERN;
      else
         dw6 |= GEN6_WM_MSRAST_OFF_PIXEL;

      if (min_inv_per_frag > 1)
         dw6 |= GEN6_WM_MSDISPMODE_PERSAMPLE;
      else {
         dw6 |= GEN6_WM_MSDISPMODE_PERPIXEL;

         /* From the Sandy Bridge PRM, Vol 2 part 1, 7.7.1 ("Pixel Grouping
          * (Dispatch Size) Control"), p.334:
          *
          *     Note: in the table below, the Valid column indicates which
          *     products that combination is supported on. Combinations of
          *     dispatch enables not listed in the table are not available on
          *     any product.
          *
          *     A: Valid on all products
          *
          *     B: Not valid on [DevSNB] if 4x PERPIXEL mode with pixel shader
          *     computed depth.
          *
          *     D: Valid on all products, except when in non-1x PERSAMPLE mode
          *     (applies to [DevSNB+] only). Not valid on [DevSNB] if 4x
          *     PERPIXEL mode with pixel shader computed depth.
          *
          *     E: Not valid on [DevSNB] if 4x PERPIXEL mode with pixel shader
          *     computed depth.
          *
          *     F: Valid on all products, except not valid on [DevSNB] if 4x
          *     PERPIXEL mode with pixel shader computed depth.
          *
          * In the table that follows, the only entry with "A" in the Valid
          * column is the entry where only 8 pixel dispatch is enabled.
          * Therefore, when we are in PERPIXEL mode with pixel shader computed
          * depth, we need to disable SIMD16 dispatch.
          */
         if (dw5 & GEN6_WM_COMPUTED_DEPTH)
            dw5 &= ~GEN6_WM_16_DISPATCH_ENABLE;
      }
   } else {
      dw6 |= GEN6_WM_MSRAST_OFF_PIXEL;
      dw6 |= GEN6_WM_MSDISPMODE_PERSAMPLE;
   }

   /* From the SNB PRM, volume 2 part 1, page 281:
    * "If the PS kernel does not need the Position XY Offsets
    * to compute a Position XY value, then this field should be
    * programmed to POSOFFSET_NONE."
    *
    * "SW Recommendation: If the PS kernel needs the Position Offsets
    * to compute a Position XY value, this field should match Position
    * ZW Interpolation Mode to ensure a consistent position.xyzw
    * computation."
    * We only require XY sample offsets. So, this recommendation doesn't
    * look useful at the moment. We might need this in future.
    */
   if (brw->wm.prog_data->uses_pos_offset)
      dw6 |= GEN6_WM_POSOFFSET_SAMPLE;
   else
      dw6 |= GEN6_WM_POSOFFSET_NONE;

   BEGIN_BATCH(9);
   OUT_BATCH(_3DSTATE_WM << 16 | (9 - 2));
   OUT_BATCH(ksp0);
   OUT_BATCH(dw2);
   if (brw->wm.prog_data->total_scratch) {
      OUT_RELOC(brw->wm.base.scratch_bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		ffs(brw->wm.prog_data->total_scratch) - 11);
   } else {
      OUT_BATCH(0);
   }
   OUT_BATCH(dw4);
   OUT_BATCH(dw5);
   OUT_BATCH(dw6);
   OUT_BATCH(0); /* kernel 1 pointer */
   OUT_BATCH(ksp2);
   ADVANCE_BATCH();
}
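
The ffs(brw->wm.prog_data->total_scratch) - 11 term in the relocation above appears to encode the per-thread scratch size as a power of two in kilobytes (0 meaning 1 KB). A small illustrative helper under that assumption, taking total_scratch to be a power of two of at least 1 KB; this is a sketch, not a Mesa function.

#include <assert.h>
#include <strings.h>   /* ffs() */

/* Illustrative only: if total_scratch is a power of two >= 1024 bytes,
 * ffs(x) - 11 equals log2(x / 1024), i.e. the scratch size expressed in
 * power-of-two kilobytes.
 */
static unsigned
encode_per_thread_scratch(unsigned total_scratch)
{
   assert(total_scratch >= 1024 &&
          (total_scratch & (total_scratch - 1)) == 0);
   return ffs(total_scratch) - 11;   /* 1 KB -> 0, 2 KB -> 1, 4 KB -> 2, ... */
}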
Example 6
static void
upload_wm_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      brw_fragment_program_const(brw->fragment_program);
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data = brw->wm.prog_data;
   bool writes_depth = prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;
   uint32_t dw1, dw2;

   /* _NEW_BUFFERS */
   const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

   dw1 = dw2 = 0;
   dw1 |= GEN7_WM_STATISTICS_ENABLE;
   dw1 |= GEN7_WM_LINE_AA_WIDTH_1_0;
   dw1 |= GEN7_WM_LINE_END_CAP_AA_WIDTH_0_5;

   /* _NEW_LINE */
   if (ctx->Line.StippleFlag)
      dw1 |= GEN7_WM_LINE_STIPPLE_ENABLE;

   /* _NEW_POLYGON */
   if (ctx->Polygon.StippleFlag)
      dw1 |= GEN7_WM_POLYGON_STIPPLE_ENABLE;

   if (fp->program.Base.InputsRead & VARYING_BIT_POS)
      dw1 |= GEN7_WM_USES_SOURCE_DEPTH | GEN7_WM_USES_SOURCE_W;

   dw1 |= prog_data->computed_depth_mode << GEN7_WM_COMPUTED_DEPTH_MODE_SHIFT;
   dw1 |= prog_data->barycentric_interp_modes <<
      GEN7_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT;

   /* _NEW_COLOR, _NEW_MULTISAMPLE */
   /* Enable pixel kill when the shader uses discard or generates and outputs
    * oMask, or when alpha test / alpha-to-coverage is enabled.
    */
   if (prog_data->uses_kill || ctx->Color.AlphaEnabled ||
       ctx->Multisample.SampleAlphaToCoverage ||
       prog_data->uses_omask) {
      dw1 |= GEN7_WM_KILL_ENABLE;
   }

   if (_mesa_active_fragment_shader_has_atomic_ops(&brw->ctx)) {
      dw1 |= GEN7_WM_DISPATCH_ENABLE;
   }

   /* _NEW_BUFFERS | _NEW_COLOR */
   if (brw_color_buffer_write_enabled(brw) || writes_depth ||
       dw1 & GEN7_WM_KILL_ENABLE) {
      dw1 |= GEN7_WM_DISPATCH_ENABLE;
   }
   if (multisampled_fbo) {
      /* _NEW_MULTISAMPLE */
      if (ctx->Multisample.Enabled)
         dw1 |= GEN7_WM_MSRAST_ON_PATTERN;
      else
         dw1 |= GEN7_WM_MSRAST_OFF_PIXEL;

      if (_mesa_get_min_invocations_per_fragment(ctx, brw->fragment_program, false) > 1)
         dw2 |= GEN7_WM_MSDISPMODE_PERSAMPLE;
      else
         dw2 |= GEN7_WM_MSDISPMODE_PERPIXEL;
   } else {
      dw1 |= GEN7_WM_MSRAST_OFF_PIXEL;
      dw2 |= GEN7_WM_MSDISPMODE_PERSAMPLE;
   }

   if (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN) {
      dw1 |= GEN7_WM_USES_INPUT_COVERAGE_MASK;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_WM << 16 | (3 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(dw2);
   ADVANCE_BATCH();
}
Example 7
static void
gen7_upload_ps_state(struct brw_context *brw,
                     const struct gl_fragment_program *fp,
                     const struct brw_stage_state *stage_state,
                     const struct brw_wm_prog_data *prog_data,
                     bool enable_dual_src_blend, unsigned sample_mask,
                     unsigned fast_clear_op)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw2, dw4, dw5, ksp0, ksp2;
   const int max_threads_shift = brw->is_haswell ?
      HSW_PS_MAX_THREADS_SHIFT : IVB_PS_MAX_THREADS_SHIFT;

   dw2 = dw4 = dw5 = ksp2 = 0;

   const unsigned sampler_count =
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);
   dw2 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT);

   dw2 |= ((prog_data->base.binding_table.size_bytes / 4) <<
           GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);

   if (prog_data->base.use_alt_mode)
      dw2 |= GEN7_PS_FLOATING_POINT_MODE_ALT;

   /* Haswell requires the sample mask to be set in this packet as well as
    * in 3DSTATE_SAMPLE_MASK; the values should match. */
   /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
   if (brw->is_haswell)
      dw4 |= SET_FIELD(sample_mask, HSW_PS_SAMPLE_MASK);

   dw4 |= (brw->max_wm_threads - 1) << max_threads_shift;

   if (prog_data->base.nr_params > 0)
      dw4 |= GEN7_PS_PUSH_CONSTANT_ENABLE;

   /* From the IVB PRM, volume 2 part 1, page 287:
    * "This bit is inserted in the PS payload header and made available to
    * the DataPort (either via the message header or via header bypass) to
    * indicate that oMask data (one or two phases) is included in Render
    * Target Write messages. If present, the oMask data is used to mask off
    * samples."
    */
   if (prog_data->uses_omask)
      dw4 |= GEN7_PS_OMASK_TO_RENDER_TARGET;

   /* From the IVB PRM, volume 2 part 1, page 287:
    * "If the PS kernel does not need the Position XY Offsets to
    * compute a Position Value, then this field should be programmed
    * to POSOFFSET_NONE."
    * "SW Recommendation: If the PS kernel needs the Position Offsets
    * to compute a Position XY value, this field should match Position
    * ZW Interpolation Mode to ensure a consistent position.xyzw
    * computation."
    * We only require XY sample offsets. So, this recommendation doesn't
    * look useful at the moment. We might need this in future.
    */
   if (prog_data->uses_pos_offset)
      dw4 |= GEN7_PS_POSOFFSET_SAMPLE;
   else
      dw4 |= GEN7_PS_POSOFFSET_NONE;

   /* The hardware wedges if you have this bit set but don't turn on any dual
    * source blend factors.
    */
   if (enable_dual_src_blend)
      dw4 |= GEN7_PS_DUAL_SOURCE_BLEND_ENABLE;

   /* BRW_NEW_FS_PROG_DATA */
   if (prog_data->num_varying_inputs != 0)
      dw4 |= GEN7_PS_ATTRIBUTE_ENABLE;

   /* For non-1x per-sample shading, only one of SIMD8 and SIMD16 should be
    * enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader is successfully
    * compiled; in the majority of cases that gives us better performance than
    * 'SIMD8 only' dispatch.
    */
   int min_inv_per_frag =
      _mesa_get_min_invocations_per_fragment(ctx, fp, false);
   assert(min_inv_per_frag >= 1);

   if (prog_data->prog_offset_16 || prog_data->no_8) {
      dw4 |= GEN7_PS_16_DISPATCH_ENABLE;
      if (!prog_data->no_8 && min_inv_per_frag == 1) {
         dw4 |= GEN7_PS_8_DISPATCH_ENABLE;
         dw5 |= (prog_data->base.dispatch_grf_start_reg <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
         dw5 |= (prog_data->dispatch_grf_start_reg_16 <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_2);
         ksp0 = stage_state->prog_offset;
         ksp2 = stage_state->prog_offset + prog_data->prog_offset_16;
      } else {
         dw5 |= (prog_data->dispatch_grf_start_reg_16 <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
         ksp0 = stage_state->prog_offset + prog_data->prog_offset_16;
      }
   }
   else {
      dw4 |= GEN7_PS_8_DISPATCH_ENABLE;
      dw5 |= (prog_data->base.dispatch_grf_start_reg <<
              GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
      ksp0 = stage_state->prog_offset;
   }

   dw4 |= fast_clear_op;

   BEGIN_BATCH(8);
   OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
   OUT_BATCH(ksp0);
   OUT_BATCH(dw2);
   if (prog_data->base.total_scratch) {
      OUT_RELOC(brw->wm.base.scratch_bo,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		ffs(prog_data->base.total_scratch) - 11);
   } else {
      OUT_BATCH(0);
   }
   OUT_BATCH(dw4);
   OUT_BATCH(dw5);
   OUT_BATCH(0); /* kernel 1 pointer */
   OUT_BATCH(ksp2);
   ADVANCE_BATCH();
}
void
gen8_upload_ps_state(struct brw_context *brw,
                     const struct gl_fragment_program *fp,
                     const struct brw_stage_state *stage_state,
                     const struct brw_wm_prog_data *prog_data,
                     uint32_t fast_clear_op)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw3 = 0, dw6 = 0, dw7 = 0, ksp0, ksp2 = 0;

   /* Initialize the execution mask with VMask.  Otherwise, derivatives are
    * incorrect for subspans where some of the pixels are unlit.  We believe
    * the bit just didn't take effect in previous generations.
    */
   dw3 |= GEN7_PS_VECTOR_MASK_ENABLE;

   const unsigned sampler_count =
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);
   dw3 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT); 

   /* BRW_NEW_FS_PROG_DATA */
   dw3 |=
      ((prog_data->base.binding_table.size_bytes / 4) <<
       GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);

   if (prog_data->base.use_alt_mode)
      dw3 |= GEN7_PS_FLOATING_POINT_MODE_ALT;

   /* 3DSTATE_PS expects the number of threads per PSD, which is always 64;
    * it implicitly scales for different GT levels (which have some # of PSDs).
    *
    * In Gen8 the format is U8-2 whereas in Gen9 it is U8-1.
    */
   if (brw->gen >= 9)
      dw6 |= (64 - 1) << HSW_PS_MAX_THREADS_SHIFT;
   else
      dw6 |= (64 - 2) << HSW_PS_MAX_THREADS_SHIFT;

   if (prog_data->base.nr_params > 0)
      dw6 |= GEN7_PS_PUSH_CONSTANT_ENABLE;

   /* From the documentation for this packet:
    * "If the PS kernel does not need the Position XY Offsets to
    *  compute a Position Value, then this field should be programmed
    *  to POSOFFSET_NONE."
    *
    * "SW Recommendation: If the PS kernel needs the Position Offsets
    *  to compute a Position XY value, this field should match Position
    *  ZW Interpolation Mode to ensure a consistent position.xyzw
    *  computation."
    *
    * We only require XY sample offsets. So, this recommendation doesn't
    * look useful at the moment. We might need this in future.
    */
   if (prog_data->uses_pos_offset)
      dw6 |= GEN7_PS_POSOFFSET_SAMPLE;
   else
      dw6 |= GEN7_PS_POSOFFSET_NONE;

   dw6 |= fast_clear_op;

   /* _NEW_MULTISAMPLE
    * For non-1x per-sample shading, only one of SIMD8 and SIMD16 should be
    * enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader is successfully
    * compiled; in the majority of cases that gives us better performance than
    * 'SIMD8 only' dispatch.
    */
   int min_invocations_per_fragment =
      _mesa_get_min_invocations_per_fragment(ctx, fp, false);
   assert(min_invocations_per_fragment >= 1);

   if (prog_data->prog_offset_16 || prog_data->no_8) {
      dw6 |= GEN7_PS_16_DISPATCH_ENABLE;
      if (!prog_data->no_8 && min_invocations_per_fragment == 1) {
         dw6 |= GEN7_PS_8_DISPATCH_ENABLE;
         dw7 |= (prog_data->base.dispatch_grf_start_reg <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
         dw7 |= (prog_data->dispatch_grf_start_reg_16 <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_2);
         ksp0 = stage_state->prog_offset;
         ksp2 = stage_state->prog_offset + prog_data->prog_offset_16;
      } else {
         dw7 |= (prog_data->dispatch_grf_start_reg_16 <<
                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);

         ksp0 = stage_state->prog_offset + prog_data->prog_offset_16;
      }
   } else {
      dw6 |= GEN7_PS_8_DISPATCH_ENABLE;
      dw7 |= (prog_data->base.dispatch_grf_start_reg <<
              GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
      ksp0 = stage_state->prog_offset;
   }

   BEGIN_BATCH(12);
   OUT_BATCH(_3DSTATE_PS << 16 | (12 - 2));
   OUT_BATCH(ksp0);
   OUT_BATCH(0);
   OUT_BATCH(dw3);
   if (prog_data->base.total_scratch) {
      OUT_RELOC64(stage_state->scratch_bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  ffs(prog_data->base.total_scratch) - 11);
   } else {
      OUT_BATCH(0);
      OUT_BATCH(0);
   }
   OUT_BATCH(dw6);
   OUT_BATCH(dw7);
   OUT_BATCH(0); /* kernel 1 pointer */
   OUT_BATCH(0);
   OUT_BATCH(ksp2);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
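
The SIMD8/SIMD16 selection in Example 5 and in the two functions of Example 7 follows the same pattern (the gen6 path simply has no no_8 case). A condensed, hypothetical helper restating that decision, purely as a summary of the branches above and not a Mesa function.

#include <stdbool.h>

/* Hypothetical summary of the dispatch selection above; inputs mirror the
 * prog_data fields those functions consult.
 */
struct dispatch_choice {
   bool simd8;
   bool simd16;
};

static struct dispatch_choice
choose_dispatch(bool has_simd16,        /* prog_offset_16 != 0 */
                bool no_simd8,          /* prog_data->no_8 */
                int min_inv_per_frag)   /* _mesa_get_min_invocations_per_fragment() */
{
   struct dispatch_choice c = { false, false };

   if (has_simd16 || no_simd8) {
      c.simd16 = true;
      /* SIMD8 is only added back for ordinary 1x per-pixel shading. */
      if (!no_simd8 && min_inv_per_frag == 1)
         c.simd8 = true;
   } else {
      c.simd8 = true;
   }
   return c;
}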