Example #1
bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_geometry_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;
   c.gp = gp;

   c.prog_data.include_primitive_id =
      (gp->program.Base.InputsRead & VARYING_BIT_PRIMITIVE_ID) != 0;

   c.prog_data.invocations = gp->program.Invocations;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   int param_count = gs->num_uniform_components * 4;

   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   c.prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   c.prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   c.prog_data.base.base.nr_params = param_count;

   if (brw->gen >= 7) {
      if (gp->program.OutputType == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect.  So we
          * configure the hardware to interpret the control data as stream ID.
          */
         c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (prog->Geom.UsesStreams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip, EndPrimitive()
          * may be used to terminate the current strip and start a new one
          * (similar to primitive restart), and outputting data to multiple
          * streams is not supported.  So we configure the hardware to interpret
          * the control data as EndPrimitive information (a.k.a. "cut bits").
          */
         c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex = gp->program.UsesEndPrimitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;

      /* If it is using transform feedback, enable it */
      if (prog->TransformFeedback.NumVarying)
         c.prog_data.gen6_xfb_enabled = true;
      else
         c.prog_data.gen6_xfb_enabled = false;
   }
   c.control_data_header_size_bits =
      gp->program.VerticesOut * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   c.prog_data.control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &c.prog_data.base.vue_map, outputs_written);

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 3DSTATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB).  So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    *
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(brw->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output. In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (brw->gen >= 7) {
      output_size_bytes =
         c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
      output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;
   } else {
      output_size_bytes = c.prog_data.output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (brw->gen >= 8)
      output_size_bytes += 32;

   assert(output_size_bytes >= 1);
   int max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (brw->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (brw->gen >= 7)
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   else
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;

   c.prog_data.output_topology =
      get_hw_prim_for_gl_prim(gp->program.OutputType);

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &c.input_vue_map, c.key.input_varyings);

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.base.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
			 c.prog_data.base.base.total_scratch *
                         brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
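
A quick standalone illustration of the control-data header sizing used above: per-vertex control bits times the vertex count, rounded up to 256-bit HWORDs. This is a sketch, not driver code; the ALIGN macro is re-implemented here and the input values (2 bits per vertex, 128 vertices) are assumptions.

#include <stdio.h>

#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

int main(void)
{
   unsigned bits_per_vertex = 2;   /* stream IDs in use (assumed) */
   unsigned vertices_out = 128;    /* max_vertices (assumed) */

   unsigned header_bits = vertices_out * bits_per_vertex;
   /* 1 HWORD = 32 bytes = 256 bits, rounded up */
   unsigned header_hwords = ALIGN(header_bits, 256) / 256;

   printf("%u control bits -> %u HWORD(s) = %u bytes\n",
          header_bits, header_hwords, header_hwords * 32);
   return 0;
}
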
Example #2
bool
brw_codegen_vs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_vertex_program *vp,
                    struct brw_vs_prog_key *key)
{
   const struct brw_compiler *compiler = brw->intelScreen->compiler;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct brw_shader *vs = NULL;
   bool start_busy = false;
   double start_time = 0;

   if (prog)
      vs = (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (!prog)
      stage_prog_data->use_alt_mode = true;

   mem_ctx = ralloc_context(NULL);

   brw_assign_common_binding_table_offsets(MESA_SHADER_VERTEX,
                                           brw->intelScreen->devinfo,
                                           prog, &vp->program.Base,
                                           &prog_data.base.base, 0);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = vp->program.Base.nir->num_uniforms / 4;

   if (vs)
      prog_data.base.base.nr_image_params = vs->base.NumImages;

   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += key->nr_userclip_plane_consts * 4;

   stage_prog_data->param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   stage_prog_data->pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   stage_prog_data->image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    stage_prog_data->nr_image_params);
   stage_prog_data->nr_params = param_count;

   if (prog) {
      brw_nir_setup_glsl_uniforms(vp->program.Base.nir, prog, &vp->program.Base,
                                  &prog_data.base.base,
                                  compiler->scalar_stage[MESA_SHADER_VERTEX]);
   } else {
      brw_nir_setup_arb_uniforms(vp->program.Base.nir, &vp->program.Base,
                                 &prog_data.base.base);
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (key->copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   prog_data.base.cull_distance_mask =
      ((1 << vp->program.Base.CullDistanceArraySize) - 1) <<
      vp->program.Base.ClipDistanceArraySize;

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (key->point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (key->nr_userclip_plane_consts > 0) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       prog ? prog->SeparateShader ||
                              prog->_LinkedShaders[MESA_SHADER_TESS_EVAL]
                            : false);

   if (0) {
      _mesa_fprint_program_opt(stderr, &vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      brw_dump_ir("vertex", prog, vs ? &vs->base : NULL, &vp->program.Base);

      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data.base.vue_map);
   }

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, prog, &vp->program.Base, ST_VS);

   /* Emit GEN4 code.
    */
   char *error_str;
   program = brw_compile_vs(compiler, brw, mem_ctx, key,
                            &prog_data, vp->program.Base.nir,
                            brw_select_clip_planes(&brw->ctx),
                            !_mesa_is_gles3(&brw->ctx),
                            st_index, &program_size, &error_str);
   if (program == NULL) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile vertex shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug) && vs) {
      if (vs->compiled_once) {
         brw_vs_debug_recompile(brw, prog, key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      vs->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   if (prog_data.base.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
			 prog_data.base.base.total_scratch *
                         brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
		    key, sizeof(struct brw_vs_prog_key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
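
The cull_distance_mask computation above packs the cull-distance bits directly above the clip-distance bits in a single mask. A minimal sketch, with assumed array sizes:

#include <stdio.h>

int main(void)
{
   unsigned clip_array_size = 4;   /* gl_ClipDistance[4] (assumed) */
   unsigned cull_array_size = 2;   /* gl_CullDistance[2] (assumed) */

   /* Same expression as above: cull bits start where clip bits end. */
   unsigned cull_mask = ((1u << cull_array_size) - 1) << clip_array_size;

   printf("cull_distance_mask = 0x%x\n", cull_mask);   /* 0x30: bits 4-5 */
   return 0;
}
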
Example #3
bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_geometry_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->intelScreen->compiler;
   struct gl_shader *shader = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(brw->intelScreen->devinfo, prog,
                                   &gp->program.Base, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct brw_shader *bgs = (struct brw_shader *) gs;
   int param_count = gp->program.Base.nir->num_uniforms;
   if (!compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      param_count *= 4;

   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param, gs->NumImages);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gs->NumImages;

   brw_nir_setup_glsl_uniforms(gp->program.Base.nir, prog, &gp->program.Base,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       prog ? prog->SeparateShader : false);

   if (unlikely(INTEL_DEBUG & DEBUG_GS))
      brw_dump_ir("geometry", prog, gs, NULL);

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, prog, NULL, ST_GS);

   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->intelScreen->compiler, brw, mem_ctx, key,
                     &prog_data, shader->Program->nir, prog,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (bgs->compiled_once) {
         brw_gs_debug_recompile(brw, prog, key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      bgs->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   if (prog_data.base.base.total_scratch) {
      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
			 prog_data.base.base.total_scratch *
                         brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
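
The param_count sizing above differs between backends: the vec4 backend pads every uniform up to a vec4, so the worst case (all floats) needs four slots per component, while the scalar backend needs one. A sketch under assumed values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
   int num_uniforms = 12;      /* uniform components in the shader (assumed) */
   bool scalar_stage = false;  /* vec4 backend */

   int param_count = num_uniforms;
   if (!scalar_stage)
      param_count *= 4;        /* worst case: every float padded to a vec4 */

   printf("param slots allocated: %d\n", param_count);
   return 0;
}
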
Example #4
static bool
brw_vs_do_compile(struct brw_context *brw,
	          struct brw_vs_compile *c)
{
   struct brw_stage_prog_data *stage_prog_data = &c->prog_data.base.base;
   struct gl_shader *vs = NULL;
   int i;

   if (c->base.shader_prog)
      vs = c->base.shader_prog->_LinkedShaders[MESA_SHADER_VERTEX];

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = c->vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c->key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* We set nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of vec4 uniform slots that vec4_visitor
    * needs; vec4_visitor::setup_uniforms() will set it back to a proper
    * value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = c->vp->program.Base.OutputsWritten;
   c->prog_data.inputs_read = c->vp->program.Base.InputsRead;

   if (c->key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      c->prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c->key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c->key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &c->prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c->vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   c->base.program = brw_vs_emit(brw, c->base.shader_prog, c,
         &c->prog_data, c->base.mem_ctx, &c->base.program_size);
   if (c->base.program == NULL)
      return false;

   if (c->base.last_scratch) {
      c->prog_data.base.total_scratch
         = brw_get_scratch_size(c->base.last_scratch*REG_SIZE);
   }

   return true;
}
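
nr_params above is set to ALIGN(param_count, 4) / 4, i.e. the number of vec4 uniform slots (ceil(param_count / 4)) rather than the array length. A standalone sketch with an assumed count:

#include <stdio.h>

#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

int main(void)
{
   int param_count = 37;   /* float-sized uniform slots (assumed) */

   /* ceil(37 / 4) = 10 vec4 uniforms */
   int nr_vec4s = ALIGN(param_count, 4) / 4;

   printf("nr_params (vec4 units) = %d\n", nr_vec4s);
   return 0;
}
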
Example #5
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written,
                       c.key.base.userclip_active);

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (prog_data.base.nr_pull_params)
      prog_data.base.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      prog_data.base.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      prog_data.base.num_surfaces =
	 SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
			 prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
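
The scratch sizing at the end of the example multiplies a per-thread size by the device's thread count. As a sketch: assuming (based on historical Mesa behavior, not shown here) that brw_get_scratch_size() rounds last_scratch * REG_SIZE up to a power of two with a 1 KB floor, the buffer size works out as follows. All numbers are assumptions.

#include <stdio.h>

#define REG_SIZE 32   /* one GRF register, in bytes */

static unsigned next_power_of_two(unsigned v)
{
   unsigned p = 1;
   while (p < v)
      p <<= 1;
   return p;
}

int main(void)
{
   unsigned last_scratch = 48;     /* spill slots, in registers (assumed) */
   unsigned max_vs_threads = 60;   /* device limit (assumed) */

   unsigned per_thread = next_power_of_two(last_scratch * REG_SIZE);
   if (per_thread < 1024)          /* assumed 1 KB floor */
      per_thread = 1024;

   printf("scratch bo size = %u bytes\n", per_thread * max_vs_threads);
   return 0;
}
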
Example #6
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   void *mem_ctx;
   int aux_size;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));

   mem_ctx = ralloc_context(NULL);

   brw_init_compile(brw, &c.func, mem_ctx);
   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

      /* We also upload clip plane data as uniforms */
      param_count += MAX_CLIP_PLANES * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   c.prog_data.param = rzalloc_array(NULL, const float *, param_count);
   c.prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);

   c.prog_data.outputs_written = vp->program.Base.OutputsWritten;
   c.prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE);
      c.prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   /* Put dummy slots into the VUE for the SF to put the replaced
    * point sprite coords in.  We shouldn't need these dummy slots,
    * which take up precious URB space, but it would mean that the SF
    * doesn't get nice aligned pairs of input coords into output
    * coords, which would be a pain to handle.
    */
   for (i = 0; i < 8; i++) {
      if (c.key.point_coord_replace & (1 << i))
	 c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
   }

   brw_compute_vue_map(&c);

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   if (prog) {
      if (!brw_vs_emit(prog, &c)) {
	 ralloc_free(mem_ctx);
	 return false;
      }
   } else {
      brw_old_vs_emit(&c);
   }

   if (c.prog_data.nr_pull_params)
      c.prog_data.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      c.prog_data.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      c.prog_data.num_surfaces =
	 SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.total_scratch = brw_get_scratch_size(c.last_scratch);

      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
			 c.prog_data.total_scratch * brw->max_vs_threads);
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   /* We upload from &c.prog_data including the constant_map assuming
    * they're packed together.  It would be nice to have a
    * compile-time assert macro here.
    */
   assert(c.constant_map == (int8_t *)&c.prog_data +
	  sizeof(c.prog_data));
   assert(ctx->Const.VertexProgram.MaxNativeParameters ==
	  ARRAY_SIZE(c.constant_map));
   (void) ctx;

   aux_size = sizeof(c.prog_data);
   /* constant_map */
   aux_size += c.vp->program.Base.Parameters->NumParameters;

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &c.prog_data, aux_size,
		    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
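
The asserts before brw_upload_cache above depend on constant_map being laid out immediately after prog_data, so both can be uploaded as one blob. The same invariant can be expressed at a glance with offsetof; the structs below are stand-ins, not the real brw_vs_compile:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fake_prog_data {
   uint32_t a, b;
};

struct fake_compile {
   struct fake_prog_data prog_data;
   int8_t constant_map[16];   /* must directly follow prog_data */
};

int main(void)
{
   /* No padding may separate the two members if &prog_data is uploaded
    * with aux_size = sizeof(prog_data) + number of constant_map entries.
    */
   assert(offsetof(struct fake_compile, constant_map) ==
          sizeof(struct fake_prog_data));
   return 0;
}
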
Example #7
static bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct brw_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->screen->compiler;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(devinfo, &gp->program, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   int param_count = gp->program.nir->num_uniforms / 4;

   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    gp->program.info.num_images);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gp->program.info.num_images;

   brw_nir_setup_glsl_uniforms(gp->program.nir, &gp->program,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   brw_nir_analyze_ubo_ranges(compiler, gp->program.nir,
                              prog_data.base.base.ubo_ranges);

   uint64_t outputs_written = gp->program.info.outputs_written;

   brw_compute_vue_map(devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       gp->program.info.separate_shader);

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);

   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->screen->compiler, brw, mem_ctx, key,
                     &prog_data, gp->program.nir, &gp->program,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      ralloc_strcat(&gp->program.sh.data->InfoLog, error_str);
      _mesa_problem(NULL, "Failed to compile geometry shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (gp->compiled_once) {
         brw_gs_debug_recompile(brw, &gp->program, key);
      }
      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      gp->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   brw_alloc_stage_scratch(brw, stage_state,
                           prog_data.base.base.total_scratch,
                           devinfo->max_gs_threads);

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.base.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example #8
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* We set nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of vec4 uniform slots that vec4_visitor
    * needs; vec4_visitor::setup_uniforms() will set it back to a proper
    * value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
			 prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example #9
extern "C" const unsigned *
brw_compile_tcs(const struct brw_compiler *compiler,
                void *log_data,
                void *mem_ctx,
                const struct brw_tcs_prog_key *key,
                struct brw_tcs_prog_data *prog_data,
                const nir_shader *src_shader,
                int shader_time_index,
                unsigned *final_assembly_size,
                char **error_str)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   struct brw_vue_prog_data *vue_prog_data = &prog_data->base;
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];

   nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
   nir->info->outputs_written = key->outputs_written;
   nir->info->patch_outputs_written = key->patch_outputs_written;

   struct brw_vue_map input_vue_map;
   brw_compute_vue_map(devinfo, &input_vue_map, nir->info->inputs_read,
                       nir->info->separate_shader);
   brw_compute_tess_vue_map(&vue_prog_data->vue_map,
                            nir->info->outputs_written,
                            nir->info->patch_outputs_written);

   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
   brw_nir_lower_vue_inputs(nir, is_scalar, &input_vue_map);
   brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map);
   if (key->quads_workaround)
      brw_nir_apply_tcs_quads_workaround(nir);

   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);

   if (is_scalar)
      prog_data->instances = DIV_ROUND_UP(nir->info->tcs.vertices_out, 8);
   else
      prog_data->instances = DIV_ROUND_UP(nir->info->tcs.vertices_out, 2);

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     32 bytes for the patch header (tessellation factors)
    *    480 bytes for per-patch varyings (a varying component is 4 bytes and
    *              gl_MaxTessPatchComponents = 120)
    *  16384 bytes for per-vertex varyings (a varying component is 4 bytes,
    *              gl_MaxPatchVertices = 32 and
    *              gl_MaxTessControlOutputComponents = 128)
    *
    *  15872 bytes left for varying packing overhead
    */
   const int num_per_patch_slots = vue_prog_data->vue_map.num_per_patch_slots;
   const int num_per_vertex_slots = vue_prog_data->vue_map.num_per_vertex_slots;
   unsigned output_size_bytes = 0;
   /* Note that the patch header is counted in num_per_patch_slots. */
   output_size_bytes += num_per_patch_slots * 16;
   output_size_bytes += nir->info->tcs.vertices_out * num_per_vertex_slots * 16;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_HS_URB_ENTRY_SIZE_BYTES)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   vue_prog_data->urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   /* HS does not use the usual payload pushing from URB to GRFs,
    * because we don't have enough registers for a full-size payload, and
    * the hardware is broken on Haswell anyway.
    */
   vue_prog_data->urb_read_length = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
      fprintf(stderr, "TCS Input ");
      brw_print_vue_map(stderr, &input_vue_map);
      fprintf(stderr, "TCS Output ");
      brw_print_vue_map(stderr, &vue_prog_data->vue_map);
   }

   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, (void *) key,
                   &prog_data->base.base, NULL, nir, 8,
                   shader_time_index, &input_vue_map);
      if (!v.run_tcs_single_patch()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                     &prog_data->base.base, v.promoted_constants, false,
                     MESA_SHADER_TESS_CTRL);
      if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
         g.enable_debug(ralloc_asprintf(mem_ctx,
                                        "%s tessellation control shader %s",
                                        nir->info->label ? nir->info->label
                                                        : "unnamed",
                                        nir->info->name));
      }

      g.generate_code(v.cfg, 8);

      return g.get_assembly(final_assembly_size);
   } else {
      vec4_tcs_visitor v(compiler, log_data, key, prog_data,
                         nir, mem_ctx, shader_time_index, &input_vue_map);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_TCS))
         v.dump_instructions();

      return brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
                                        &prog_data->base, v.cfg,
                                        final_assembly_size);
   }
}
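
The TCS URB entry sizing above can be reproduced in isolation: each VUE slot is 16 bytes, the per-vertex slots are repeated vertices_out times, and the total is stored in 64-byte units. A sketch with assumed slot counts:

#include <stdio.h>

#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

int main(void)
{
   int per_patch_slots = 6;    /* includes the patch header (assumed) */
   int per_vertex_slots = 8;   /* assumed */
   int vertices_out = 4;       /* layout(vertices = 4) (assumed) */

   unsigned output_size_bytes = per_patch_slots * 16 +
                                vertices_out * per_vertex_slots * 16;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   unsigned urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   printf("%u bytes -> urb_entry_size = %u (64-byte units)\n",
          output_size_bytes, urb_entry_size);   /* 608 bytes -> 10 */
   return 0;
}
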
Example #10
static void
upload_sf_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_vue_map vue_map;
   uint32_t urb_entry_read_length;
   /* CACHE_NEW_VS_PROG */
   GLbitfield64 vs_outputs_written = brw->vs.prog_data->outputs_written;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   uint32_t num_outputs = _mesa_bitcount_64(brw->fragment_program->Base.InputsRead);
   /* _NEW_LIGHT */
   bool shade_model_flat = ctx->Light.ShadeModel == GL_FLAT;
   uint32_t dw1, dw2, dw3, dw4, dw16, dw17;
   int i;
   /* _NEW_BUFFER */
   bool render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0;
   int attr = 0, input_index = 0;
   int urb_entry_read_offset = 1;
   float point_size;
   uint16_t attr_overrides[FRAG_ATTRIB_MAX];
   bool userclip_active;

   /* _NEW_TRANSFORM */
   userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);

   brw_compute_vue_map(&vue_map, intel, userclip_active, vs_outputs_written);
   urb_entry_read_length = (vue_map.num_slots + 1)/2 - urb_entry_read_offset;
   if (urb_entry_read_length == 0) {
      /* Setting the URB entry read length to 0 causes undefined behavior, so
       * if we have no URB data to read, set it to 1.
       */
      urb_entry_read_length = 1;
   }

   dw1 =
      GEN6_SF_SWIZZLE_ENABLE |
      num_outputs << GEN6_SF_NUM_OUTPUTS_SHIFT |
      urb_entry_read_length << GEN6_SF_URB_ENTRY_READ_LENGTH_SHIFT |
      urb_entry_read_offset << GEN6_SF_URB_ENTRY_READ_OFFSET_SHIFT;

   dw2 = GEN6_SF_STATISTICS_ENABLE;

   /* Enable viewport transform only if no HiZ operation is in progress.
    *
    * From page 11 of the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
    * Primitives Overview":
    *     RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
    *     use of screen-space coordinates).
    */
   if (!brw->hiz.op)
      dw2 |= GEN6_SF_VIEWPORT_TRANSFORM_ENABLE;

   dw3 = 0;
   dw4 = 0;
   dw16 = 0;
   dw17 = 0;

   /* _NEW_POLYGON */
   if ((ctx->Polygon.FrontFace == GL_CCW) ^ render_to_fbo)
      dw2 |= GEN6_SF_WINDING_CCW;

   if (ctx->Polygon.OffsetFill)
       dw2 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_SOLID;

   if (ctx->Polygon.OffsetLine)
       dw2 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_WIREFRAME;

   if (ctx->Polygon.OffsetPoint)
       dw2 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_POINT;

   switch (ctx->Polygon.FrontMode) {
   case GL_FILL:
       dw2 |= GEN6_SF_FRONT_SOLID;
       break;

   case GL_LINE:
       dw2 |= GEN6_SF_FRONT_WIREFRAME;
       break;

   case GL_POINT:
       dw2 |= GEN6_SF_FRONT_POINT;
       break;

   default:
       assert(0);
       break;
   }

   switch (ctx->Polygon.BackMode) {
   case GL_FILL:
       dw2 |= GEN6_SF_BACK_SOLID;
       break;

   case GL_LINE:
       dw2 |= GEN6_SF_BACK_WIREFRAME;
       break;

   case GL_POINT:
       dw2 |= GEN6_SF_BACK_POINT;
       break;

   default:
       assert(0);
       break;
   }

   /* _NEW_SCISSOR */
   if (ctx->Scissor.Enabled)
      dw3 |= GEN6_SF_SCISSOR_ENABLE;

   /* _NEW_POLYGON */
   if (ctx->Polygon.CullFlag) {
      switch (ctx->Polygon.CullFaceMode) {
      case GL_FRONT:
	 dw3 |= GEN6_SF_CULL_FRONT;
	 break;
      case GL_BACK:
	 dw3 |= GEN6_SF_CULL_BACK;
	 break;
      case GL_FRONT_AND_BACK:
	 dw3 |= GEN6_SF_CULL_BOTH;
	 break;
      default:
	 assert(0);
	 break;
      }
   } else {
      dw3 |= GEN6_SF_CULL_NONE;
   }

   /* _NEW_LINE */
   dw3 |= U_FIXED(CLAMP(ctx->Line.Width, 0.0, 7.99), 7) <<
      GEN6_SF_LINE_WIDTH_SHIFT;
   if (ctx->Line.SmoothFlag) {
      dw3 |= GEN6_SF_LINE_AA_ENABLE;
      dw3 |= GEN6_SF_LINE_AA_MODE_TRUE;
      dw3 |= GEN6_SF_LINE_END_CAP_WIDTH_1_0;
   }

   /* _NEW_POINT */
   if (!(ctx->VertexProgram.PointSizeEnabled ||
	 ctx->Point._Attenuated))
      dw4 |= GEN6_SF_USE_STATE_POINT_WIDTH;

   /* Clamp to ARB_point_parameters user limits */
   point_size = CLAMP(ctx->Point.Size, ctx->Point.MinSize, ctx->Point.MaxSize);

   /* Clamp to the hardware limits and convert to fixed point */
   dw4 |= U_FIXED(CLAMP(point_size, 0.125, 255.875), 3);

   if (ctx->Point.SpriteOrigin == GL_LOWER_LEFT)
      dw1 |= GEN6_SF_POINT_SPRITE_LOWERLEFT;

   /* _NEW_LIGHT */
   if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION) {
      dw4 |=
	 (2 << GEN6_SF_TRI_PROVOKE_SHIFT) |
	 (2 << GEN6_SF_TRIFAN_PROVOKE_SHIFT) |
	 (1 << GEN6_SF_LINE_PROVOKE_SHIFT);
   } else {
      dw4 |=
	 (1 << GEN6_SF_TRIFAN_PROVOKE_SHIFT);
   }

   /* Create the mapping from the FS inputs we produce to the VS outputs
    * they source from.
    */
   for (; attr < FRAG_ATTRIB_MAX; attr++) {
      enum glsl_interp_qualifier interp_qualifier =
         brw->fragment_program->InterpQualifier[attr];
      bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;

      if (!(brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(attr)))
	 continue;

      /* _NEW_POINT */
      if (ctx->Point.PointSprite &&
	  (attr >= FRAG_ATTRIB_TEX0 && attr <= FRAG_ATTRIB_TEX7) &&
	  ctx->Point.CoordReplace[attr - FRAG_ATTRIB_TEX0]) {
	 dw16 |= (1 << input_index);
      }

      if (attr == FRAG_ATTRIB_PNTC)
	 dw16 |= (1 << input_index);

      /* flat shading */
      if (interp_qualifier == INTERP_QUALIFIER_FLAT ||
          (shade_model_flat && is_gl_Color &&
           interp_qualifier == INTERP_QUALIFIER_NONE))
         dw17 |= (1 << input_index);

      /* The hardware can only apply these overrides to the first 16
       * attributes; the remaining (up to 16) have to be lined up so that
       * the input index = the output index.  We'll need to do some
       * tweaking to make sure that's the case.
       */
      assert(input_index < 16 || attr == input_index);

      /* _NEW_LIGHT | _NEW_PROGRAM */
      attr_overrides[input_index++] =
         get_attr_override(&vue_map, urb_entry_read_offset, attr,
                           ctx->VertexProgram._TwoSideEnabled);
   }

   for (; input_index < FRAG_ATTRIB_MAX; input_index++)
      attr_overrides[input_index] = 0;

   BEGIN_BATCH(20);
   OUT_BATCH(_3DSTATE_SF << 16 | (20 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(dw2);
   OUT_BATCH(dw3);
   OUT_BATCH(dw4);
   OUT_BATCH_F(ctx->Polygon.OffsetUnits * 2); /* constant.  copied from gen4 */
   OUT_BATCH_F(ctx->Polygon.OffsetFactor); /* scale */
   OUT_BATCH_F(0.0); /* XXX: global depth offset clamp */
   for (i = 0; i < 8; i++) {
      OUT_BATCH(attr_overrides[i * 2] | attr_overrides[i * 2 + 1] << 16);
   }
   OUT_BATCH(dw16); /* point sprite texcoord bitmask */
   OUT_BATCH(dw17); /* constant interp bitmask */
   OUT_BATCH(0); /* wrapshortest enables 0-7 */
   OUT_BATCH(0); /* wrapshortest enables 8-15 */
   ADVANCE_BATCH();
}
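
The line-width and point-size fields above are packed with U_FIXED, an unsigned fixed-point conversion: the value is multiplied by 2^frac_bits and truncated. The macro body below is an assumption modeled on its usage here, not copied from the driver:

#include <stdio.h>

#define U_FIXED(value, frac_bits) ((unsigned)((value) * (1 << (frac_bits))))

int main(void)
{
   /* Point size: 3 fractional bits, clamped to [0.125, 255.875] above. */
   printf("U_FIXED(1.0, 3)     = %u\n", U_FIXED(1.0, 3));       /* 8 */
   printf("U_FIXED(255.875, 3) = %u\n", U_FIXED(255.875, 3));   /* 2047 */

   /* Line width: 7 fractional bits, clamped below 8.0 above. */
   printf("U_FIXED(7.99, 7)    = %u\n", U_FIXED(7.99, 7));      /* 1022 */
   return 0;
}
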
Example #11
static void compile_gs_prog( struct brw_context *brw,
			     struct brw_gs_prog_key *key )
{
   struct intel_context *intel = &brw->intel;
   struct brw_gs_compile c;
   const GLuint *program;
   void *mem_ctx;
   GLuint program_size;

   memset(&c, 0, sizeof(c));
   
   c.key = *key;
   /* The geometry shader needs to access the entire VUE. */
   brw_compute_vue_map(&c.vue_map, intel, c.key.userclip_active, c.key.attrs);
   c.nr_regs = (c.vue_map.num_slots + 1)/2;

   mem_ctx = ralloc_context(NULL);
   
   /* Begin the compilation:
    */
   brw_init_compile(brw, &c.func, mem_ctx);

   c.func.single_program_flow = 1;

   /* For some reason the thread is spawned with only 4 channels
    * unmasked.  
    */
   brw_set_mask_control(&c.func, BRW_MASK_DISABLE);

   if (intel->gen >= 6) {
      unsigned num_verts;
      bool check_edge_flag;
      /* On Sandybridge, we use the GS for implementing transform feedback
       * (called "Stream Out" in the PRM).
       */
      switch (key->primitive) {
      case _3DPRIM_POINTLIST:
         num_verts = 1;
         check_edge_flag = false;
	 break;
      case _3DPRIM_LINELIST:
      case _3DPRIM_LINESTRIP:
      case _3DPRIM_LINELOOP:
         num_verts = 2;
         check_edge_flag = false;
	 break;
      case _3DPRIM_TRILIST:
      case _3DPRIM_TRIFAN:
      case _3DPRIM_TRISTRIP:
      case _3DPRIM_RECTLIST:
	 num_verts = 3;
         check_edge_flag = false;
         break;
      case _3DPRIM_QUADLIST:
      case _3DPRIM_QUADSTRIP:
      case _3DPRIM_POLYGON:
         num_verts = 3;
         check_edge_flag = true;
         break;
      default:
	 assert(!"Unexpected primitive type in Gen6 SOL program.");
	 return;
      }
      gen6_sol_program(&c, key, num_verts, check_edge_flag);
   } else {
      /* On Gen4-5, we use the GS to decompose certain types of primitives.
       * Note that primitives which don't require a GS program have already
       * been weeded out by now.
       */
      switch (key->primitive) {
      case _3DPRIM_QUADLIST:
	 brw_gs_quads( &c, key );
	 break;
      case _3DPRIM_QUADSTRIP:
	 brw_gs_quad_strip( &c, key );
	 break;
      case _3DPRIM_LINELOOP:
	 brw_gs_lines( &c );
	 break;
      default:
	 ralloc_free(mem_ctx);
	 return;
      }
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
      int i;

      printf("gs:\n");
      for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
	 brw_disasm(stdout, &((struct brw_instruction *)program)[i],
		    intel->gen);
      printf("\n");
   }

   brw_upload_cache(&brw->cache, BRW_GS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &c.prog_data, sizeof(c.prog_data),
		    &brw->gs.prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);
}
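
c.nr_regs above (and urb_read_length in Example #1) uses (num_slots + 1) / 2 because the hardware moves two 16-byte VUE slots per 256-bit register, so the register count is ceil(num_slots / 2). A tiny sketch:

#include <stdio.h>

int main(void)
{
   for (int num_slots = 1; num_slots <= 5; num_slots++)
      printf("num_slots = %d -> regs = %d\n", num_slots, (num_slots + 1) / 2);
   /* 1->1, 2->1, 3->2, 4->2, 5->3 */
   return 0;
}
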
Example #12
extern "C" const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               const nir_shader *src_shader,
               struct gl_shader_program *shader_prog,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str)
{
    struct brw_gs_compile c;
    memset(&c, 0, sizeof(c));
    c.key = *key;

    const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
    nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);

    /* The GLSL linker will have already matched up GS inputs and the outputs
     * of prior stages.  The driver does extend VS outputs in some cases, but
     * only for legacy OpenGL or Gen4-5 hardware, neither of which offer
     * geometry shader support.  So we can safely ignore that.
     *
     * For SSO pipelines, we use a fixed VUE map layout based on variable
     * locations, so we can rely on rendezvous-by-location making this work.
     */
    GLbitfield64 inputs_read = shader->info->inputs_read;
    brw_compute_vue_map(compiler->devinfo,
                        &c.input_vue_map, inputs_read,
                        shader->info->separate_shader);

    shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
                                       is_scalar);
    brw_nir_lower_vue_inputs(shader, is_scalar, &c.input_vue_map);
    brw_nir_lower_vue_outputs(shader, is_scalar);
    shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);

    prog_data->base.clip_distance_mask =
        ((1 << shader->info->clip_distance_array_size) - 1);
    prog_data->base.cull_distance_mask =
        ((1 << shader->info->cull_distance_array_size) - 1) <<
        shader->info->clip_distance_array_size;

    prog_data->include_primitive_id =
        (shader->info->system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;

    prog_data->invocations = shader->info->gs.invocations;

    if (compiler->devinfo->gen >= 8)
        prog_data->static_vertex_count = nir_gs_count_vertices(shader);
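    /* (nir_gs_count_vertices reports -1 when the emitted vertex count is
     * not a single compile-time constant, in which case no static count
     * is advertised.)
     */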

    if (compiler->devinfo->gen >= 7) {
        if (shader->info->gs.output_primitive == GL_POINTS) {
            /* When the output type is points, the geometry shader may output data
             * to multiple streams, and EndPrimitive() has no effect.  So we
             * configure the hardware to interpret the control data as stream ID.
             */
            prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

            /* We only have to emit control bits if we are using streams */
            if (shader_prog && shader_prog->Geom.UsesStreams)
                c.control_data_bits_per_vertex = 2;
            else
                c.control_data_bits_per_vertex = 0;
        } else {
            /* When the output type is triangle_strip or line_strip, EndPrimitive()
             * may be used to terminate the current strip and start a new one
             * (similar to primitive restart), and outputting data to multiple
             * streams is not supported.  So we configure the hardware to interpret
             * the control data as EndPrimitive information (a.k.a. "cut bits").
             */
            prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

            /* We only need to output control data if the shader actually calls
             * EndPrimitive().
             */
            c.control_data_bits_per_vertex =
                shader->info->gs.uses_end_primitive ? 1 : 0;
        }
    } else {
        /* There are no control data bits in gen6. */
        c.control_data_bits_per_vertex = 0;

        /* Enable transform feedback if the shader has xfb varyings. */
        if (shader->info->has_transform_feedback_varyings)
            prog_data->gen6_xfb_enabled = true;
        else
            prog_data->gen6_xfb_enabled = false;
    }
    c.control_data_header_size_bits =
        shader->info->gs.vertices_out * c.control_data_bits_per_vertex;

    /* 1 HWORD = 32 bytes = 256 bits */
    prog_data->control_data_header_size_hwords =
        ALIGN(c.control_data_header_size_bits, 256) / 256;
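    /* e.g. a shader with max_vertices = 100 emitting cut bits (1 bit per
     * vertex) needs 100 bits: ALIGN(100, 256) / 256 = 1 hword of header.
     */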

    /* Compute the output vertex size.
     *
     * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
     * Size (p168):
     *
     *     [0,62] indicating [1,63] 16B units
     *
     *     Specifies the size of each vertex stored in the GS output entry
     *     (following any Control Header data) as a number of 128-bit units
     *     (minus one).
     *
     *     Programming Restrictions: The vertex size must be programmed as a
     *     multiple of 32B units with the following exception: Rendering is
     *     disabled (as per SOL stage state) and the vertex size output by the
     *     GS thread is 16B.
     *
     *     If rendering is enabled (as per SOL state) the vertex size must be
     *     programmed as a multiple of 32B units. In other words, the only time
     *     software can program a vertex size with an odd number of 16B units
     *     is when rendering is disabled.
     *
     * Note: B=bytes in the above text.
     *
     * It doesn't seem worth the extra trouble to optimize the case where the
     * vertex size is 16B (especially since this would require special-casing
     * the GEN assembly that writes to the URB).  So we just set the vertex
     * size to a multiple of 32B (2 vec4's) in all cases.
     *
     * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
     * budget that as follows:
     *
     *   512 bytes for varyings (a varying component is 4 bytes and
     *             gl_MaxGeometryOutputComponents = 128)
     *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
     *             bytes)
     *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
     *             even if it's not used)
     *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
     *             whenever clip planes are enabled, even if the shader doesn't
     *             write to gl_ClipDistance)
     *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
     *             (see below)--this causes up to 1 VUE slot to be wasted
     *   400 bytes available for varying packing overhead
     *
     * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
     * per interpolation type, so this is plenty.
     *
     */
    unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
    assert(compiler->devinfo->gen == 6 ||
           output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
    prog_data->output_vertex_size_hwords =
        ALIGN(output_vertex_size_bytes, 32) / 32;
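    /* e.g. a 10-slot output VUE map gives 10 * 16 = 160 bytes, and
     * ALIGN(160, 32) / 32 = 5 hwords (160 is already a 32B multiple).
     */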

    /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
     * That divides up as follows:
     *
     *     64 bytes for the control data header (cut indices or StreamID bits)
     *   4096 bytes for varyings (a varying component is 4 bytes and
     *              gl_MaxGeometryTotalOutputComponents = 1024)
     *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
     *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
     *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
     *              even if it's not used)
     *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
     *              whenever clip planes are enabled, even if the shader doesn't
     *              write to gl_ClipDistance)
     *   4096 bytes overhead since the VUE size must be a multiple of 32
     *              bytes (see above)--this causes up to 1 VUE slot to be wasted
     *   8128 bytes available for varying packing overhead
     *
     * Worst-case varying packing overhead is 3/4 of a varying slot per
     * interpolation type, which works out to 3072 bytes, so this would allow
     * us to accommodate 2 interpolation types without any danger of running
     * out of URB space.
     *
     * In practice, the risk of running out of URB space is very small, since
     * the above figures are all worst-case, and most of them scale with the
     * number of output vertices.  So we'll just calculate the amount of space
     * we need, and if it's too large, fail to compile.
     *
     * The above is for gen7+ where we have a single URB entry that will hold
     * all the output. In gen6, we will have to allocate URB entries for every
     * vertex we emit, so our URB entries only need to be large enough to hold
     * a single vertex. Also, gen6 does not have a control data header.
     */
    unsigned output_size_bytes;
    if (compiler->devinfo->gen >= 7) {
        output_size_bytes =
            prog_data->output_vertex_size_hwords * 32 * shader->info->gs.vertices_out;
        output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
    } else {
        output_size_bytes = prog_data->output_vertex_size_hwords * 32;
    }

    /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
     * which comes before the control header.
     */
    if (compiler->devinfo->gen >= 8)
        output_size_bytes += 32;

    /* Shaders can technically set max_vertices = 0, at which point we
     * may have a URB size of 0 bytes.  Nothing good can come from that,
     * so enforce a minimum size.
     */
    if (output_size_bytes == 0)
        output_size_bytes = 1;

    unsigned max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
    if (compiler->devinfo->gen == 6)
        max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
    if (output_size_bytes > max_output_size_bytes)
        return NULL;

    /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
     * a multiple of 128 bytes in gen6.
     */
    if (compiler->devinfo->gen >= 7)
        prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
    else
        prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
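    /* e.g. gen7 with 5-hword vertices, max_vertices = 4 and a 1-hword
     * control header: 5 * 32 * 4 + 32 = 672 bytes, so urb_entry_size =
     * ALIGN(672, 64) / 64 = 11 (i.e. a 704-byte entry).
     */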

    prog_data->output_topology =
        get_hw_prim_for_gl_prim(shader->info->gs.output_primitive);

    prog_data->vertices_in = shader->info->gs.vertices_in;

    /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
     * need to program a URB read length of ceiling(num_slots / 2).
     */
    prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;
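    /* e.g. 9 input slots -> (9 + 1) / 2 = a read length of 5, covering 10
     * slots; the unused half of the last pair is simply ignored.
     */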

    /* Now that prog_data setup is done, we are ready to actually compile the
     * program.
     */
    if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
        fprintf(stderr, "GS Input ");
        brw_print_vue_map(stderr, &c.input_vue_map);
        fprintf(stderr, "GS Output ");
        brw_print_vue_map(stderr, &prog_data->base.vue_map);
    }

    if (is_scalar) {
        fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, shader,
                     shader_time_index);
        if (v.run_gs()) {
            prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
            prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

            fs_generator g(compiler, log_data, mem_ctx, &c.key,
                           &prog_data->base.base, v.promoted_constants,
                           false, MESA_SHADER_GEOMETRY);
            if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
                const char *label =
                    shader->info->label ? shader->info->label : "unnamed";
                char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
                                             label, shader->info->name);
                g.enable_debug(name);
            }
            g.generate_code(v.cfg, 8);
            return g.get_assembly(final_assembly_size);
        }
    }
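    /* If the scalar compile did not succeed, fall through and try the
     * vec4 paths below.
     */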

    if (compiler->devinfo->gen >= 7) {
        /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can do
         * so without spilling. If the GS invocations count > 1, then we can't use
         * dual object mode.
         */
        if (prog_data->invocations <= 1 &&
                likely(!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS))) {
            prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

            vec4_gs_visitor v(compiler, log_data, &c, prog_data, shader,
                              mem_ctx, true /* no_spills */, shader_time_index);
            if (v.run()) {
                return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                                  shader, &prog_data->base, v.cfg,
                                                  final_assembly_size);
            }
        }
    }

    /* Either we failed to compile in DUAL_OBJECT mode (probably because it
     * would have required spilling) or DUAL_OBJECT mode is disabled.  So fall
     * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
     *
     * FIXME: Single dispatch mode requires that the driver can handle
     * interleaving of input registers, but this is already supported (dual
     * instance mode has the same requirement). However, to take full advantage
     * of single dispatch mode to reduce register pressure we would also need to
     * do interleaved outputs, but currently, the vec4 visitor and generator
     * classes do not support this, so at the moment register pressure in
     * single and dual instance modes is the same.
     *
     * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS"
     * "If InstanceCount>1, DUAL_OBJECT mode is invalid. Software will likely
     * want to use DUAL_INSTANCE mode for higher performance, but SINGLE mode
     * is also supported. When InstanceCount=1 (one instance per object) software
     * can decide which dispatch mode to use. DUAL_OBJECT mode would likely be
     * the best choice for performance, followed by SINGLE mode."
     *
     * So SINGLE mode is more performant when invocations == 1 and DUAL_INSTANCE
     * mode is more performant when invocations > 1. Gen6 only supports
     * SINGLE mode.
     */
    if (prog_data->invocations <= 1 || compiler->devinfo->gen < 7)
        prog_data->base.dispatch_mode = DISPATCH_MODE_4X1_SINGLE;
    else
        prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_INSTANCE;
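    /* Net result:
     *   gen6:                     4X1 SINGLE (the only mode it supports)
     *   gen7+, invocations == 1:  4X1 SINGLE (DUAL_OBJECT was already
     *                             tried above and failed or was disabled)
     *   gen7+, invocations > 1:   4X2 DUAL_INSTANCE
     */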

    vec4_gs_visitor *gs = NULL;
    const unsigned *ret = NULL;

    if (compiler->devinfo->gen >= 7)
        gs = new vec4_gs_visitor(compiler, log_data, &c, prog_data,
                                 shader, mem_ctx, false /* no_spills */,
                                 shader_time_index);
    else
        gs = new gen6_gs_visitor(compiler, log_data, &c, prog_data, shader_prog,
                                 shader, mem_ctx, false /* no_spills */,
                                 shader_time_index);

    if (!gs->run()) {
        if (error_str)
            *error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
    } else {
        ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, shader,
                                         &prog_data->base, gs->cfg,
                                         final_assembly_size);
    }

    delete gs;
    return ret;
}
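
The two worst-case budgets spelled out in the comments above are easy to sanity-check: the per-vertex line items sum to the 992-byte maximum output vertex size, and the per-entry line items sum exactly to the 32k URB entry limit. A quick standalone check, with the figures copied from the comments rather than re-derived from the hardware docs:

#include <assert.h>

int main(void)
{
   /* Output vertex size budget (gen7+): must total 62 * 16 = 992 bytes. */
   unsigned vertex_budget = 512   /* varyings (128 components * 4B)     */
                          + 16    /* VARYING_SLOT_PSIZ overhead         */
                          + 16    /* gl_Position overhead               */
                          + 32    /* gl_ClipDistance overhead (2 slots) */
                          + 16    /* rounding to a 32B multiple         */
                          + 400;  /* varying packing slack              */
   assert(vertex_budget == 62 * 16);

   /* URB entry budget (gen7+): must total the 32k entry size limit. */
   unsigned entry_budget = 64     /* control data header                */
                         + 4096   /* varyings (1024 components * 4B)    */
                         + 4096   /* VARYING_SLOT_PSIZ overhead         */
                         + 4096   /* gl_Position overhead               */
                         + 8192   /* gl_ClipDistance overhead           */
                         + 4096   /* rounding to a 32B multiple         */
                         + 8128;  /* varying packing slack              */
   assert(entry_budget == 32 * 1024);
   return 0;
}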