Example #1
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

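   /* Lay out the VUE (vertex URB entry) map for the outputs collected above.
    * On this path the userclip_active flag is passed down so that the
    * clip-distance slots can be reserved as well.
    */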
   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written,
                       c.key.base.userclip_active);

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

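   /* num_surfaces records how many VS binding table entries are needed: at
    * least one when pull constants are present, enough to cover every texture
    * surface when samplers are used, and enough to cover the UBO surfaces when
    * uniform blocks are bound.
    */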
   if (prog_data.base.nr_pull_params)
      prog_data.base.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      prog_data.base.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      prog_data.base.num_surfaces =
	 SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
			 prog_data.base.total_scratch * brw->max_vs_threads);
   }

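   /* Hand the assembled program and prog_data to the state cache; the cache
    * takes ownership of the param/pull_param arrays allocated above and
    * returns the program offset through brw->vs.prog_offset.
    */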
   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example #2
static bool
brw_vs_do_compile(struct brw_context *brw,
	          struct brw_vs_compile *c)
{
   struct brw_stage_prog_data *stage_prog_data = &c->prog_data.base.base;
   struct gl_shader *vs = NULL;
   int i;

   if (c->base.shader_prog)
      vs = c->base.shader_prog->_LinkedShaders[MESA_SHADER_VERTEX];

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = c->vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c->key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* Setting nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor
    * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = c->vp->program.Base.OutputsWritten;
   c->prog_data.inputs_read = c->vp->program.Base.InputsRead;

   if (c->key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      c->prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c->key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c->key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &c->prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c->vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   c->base.program = brw_vs_emit(brw, c->base.shader_prog, c,
         &c->prog_data, c->base.mem_ctx, &c->base.program_size);
   if (c->base.program == NULL)
      return false;

   if (c->base.last_scratch) {
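      /* Scratch space is used for register spilling. */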
      c->prog_data.base.total_scratch
         = brw_get_scratch_size(c->base.last_scratch*REG_SIZE);
   }

   return true;
}
Example #3
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* Setting nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor
    * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
			 prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example #4
static bool
do_vs_prog(struct brw_context *brw,
	   struct gl_shader_program *prog,
	   struct brw_vertex_program *vp,
	   struct brw_vs_prog_key *key)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   void *mem_ctx;
   int aux_size;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));

   mem_ctx = ralloc_context(NULL);

   brw_init_compile(brw, &c.func, mem_ctx);
   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

      /* We also upload clip plane data as uniforms */
      param_count += MAX_CLIP_PLANES * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   c.prog_data.param = rzalloc_array(NULL, const float *, param_count);
   c.prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);

   c.prog_data.outputs_written = vp->program.Base.OutputsWritten;
   c.prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE);
      c.prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   /* Put dummy slots into the VUE for the SF to put the replaced
    * point sprite coords in.  We shouldn't need these dummy slots,
    * which take up precious URB space, but it would mean that the SF
    * doesn't get nice aligned pairs of input coords into output
    * coords, which would be a pain to handle.
    */
   for (i = 0; i < 8; i++) {
      if (c.key.point_coord_replace & (1 << i))
	 c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
   }

   brw_compute_vue_map(&c);

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       true);
   }

   /* Emit GEN4 code.
    */
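   /* A linked GLSL program goes through brw_vs_emit(), which can fail;
    * fixed-function and ARB vertex programs use the old emitter instead.
    */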
   if (prog) {
      if (!brw_vs_emit(prog, &c)) {
	 ralloc_free(mem_ctx);
	 return false;
      }
   } else {
      brw_old_vs_emit(&c);
   }

   if (c.prog_data.nr_pull_params)
      c.prog_data.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      c.prog_data.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      c.prog_data.num_surfaces =
	 SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.total_scratch = brw_get_scratch_size(c.last_scratch);

      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
			 c.prog_data.total_scratch * brw->max_vs_threads);
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   /* We upload from &c.prog_data including the constant_map assuming
    * they're packed together.  It would be nice to have a
    * compile-time assert macro here.
    */
   assert(c.constant_map == (int8_t *)&c.prog_data +
	  sizeof(c.prog_data));
   assert(ctx->Const.VertexProgram.MaxNativeParameters ==
	  ARRAY_SIZE(c.constant_map));
   (void) ctx;

   aux_size = sizeof(c.prog_data);
   /* constant_map */
   aux_size += c.vp->program.Base.Parameters->NumParameters;

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &c.prog_data, aux_size,
		    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example #5
static void do_vs_prog( struct brw_context *brw, 
			struct brw_vertex_program *vp,
			struct brw_vs_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   int aux_size;
   int i;

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));

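   /* Set up the EU code emitter state in c.func; brw_get_program() retrieves
    * the assembled instructions from it further down.
    */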
   brw_init_compile(brw, &c.func);
   c.vp = vp;

   c.prog_data.outputs_written = vp->program.Base.OutputsWritten;
   c.prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE);
      c.prog_data.inputs_read |= 1<<VERT_ATTRIB_EDGEFLAG;
   }

   /* Put dummy slots into the VUE for the SF to put the replaced
    * point sprite coords in.  We shouldn't need these dummy slots,
    * which take up precious URB space, but it would mean that the SF
    * doesn't get nice aligned pairs of input coords into output
    * coords, which would be a pain to handle.
    */
   for (i = 0; i < 8; i++) {
      if (c.key.point_coord_replace & (1 << i))
	 c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
   }

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
			       GL_TRUE);
   }

   /* Emit GEN4 code.
    */
   brw_vs_emit(&c);

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   /* We upload from &c.prog_data including the constant_map assuming
    * they're packed together.  It would be nice to have a
    * compile-time assert macro here.
    */
   assert(c.constant_map == (int8_t *)&c.prog_data +
	  sizeof(c.prog_data));
   assert(ctx->Const.VertexProgram.MaxNativeParameters ==
	  ARRAY_SIZE(c.constant_map));
   (void) ctx;

   aux_size = sizeof(c.prog_data);
   /* constant_map */
   aux_size += c.vp->program.Base.Parameters->NumParameters;

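   /* Release the previously compiled program BO and upload the new one; the
    * prog_data and constant_map travel together as auxdata (see the asserts
    * above).
    */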
   drm_intel_bo_unreference(brw->vs.prog_bo);
   brw->vs.prog_bo = brw_upload_cache_with_auxdata(&brw->cache, BRW_VS_PROG,
						   &c.key, sizeof(c.key),
						   NULL, 0,
						   program, program_size,
						   &c.prog_data,
						   aux_size,
						   &brw->vs.prog_data);
}