Example #1
void
link_assign_atomic_counter_resources(struct gl_context *ctx,
                                     struct gl_shader_program *prog)
{
   unsigned num_buffers;
   active_atomic_buffer *abs =
      find_active_atomic_counters(ctx, prog, &num_buffers);

   prog->AtomicBuffers = rzalloc_array(prog, gl_active_atomic_buffer,
                                       num_buffers);
   prog->NumAtomicBuffers = num_buffers;

   unsigned i = 0;
   for (unsigned binding = 0;
        binding < ctx->Const.MaxAtomicBufferBindings;
        binding++) {

      /* If the binding was not used, skip.
       */
      if (abs[binding].size == 0)
         continue;

      active_atomic_buffer &ab = abs[binding];
      gl_active_atomic_buffer &mab = prog->AtomicBuffers[i];

      /* Assign buffer-specific fields. */
      mab.Binding = binding;
      mab.MinimumSize = ab.size;
      mab.Uniforms = rzalloc_array(prog->AtomicBuffers, GLuint,
                                   ab.num_counters);
      mab.NumUniforms = ab.num_counters;

      /* Assign counter-specific fields. */
      for (unsigned j = 0; j < ab.num_counters; j++) {
         ir_variable *const var = ab.counters[j].var;
         const unsigned id = ab.counters[j].id;
         gl_uniform_storage *const storage = &prog->UniformStorage[id];

         mab.Uniforms[j] = id;
         if (!var->data.explicit_binding)
            var->data.binding = i;

         storage->atomic_buffer_index = i;
         storage->offset = var->data.atomic.offset;
         storage->array_stride = (var->type->is_array() ?
                                  var->type->element_type()->atomic_size() : 0);
      }

      /* Assign stage-specific fields. */
      for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j)
         mab.StageReferences[j] =
            (ab.stage_references[j] ? GL_TRUE : GL_FALSE);

      i++;
   }

   delete [] abs;
   assert(i == num_buffers);
}
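A minimal sketch of the active_atomic_buffer bookkeeping type, inferred purely
from the fields the loop above touches; the real definition lives alongside
find_active_atomic_counters(), so treat this as an assumption:

struct active_atomic_counter {
   ir_variable *var; /* the atomic counter uniform */
   unsigned id;      /* index into prog->UniformStorage */
};

struct active_atomic_buffer {
   active_atomic_counter *counters;           /* counters in this binding */
   unsigned num_counters;
   unsigned size;                             /* minimum buffer size in bytes */
   bool stage_references[MESA_SHADER_STAGES]; /* which stages touch it */
};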
Example #2
gen8_generator::gen8_generator(struct brw_context *brw,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               void *mem_ctx)
   : shader_prog(shader_prog), prog(prog), brw(brw), mem_ctx(mem_ctx)
{
   ctx = &brw->ctx;

   memset(&default_state, 0, sizeof(default_state));
   default_state.mask_control = BRW_MASK_ENABLE;

   store_size = 1024;
   store = rzalloc_array(mem_ctx, gen8_instruction, store_size);
   nr_inst = 0;
   next_inst_offset = 0;

   /* Set up the control flow stacks. */
   if_stack_depth = 0;
   if_stack_array_size = 16;
   if_stack = rzalloc_array(mem_ctx, int, if_stack_array_size);

   loop_stack_depth = 0;
   loop_stack_array_size = 16;
   loop_stack = rzalloc_array(mem_ctx, int, loop_stack_array_size);
}
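The constructor only seeds the control-flow stacks; a plausible sketch of how
a push might grow one of them, mirroring the doubling strategy used for the
instruction store (the method name and bookkeeping here are assumptions, not
the source's API):

void
gen8_generator::push_if_stack(gen8_instruction *inst)
{
   /* Record the instruction's offset into the store. */
   if_stack[if_stack_depth++] = (int) (inst - store);

   /* Double the stack when it fills, staying on the same ralloc context. */
   if (if_stack_array_size <= if_stack_depth) {
      if_stack_array_size *= 2;
      if_stack = reralloc(mem_ctx, if_stack, int, if_stack_array_size);
   }
}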
Example #3
static void
lower_sampler(nir_tex_instr *instr, const struct gl_shader_program *shader_program,
              gl_shader_stage stage, nir_builder *builder)
{
   if (instr->sampler == NULL)
      return;

   instr->sampler_index = 0;
   unsigned location = instr->sampler->var->data.location;
   unsigned array_elements = 1;
   nir_ssa_def *indirect = NULL;

   builder->cursor = nir_before_instr(&instr->instr);
   calc_sampler_offsets(&instr->sampler->deref, instr, &array_elements,
                        &indirect, builder, &location);

   if (indirect) {
      /* First, we have to resize the array of texture sources */
      nir_tex_src *new_srcs = rzalloc_array(instr, nir_tex_src,
                                            instr->num_srcs + 1);

      for (unsigned i = 0; i < instr->num_srcs; i++) {
         new_srcs[i].src_type = instr->src[i].src_type;
         nir_instr_move_src(&instr->instr, &new_srcs[i].src,
                            &instr->src[i].src);
      }

      ralloc_free(instr->src);
      instr->src = new_srcs;

      /* Now we can go ahead and move the source over to being a
       * first-class texture source.
       */
      instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
      instr->num_srcs++;
      nir_instr_rewrite_src(&instr->instr,
                            &instr->src[instr->num_srcs - 1].src,
                            nir_src_for_ssa(indirect));

      instr->sampler_array_size = array_elements;
   }

   if (location >= shader_program->NumUniformStorage ||
       !shader_program->UniformStorage[location].opaque[stage].active) {
      assert(!"cannot return a sampler");
      return;
   }

   instr->sampler_index +=
      shader_program->UniformStorage[location].opaque[stage].index;

   instr->sampler = NULL;
}
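A hypothetical caller sketch, not taken from the source: lower_sampler() would
be driven by a pass that visits every texture instruction, assuming the
iterator-style nir_foreach_block/nir_foreach_instr macros and
nir_builder_init() from the same NIR era:

static void
lower_impl(nir_function_impl *impl,
           const struct gl_shader_program *shader_program,
           gl_shader_stage stage)
{
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_tex)
            lower_sampler(nir_instr_as_tex(instr), shader_program, stage, &b);
      }
   }
}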
Example #4
void
brw_init_codegen(const struct brw_device_info *devinfo,
                 struct brw_codegen *p, void *mem_ctx)
{
   memset(p, 0, sizeof(*p));

   p->devinfo = devinfo;
   /*
    * Set the initial instruction store array size to 1024.  If that turns
    * out not to be enough, brw_next_insn() doubles the store size (sketched
    * after this function) until allocation fails.
    */
   p->store_size = 1024;
   p->store = rzalloc_array(mem_ctx, brw_inst, p->store_size);
   p->nr_insn = 0;
   p->current = p->stack;
   p->compressed = false;
   memset(p->current, 0, sizeof(p->current[0]));

   p->mem_ctx = mem_ctx;

   /* Set up default instruction state.
    */
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_set_default_mask_control(p, BRW_MASK_ENABLE); /* normal masking: respect channel enables */
   brw_set_default_saturate(p, 0);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);

   /* Set up control flow stack */
   p->if_stack_depth = 0;
   p->if_stack_array_size = 16;
   p->if_stack = rzalloc_array(mem_ctx, int, p->if_stack_array_size);

   p->loop_stack_depth = 0;
   p->loop_stack_array_size = 16;
   p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
   p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);

   brw_init_compaction_tables(devinfo);
}
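The comment above describes the store's growth strategy; a sketch of the path
brw_next_insn() presumably takes when the store fills up (the real function
also initializes the new instruction's state, so this illustrates the
doubling only):

brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   brw_inst *insn;

   /* Double the store whenever the next instruction would not fit. */
   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, opcode);
   return insn;
}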
Example #5
Text *text_create(Font *font, const char *char_string, int size)
{
	const uint8_t *string = (const uint8_t *) char_string;
	Text *text;
	FT_Face face = font->face;
	FT_Glyph *glyph_string;
	FT_Vector *pos;
	FT_BBox bbox;
	FT_Long width, height;
	size_t len;
	float x, y;
	int i;

	if (FT_Set_Char_Size(face, 0, size*64, 0, 0) != 0)
	{
		log_err("Error setting character size\n");
		return NULL;
	}

	text = ralloc(font, Text);
	if (text == NULL)
	{
		log_err("Out of memory\n");
		return NULL;
	}
	text->size = size;
	text->vao = text->vbo = text->texture = 0;
	text->texture_image = NULL;
	text->string = (uint8_t *) ralloc_strdup(text, (const char *) string);
	len = strlen((const char *) string); /* byte count, not glyph count */
	/* A UTF-8 string has at least one byte per glyph, so len is an upper
	 * bound on the glyph count; we may allocate slightly more than needed. */
	glyph_string = ralloc_array(text, FT_Glyph, len);
	pos = ralloc_array(text, FT_Vector, len);
	if (text->string == NULL || glyph_string == NULL || pos == NULL)
	{
		log_err("Out of memory\n");
		ralloc_free(text);
		return NULL;
	}

	/* The UTF-8 bytestring is converted to an array of glyphs */
	glyphstring_create(face, text, glyph_string, pos);
	/* We determine how big the text will be. This information is used
	 * to compute the size of the texture we'll store the text in */
	compute_glyphstring_bbox(glyph_string, pos, text->num_glyphs, &bbox);

	width  = bbox.xMax - bbox.xMin;
	height = bbox.yMax - bbox.yMin;
	text->width = (width/64 + 0x3) & ~0x3; /* Align to 4 bytes */
	text->height = height/64;
	text->texture_image = rzalloc_array(text, GLubyte,
			text->width * text->height);
	if (text->texture_image == NULL)
	{
		log_err("Out of memory\n");
		for (i = 0; i < text->num_glyphs; i++)
			FT_Done_Glyph(glyph_string[i]);
		ralloc_free(text);
		return NULL;
	}
	/* Now we can render the text to a texture */
	for (i = 0; i < text->num_glyphs; i++)
	{
		FT_Glyph glyph;
		FT_BitmapGlyph bitmap_glyph;
		FT_Vector pen;

		glyph = glyph_string[i];
		pen.x = pos[i].x;
		pen.y = pos[i].y;

		/* Render the new glyph and destroy the old one */
		if (FT_Glyph_To_Bitmap(&glyph, FT_RENDER_MODE_NORMAL, &pen, 1) != 0)
		{
			log_err("Error rendering glyph to bitmap for character '%c'\n",
					string[i]);
			FT_Done_Glyph(glyph_string[i]);
			continue;
		}

		bitmap_glyph = (FT_BitmapGlyph) glyph;
		blit_glyph(bitmap_glyph, text->texture_image, (pen.x - bbox.xMin)/64,
				(pen.y - bbox.yMin)/64, text->width, text->height);
		FT_Done_Glyph(glyph);
	}
	ralloc_free(glyph_string);
	ralloc_free(pos);

	x = bbox.xMin/64;
	y = bbox.yMin/64;

	/* Why do we add text->width and not bbox.xMax? Because OpenGL wants
	 * texture rows aligned to 4 bytes, so the quad must span the padded
	 * width. This fixes a very subtle bug with a very noticeable effect. */
	text->vertex[0].x = x;
	text->vertex[0].y = y;
	text->vertex[0].u = 0;
	text->vertex[0].v = 0;

	text->vertex[1].x = x + text->width;
	text->vertex[1].y = y;
	text->vertex[1].u = 1;
	text->vertex[1].v = 0;

	text->vertex[2].x = x + text->width;
	text->vertex[2].y = y + text->height;
	text->vertex[2].u = 1;
	text->vertex[2].v = 1;

	text->vertex[3].x = x;
	text->vertex[3].y = y + text->height;
	text->vertex[3].u = 0;
	text->vertex[3].v = 1;

	for (i = 0; i < 4; i++)
	{
		text->vertex[i].r = 0;
		text->vertex[i].g = 0;
		text->vertex[i].b = 1;
		text->vertex[i].a = 1;
	}

	return text;
}
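A worked illustration of the row-alignment arithmetic used above: rounding a
width up with (w + 0x3) & ~0x3 yields the next multiple of 4, matching
OpenGL's default GL_UNPACK_ALIGNMENT of 4. This is a standalone example, not
part of the source:

#include <assert.h>

static int align4(int w)
{
	return (w + 0x3) & ~0x3;
}

int main(void)
{
	assert(align4(13) == 16); /* rounds up to the next multiple of 4 */
	assert(align4(16) == 16); /* already-aligned widths are unchanged */
	return 0;
}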
Example #6
static void
create_buffer_blocks(void *mem_ctx, struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     struct gl_uniform_block **out_blks, unsigned num_blocks,
                     struct hash_table *block_hash, unsigned num_variables,
                     bool create_ubo_blocks)
{
   if (num_blocks == 0) {
      assert(num_variables == 0);
      return;
   }

   assert(num_variables != 0);

   /* Allocate storage to hold all of the information related to uniform
    * blocks that can be queried through the API.
    */
   struct gl_uniform_block *blocks =
      rzalloc_array(mem_ctx, gl_uniform_block, num_blocks);
   gl_uniform_buffer_variable *variables =
      ralloc_array(blocks, gl_uniform_buffer_variable, num_variables);

   /* Add each variable from each uniform block to the API tracking
    * structures.
    */
   ubo_visitor parcel(blocks, variables, num_variables, prog);

   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_STD140)
                 == unsigned(ubo_packing_std140));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_SHARED)
                 == unsigned(ubo_packing_shared));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_PACKED)
                 == unsigned(ubo_packing_packed));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_STD430)
                 == unsigned(ubo_packing_std430));

   unsigned i = 0;
   struct hash_entry *entry;
   hash_table_foreach (block_hash, entry) {
      const struct link_uniform_block_active *const b =
         (const struct link_uniform_block_active *) entry->data;
      const glsl_type *block_type = b->type;

      if ((create_ubo_blocks && !b->is_shader_storage) ||
          (!create_ubo_blocks && b->is_shader_storage)) {

         if (b->array != NULL) {
            unsigned binding_offset = 0;
            char *name = ralloc_strdup(NULL,
                                       block_type->without_array()->name);
            size_t name_length = strlen(name);

            assert(b->has_instance_name);
            process_block_array(b->array, &name, name_length, blocks, &parcel,
                                variables, b, &i, &binding_offset, ctx, prog);
            ralloc_free(name);
         } else {
            blocks[i].Name = ralloc_strdup(blocks, block_type->name);
            blocks[i].Uniforms = &variables[parcel.index];
            blocks[i].Binding = (b->has_binding) ? b->binding : 0;
            blocks[i].UniformBufferSize = 0;
            blocks[i]._Packing =
               gl_uniform_block_packing(block_type->interface_packing);
            blocks[i]._RowMajor = block_type->get_interface_row_major();

            parcel.process(block_type,
                           b->has_instance_name ? block_type->name : "");

            blocks[i].UniformBufferSize = parcel.buffer_size;

            /* Check that the SSBO size does not exceed the maximum supported
             * size.
             */
            if (b->is_shader_storage &&
                parcel.buffer_size > ctx->Const.MaxShaderStorageBlockSize) {
               linker_error(prog, "shader storage block `%s' has size %d, "
                            "which is larger than than the maximum allowed (%d)",
                            block_type->name, parcel.buffer_size,
                            ctx->Const.MaxShaderStorageBlockSize);
            }
            blocks[i].NumUniforms = (unsigned)(ptrdiff_t)
               (&variables[parcel.index] - blocks[i].Uniforms);
            i++;
         }
      }
   }

   *out_blks = blocks;

   assert(parcel.index == num_variables);
}
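The create_ubo_blocks flag implies the linker invokes this helper twice over
the same hash table, once per block class; a hypothetical caller sketch with
illustrative variable names:

   create_buffer_blocks(mem_ctx, ctx, prog, &ubo_blocks, num_ubo_blocks,
                        block_hash, num_ubo_variables, true);

   create_buffer_blocks(mem_ctx, ctx, prog, &ssbo_blocks, num_ssbo_blocks,
                        block_hash, num_ssbo_variables, false);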
Example #7
void
link_assign_atomic_counter_resources(struct gl_context *ctx,
                                     struct gl_shader_program *prog)
{
   unsigned num_buffers;
   unsigned num_atomic_buffers[MESA_SHADER_STAGES] = {};
   active_atomic_buffer *abs =
      find_active_atomic_counters(ctx, prog, &num_buffers);

   prog->AtomicBuffers = rzalloc_array(prog, gl_active_atomic_buffer,
                                       num_buffers);
   prog->NumAtomicBuffers = num_buffers;

   unsigned i = 0;
   for (unsigned binding = 0;
        binding < ctx->Const.MaxAtomicBufferBindings;
        binding++) {

      /* If the binding was not used, skip.
       */
      if (abs[binding].size == 0)
         continue;

      active_atomic_buffer &ab = abs[binding];
      gl_active_atomic_buffer &mab = prog->AtomicBuffers[i];

      /* Assign buffer-specific fields. */
      mab.Binding = binding;
      mab.MinimumSize = ab.size;
      mab.Uniforms = rzalloc_array(prog->AtomicBuffers, GLuint,
                                   ab.num_uniforms);
      mab.NumUniforms = ab.num_uniforms;

      /* Assign counter-specific fields. */
      for (unsigned j = 0; j < ab.num_uniforms; j++) {
         ir_variable *const var = ab.uniforms[j].var;
         gl_uniform_storage *const storage =
            &prog->UniformStorage[ab.uniforms[j].uniform_loc];

         mab.Uniforms[j] = ab.uniforms[j].uniform_loc;
         if (!var->data.explicit_binding)
            var->data.binding = i;

         storage->atomic_buffer_index = i;
         storage->offset = var->data.offset;
         storage->array_stride = (var->type->is_array() ?
                                  var->type->without_array()->atomic_size() : 0);
         if (!var->type->is_matrix())
            storage->matrix_stride = 0;
      }

      /* Assign stage-specific fields. */
      for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
         if (ab.stage_counter_references[j]) {
            mab.StageReferences[j] = GL_TRUE;
            num_atomic_buffers[j]++;
         } else {
            mab.StageReferences[j] = GL_FALSE;
         }
      }

      i++;
   }

   /* Store a list of pointers to atomic buffers per stage and store the
    * index into the intra-stage buffer list in uniform storage.
    */
   for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
      if (prog->_LinkedShaders[j] && num_atomic_buffers[j] > 0) {
         prog->_LinkedShaders[j]->NumAtomicBuffers = num_atomic_buffers[j];
         prog->_LinkedShaders[j]->AtomicBuffers =
            rzalloc_array(prog, gl_active_atomic_buffer *,
                          num_atomic_buffers[j]);

         unsigned intra_stage_idx = 0;
         for (unsigned i = 0; i < num_buffers; i++) {
            struct gl_active_atomic_buffer *atomic_buffer =
               &prog->AtomicBuffers[i];
            if (atomic_buffer->StageReferences[j]) {
               prog->_LinkedShaders[j]->AtomicBuffers[intra_stage_idx] =
                  atomic_buffer;

               for (unsigned u = 0; u < atomic_buffer->NumUniforms; u++) {
                  prog->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].index =
                     intra_stage_idx;
                  prog->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].active =
                     true;
               }

               intra_stage_idx++;
            }
         }
      }
   }

   delete [] abs;
   assert(i == num_buffers);
}
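With the per-stage lists built above, a driver can walk only the buffers a
given stage actually references; a hypothetical consumer sketch, with an
illustrative binding hook that is not a real driver API:

   struct gl_shader *sh = prog->_LinkedShaders[stage];
   for (unsigned b = 0; b < sh->NumAtomicBuffers; b++) {
      const gl_active_atomic_buffer *ab = sh->AtomicBuffers[b];
      bind_atomic_counter_buffer(ctx, ab->Binding, ab->MinimumSize); /* hypothetical */
   }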
Example #8
static void
create_buffer_blocks(void *mem_ctx, struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     struct gl_uniform_block **out_blks, unsigned num_blocks,
                     struct hash_table *block_hash, unsigned num_variables,
                     bool create_ubo_blocks)
{
   if (num_blocks == 0) {
      assert(num_variables == 0);
      return;
   }

   assert(num_variables != 0);

   /* Allocate storage to hold all of the information related to uniform
    * blocks that can be queried through the API.
    */
   struct gl_uniform_block *blocks =
      rzalloc_array(mem_ctx, gl_uniform_block, num_blocks);
   gl_uniform_buffer_variable *variables =
      ralloc_array(blocks, gl_uniform_buffer_variable, num_variables);

   /* Add each variable from each uniform block to the API tracking
    * structures.
    */
   ubo_visitor parcel(blocks, variables, num_variables, prog,
                      ctx->Const.UseSTD430AsDefaultPacking);

   unsigned i = 0;
   struct hash_entry *entry;
   hash_table_foreach (block_hash, entry) {
      const struct link_uniform_block_active *const b =
         (const struct link_uniform_block_active *) entry->data;
      const glsl_type *block_type = b->type;

      if ((create_ubo_blocks && !b->is_shader_storage) ||
          (!create_ubo_blocks && b->is_shader_storage)) {

         unsigned binding_offset = 0;
         if (b->array != NULL) {
            char *name = ralloc_strdup(NULL,
                                       block_type->without_array()->name);
            size_t name_length = strlen(name);

            assert(b->has_instance_name);
            process_block_array(b->array, &name, name_length, blocks, &parcel,
                                variables, b, &i, &binding_offset, ctx, prog,
                                i);
            ralloc_free(name);
         } else {
            process_block_array_leaf(block_type->name, blocks, &parcel,
                                     variables, b, &i, &binding_offset,
                                     0, ctx, prog);
         }
      }
   }

   *out_blks = blocks;

   assert(parcel.index == num_variables);
}