extern "C" int _mesa_get_sampler_uniform_value(class ir_dereference *sampler, struct gl_shader_program *shader_program, const struct gl_program *prog) { get_sampler_name getname(sampler, shader_program); GLuint shader = _mesa_program_target_to_index(prog->Target); sampler->accept(&getname); unsigned location; if (!shader_program->UniformHash->get(location, getname.name)) { linker_error(shader_program, "failed to find sampler named %s.\n", getname.name); return 0; } if (!shader_program->UniformStorage[location].sampler[shader].active) { assert(0 && "cannot return a sampler"); linker_error(shader_program, "cannot return a sampler named %s, because it is not " "used in this shader stage. This is a driver bug.\n", getname.name); return 0; } return shader_program->UniformStorage[location].sampler[shader].index + getname.offset; }
void
link_check_atomic_counter_resources(struct gl_context *ctx,
                                    struct gl_shader_program *prog)
{
   unsigned num_buffers;
   active_atomic_buffer *const abs =
      find_active_atomic_counters(ctx, prog, &num_buffers);
   unsigned atomic_counters[MESA_SHADER_STAGES] = {};
   unsigned atomic_buffers[MESA_SHADER_STAGES] = {};
   unsigned total_atomic_counters = 0;
   unsigned total_atomic_buffers = 0;

   /* Sum the required resources.  Note that this counts buffers and
    * counters referenced by several shader stages multiple times
    * against the combined limit -- that's the behavior the spec
    * requires.
    */
   for (unsigned i = 0; i < ctx->Const.MaxAtomicBufferBindings; i++) {
      if (abs[i].size == 0)
         continue;

      for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
         const unsigned n = abs[i].stage_references[j];

         if (n) {
            atomic_counters[j] += n;
            total_atomic_counters += n;
            atomic_buffers[j]++;
            total_atomic_buffers++;
         }
      }
   }

   /* Check that they are within the supported limits. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (atomic_counters[i] > ctx->Const.Program[i].MaxAtomicCounters)
         linker_error(prog, "Too many %s shader atomic counters",
                      _mesa_shader_stage_to_string(i));

      if (atomic_buffers[i] > ctx->Const.Program[i].MaxAtomicBuffers)
         linker_error(prog, "Too many %s shader atomic counter buffers",
                      _mesa_shader_stage_to_string(i));
   }

   if (total_atomic_counters > ctx->Const.MaxCombinedAtomicCounters)
      linker_error(prog, "Too many combined atomic counters");

   if (total_atomic_buffers > ctx->Const.MaxCombinedAtomicBuffers)
      linker_error(prog, "Too many combined atomic buffers");

   delete [] abs;
}
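/* Worked example of the counting above (hypothetical numbers): one active
 * atomic buffer binding holding three counters, referenced from both the
 * vertex and the fragment stage, contributes
 *
 *     atomic_counters[VERTEX]   += 3;   atomic_buffers[VERTEX]++;
 *     atomic_counters[FRAGMENT] += 3;   atomic_buffers[FRAGMENT]++;
 *     total_atomic_counters     += 6;   total_atomic_buffers += 2;
 *
 * i.e. the same buffer and counters are charged once against each per-stage
 * limit and once per referencing stage against the combined limits.
 */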
static void
process_block_array(struct uniform_block_array_elements *ub_array, char **name,
                    size_t name_length, gl_uniform_block *blocks,
                    ubo_visitor *parcel, gl_uniform_buffer_variable *variables,
                    const struct link_uniform_block_active *const b,
                    unsigned *block_index, unsigned *binding_offset,
                    struct gl_context *ctx, struct gl_shader_program *prog)
{
   if (ub_array) {
      for (unsigned j = 0; j < ub_array->num_array_elements; j++) {
         size_t new_length = name_length;

         /* Append the subscript to the current variable name */
         ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]",
                                      ub_array->array_elements[j]);

         process_block_array(ub_array->array, name, new_length, blocks,
                             parcel, variables, b, block_index,
                             binding_offset, ctx, prog);
      }
   } else {
      unsigned i = *block_index;
      const glsl_type *type = b->type->without_array();

      blocks[i].Name = ralloc_strdup(blocks, *name);
      blocks[i].Uniforms = &variables[(*parcel).index];

      /* The GL_ARB_shading_language_420pack spec says:
       *
       *     "If the binding identifier is used with a uniform block
       *     instanced as an array then the first element of the array
       *     takes the specified block binding and each subsequent
       *     element takes the next consecutive uniform block binding
       *     point."
       */
      blocks[i].Binding = (b->has_binding) ? b->binding + *binding_offset : 0;

      blocks[i].UniformBufferSize = 0;
      blocks[i]._Packing = gl_uniform_block_packing(type->interface_packing);

      parcel->process(type, blocks[i].Name);

      blocks[i].UniformBufferSize = parcel->buffer_size;

      /* Check SSBO size is lower than maximum supported size for SSBO */
      if (b->is_shader_storage &&
          parcel->buffer_size > ctx->Const.MaxShaderStorageBlockSize) {
         linker_error(prog, "shader storage block `%s' has size %d, "
                      "which is larger than the maximum allowed (%d)",
                      b->type->name,
                      parcel->buffer_size,
                      ctx->Const.MaxShaderStorageBlockSize);
      }

      blocks[i].NumUniforms =
         (unsigned)(ptrdiff_t)(&variables[parcel->index] - blocks[i].Uniforms);

      *block_index = *block_index + 1;
      *binding_offset = *binding_offset + 1;
   }
}
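/* Illustrative trace (hypothetical GLSL, assuming every array element is
 * active): for
 *
 *     layout(binding = 4) uniform Block { vec4 v; } b[2][2];
 *
 * the recursion above visits the leaves in order and names them
 * "Block[0][0]", "Block[0][1]", "Block[1][0]" and "Block[1][1]", with
 * bindings 4, 5, 6 and 7, since *binding_offset is incremented once per leaf
 * as required by the ARB_shading_language_420pack rule quoted above.
 */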
static unsigned
get_sampler_index(struct gl_shader_program *shader_program, const char *name,
                  const struct gl_program *prog)
{
   GLuint shader = _mesa_program_enum_to_shader_stage(prog->Target);

   unsigned location;
   if (!shader_program->UniformHash->get(location, name)) {
      linker_error(shader_program,
                   "failed to find sampler named %s.\n", name);
      return 0;
   }

   if (!shader_program->UniformStorage[location].sampler[shader].active) {
      assert(0 && "cannot return a sampler");
      linker_error(shader_program,
                   "cannot return a sampler named %s, because it is not "
                   "used in this shader stage. This is a driver bug.\n",
                   name);
      return 0;
   }

   return shader_program->UniformStorage[location].sampler[shader].index;
}
static void
process_block_array_leaf(const char *name, gl_uniform_block *blocks,
                         ubo_visitor *parcel,
                         gl_uniform_buffer_variable *variables,
                         const struct link_uniform_block_active *const b,
                         unsigned *block_index, unsigned *binding_offset,
                         unsigned linearized_index,
                         struct gl_context *ctx, struct gl_shader_program *prog)
{
   unsigned i = *block_index;
   const glsl_type *type = b->type->without_array();

   blocks[i].Name = ralloc_strdup(blocks, name);
   blocks[i].Uniforms = &variables[(*parcel).index];

   /* The ARB_shading_language_420pack spec says:
    *
    *     If the binding identifier is used with a uniform block instanced as
    *     an array then the first element of the array takes the specified
    *     block binding and each subsequent element takes the next consecutive
    *     uniform block binding point.
    */
   blocks[i].Binding = (b->has_binding) ? b->binding + *binding_offset : 0;

   blocks[i].UniformBufferSize = 0;
   blocks[i]._Packing = glsl_interface_packing(type->interface_packing);
   blocks[i]._RowMajor = type->get_interface_row_major();
   blocks[i].linearized_array_index = linearized_index;

   parcel->process(type, b->has_instance_name ? blocks[i].Name : "");

   blocks[i].UniformBufferSize = parcel->buffer_size;

   /* Check SSBO size is lower than maximum supported size for SSBO */
   if (b->is_shader_storage &&
       parcel->buffer_size > ctx->Const.MaxShaderStorageBlockSize) {
      linker_error(prog, "shader storage block `%s' has size %d, "
                   "which is larger than the maximum allowed (%d)",
                   b->type->name,
                   parcel->buffer_size,
                   ctx->Const.MaxShaderStorageBlockSize);
   }

   blocks[i].NumUniforms =
      (unsigned)(ptrdiff_t)(&variables[parcel->index] - blocks[i].Uniforms);

   *block_index = *block_index + 1;
   *binding_offset = *binding_offset + 1;
}
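/* Name-prefixing example (hypothetical GLSL): for a block with an instance
 * name such as
 *
 *     uniform Block { vec4 v; } inst;
 *
 * has_instance_name is true and the prefix handed to parcel->process() is the
 * block name, so the member ends up reported as "Block.v"; for a block
 * declared without an instance name the empty prefix is used and the member
 * is reported simply as "v".
 */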
void
link_uniform_blocks(void *mem_ctx,
                    struct gl_context *ctx,
                    struct gl_shader_program *prog,
                    struct gl_linked_shader *shader,
                    struct gl_uniform_block **ubo_blocks,
                    unsigned *num_ubo_blocks,
                    struct gl_uniform_block **ssbo_blocks,
                    unsigned *num_ssbo_blocks)
{
   /* This hash table will track all of the uniform blocks that have been
    * encountered.  Since blocks with the same block-name must be the same,
    * the hash is organized by block-name.
    */
   struct hash_table *block_hash =
      _mesa_hash_table_create(mem_ctx, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   if (block_hash == NULL) {
      _mesa_error_no_memory(__func__);
      linker_error(prog, "out of memory\n");
      return;
   }

   /* Determine which uniform blocks are active. */
   link_uniform_block_active_visitor v(mem_ctx, block_hash, prog);
   visit_list_elements(&v, shader->ir);

   /* Count the number of active uniform blocks.  Count the total number of
    * active slots in those uniform blocks.
    */
   unsigned num_ubo_variables = 0;
   unsigned num_ssbo_variables = 0;
   count_block_size block_size;
   struct hash_entry *entry;

   hash_table_foreach (block_hash, entry) {
      struct link_uniform_block_active *const b =
         (struct link_uniform_block_active *) entry->data;

      assert((b->array != NULL) == b->type->is_array());

      if (b->array != NULL &&
          (b->type->without_array()->interface_packing ==
           GLSL_INTERFACE_PACKING_PACKED)) {
         b->type = resize_block_array(b->type, b->array);
         b->var->type = b->type;
      }

      block_size.num_active_uniforms = 0;
      block_size.process(b->type->without_array(), "");

      if (b->array != NULL) {
         unsigned aoa_size = b->type->arrays_of_arrays_size();
         if (b->is_shader_storage) {
            *num_ssbo_blocks += aoa_size;
            num_ssbo_variables += aoa_size * block_size.num_active_uniforms;
         } else {
            *num_ubo_blocks += aoa_size;
            num_ubo_variables += aoa_size * block_size.num_active_uniforms;
         }
      } else {
         if (b->is_shader_storage) {
            (*num_ssbo_blocks)++;
            num_ssbo_variables += block_size.num_active_uniforms;
         } else {
            (*num_ubo_blocks)++;
            num_ubo_variables += block_size.num_active_uniforms;
         }
      }
   }

   create_buffer_blocks(mem_ctx, ctx, prog, ubo_blocks, *num_ubo_blocks,
                        block_hash, num_ubo_variables, true);
   create_buffer_blocks(mem_ctx, ctx, prog, ssbo_blocks, *num_ssbo_blocks,
                        block_hash, num_ssbo_variables, false);

   _mesa_hash_table_destroy(block_hash, NULL);
}
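/* Counting example (hypothetical GLSL): a shader storage block declared as
 *
 *     buffer SSBO { vec4 a; float b; } s[3];
 *
 * has arrays_of_arrays_size() == 3 and two member slots per element, so the
 * loop above adds 3 to *num_ssbo_blocks and 3 * 2 = 6 to num_ssbo_variables.
 * Those totals size the allocations made in create_buffer_blocks().
 */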
static void
create_buffer_blocks(void *mem_ctx, struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     struct gl_uniform_block **out_blks, unsigned num_blocks,
                     struct hash_table *block_hash, unsigned num_variables,
                     bool create_ubo_blocks)
{
   if (num_blocks == 0) {
      assert(num_variables == 0);
      return;
   }

   assert(num_variables != 0);

   /* Allocate storage to hold all of the information related to uniform
    * blocks that can be queried through the API.
    */
   struct gl_uniform_block *blocks =
      rzalloc_array(mem_ctx, gl_uniform_block, num_blocks);
   gl_uniform_buffer_variable *variables =
      ralloc_array(blocks, gl_uniform_buffer_variable, num_variables);

   /* Add each variable from each uniform block to the API tracking
    * structures.
    */
   ubo_visitor parcel(blocks, variables, num_variables, prog);

   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_STD140) ==
                 unsigned(ubo_packing_std140));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_SHARED) ==
                 unsigned(ubo_packing_shared));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_PACKED) ==
                 unsigned(ubo_packing_packed));
   STATIC_ASSERT(unsigned(GLSL_INTERFACE_PACKING_STD430) ==
                 unsigned(ubo_packing_std430));

   unsigned i = 0;
   struct hash_entry *entry;
   hash_table_foreach (block_hash, entry) {
      const struct link_uniform_block_active *const b =
         (const struct link_uniform_block_active *) entry->data;
      const glsl_type *block_type = b->type;

      if ((create_ubo_blocks && !b->is_shader_storage) ||
          (!create_ubo_blocks && b->is_shader_storage)) {

         if (b->array != NULL) {
            unsigned binding_offset = 0;
            char *name =
               ralloc_strdup(NULL, block_type->without_array()->name);
            size_t name_length = strlen(name);

            assert(b->has_instance_name);
            process_block_array(b->array, &name, name_length, blocks, &parcel,
                                variables, b, &i, &binding_offset, ctx, prog);
            ralloc_free(name);
         } else {
            blocks[i].Name = ralloc_strdup(blocks, block_type->name);
            blocks[i].Uniforms = &variables[parcel.index];
            blocks[i].Binding = (b->has_binding) ? b->binding : 0;
            blocks[i].UniformBufferSize = 0;
            blocks[i]._Packing =
               gl_uniform_block_packing(block_type->interface_packing);
            blocks[i]._RowMajor = block_type->get_interface_row_major();

            parcel.process(block_type,
                           b->has_instance_name ? block_type->name : "");

            blocks[i].UniformBufferSize = parcel.buffer_size;

            /* Check SSBO size is lower than maximum supported size for SSBO */
            if (b->is_shader_storage &&
                parcel.buffer_size > ctx->Const.MaxShaderStorageBlockSize) {
               linker_error(prog, "shader storage block `%s' has size %d, "
                            "which is larger than the maximum allowed (%d)",
                            block_type->name,
                            parcel.buffer_size,
                            ctx->Const.MaxShaderStorageBlockSize);
            }

            blocks[i].NumUniforms = (unsigned)(ptrdiff_t)
               (&variables[parcel.index] - blocks[i].Uniforms);
            i++;
         }
      }
   }

   *out_blks = blocks;

   assert(parcel.index == num_variables);
}
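/* Note on the STATIC_ASSERTs above: the packing value is carried across enum
 * types by a plain cast (the gl_uniform_block_packing(...) and
 * glsl_interface_packing(...) uses in this file), which is only safe while
 * the enums keep identical numeric values. The assertions turn any future
 * divergence between them into a compile-time error instead of silent
 * misbehavior at link time.
 */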