static void
lower_instr(nir_intrinsic_instr *instr, lower_atomic_state *state)
{
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   default:
      return;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform &&
       instr->variables[0]->var->data.mode != nir_var_shader_storage)
      return; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);
   unsigned uniform_loc = instr->variables[0]->var->data.location;

   /* Create the offset-based replacement intrinsic, pointing it at the
    * atomic counter buffer index recorded in the uniform storage.
    */
   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   new_instr->const_index[0] =
      state->shader_program->UniformStorage[uniform_loc].opaque[state->shader->stage].index;

   nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
   offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

   /* Walk the deref chain: constant array indices are folded into the
    * load_const offset, while indirect indices contribute an imul/iadd
    * chain on top of it.
    */
   nir_deref *tail = &instr->variables[0]->deref;
   while (tail->child != NULL) {
      assert(tail->child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);
      tail = tail->child;

      unsigned child_array_elements = tail->child != NULL ?
         glsl_get_aoa_size(tail->type) : 1;

      offset_const->value.u[0] += deref_array->base_offset *
         child_array_elements * ATOMIC_COUNTER_SIZE;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         /* offset += indirect_index * child_array_elements * ATOMIC_COUNTER_SIZE */
         nir_load_const_instr *atomic_counter_size =
            nir_load_const_instr_create(mem_ctx, 1);
         atomic_counter_size->value.u[0] =
            child_array_elements * ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = offset_def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   /* Rewrite all users of the variable-based intrinsic to use the new
    * offset-based one, then remove the original.
    */
   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa));
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);
}
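
/* A minimal sketch of the pass driver that would sit alongside
 * lower_instr().  It assumes the callback-style nir_foreach_block() /
 * nir_foreach_overload() iterators matching this revision of NIR (later
 * Mesa releases renamed and re-ordered these), and the entry-point
 * signature below is likewise an assumption rather than the definitive
 * upstream one.
 */
static bool
lower_block(nir_block *block, void *state)
{
   nir_foreach_instr_safe(block, instr) {
      if (instr->type == nir_instr_type_intrinsic)
         lower_instr(nir_instr_as_intrinsic(instr),
                     (lower_atomic_state *) state);
   }

   return true;
}

void
nir_lower_atomics(nir_shader *shader,
                  const struct gl_shader_program *shader_program)
{
   lower_atomic_state state = {
      .shader = shader,
      .shader_program = shader_program,
   };

   nir_foreach_overload(shader, overload) {
      if (overload->impl) {
         nir_foreach_block(overload->impl, lower_block, (void *) &state);

         /* lower_instr() only inserts and removes instructions within a
          * block, so block ordering and dominance information stay valid.
          */
         nir_metadata_preserve(overload->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }
}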
/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of
    * structs here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {
      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double the element width for double types that take two slots */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}
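
/* A sketch of the expected calling pattern, under stated assumptions: the
 * helper name gather_io_use() and the mark_whole_variable() fallback are
 * hypothetical stand-ins for the caller's code, while nir_src_as_deref()
 * and nir_deref_instr_get_variable() are standard NIR helpers.  The idea
 * is to try partial marking first, and only mark the entire variable as
 * used when try_mask_partial_io() gives up.
 */
static void
gather_io_use(nir_shader *shader, nir_intrinsic_instr *instr,
              bool is_output_read)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   if (var->data.mode != nir_var_shader_in &&
       var->data.mode != nir_var_shader_out)
      return;

   if (!try_mask_partial_io(shader, var, deref, is_output_read))
      mark_whole_variable(shader, var, is_output_read); /* hypothetical */
}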