/**
 * Rewrite a load_deref/store_deref of a scratch variable into an explicit
 * load_scratch/store_scratch at a byte offset.
 *
 * The scratch offset is the deref's offset within the variable (computed via
 * the caller-provided size/align callback) plus the variable's assigned
 * scratch base in var->data.location.
 *
 * Booleans are kept in scratch as 32-bit integers: stores widen the 1-bit
 * boolean with b2i32 before writing, and loads read 32 bits and convert back
 * to a 1-bit boolean with i2b before rewriting the original uses.
 */
static void lower_load_store(nir_builder *b,
                             nir_intrinsic_instr *intrin,
                             glsl_type_size_align_func size_align)
{
   b->cursor = nir_before_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_build_deref_offset(b, deref, size_align),
                   var->data.location);

   unsigned align, UNUSED size;
   size_align(deref->type, &size, &align);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      const bool is_bool = glsl_type_is_boolean(deref->type);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_scratch);
      load->num_components = intrin->num_components;
      load->src[0] = nir_src_for_ssa(offset);
      nir_intrinsic_set_align(load, align, 0);
      /* Booleans live in scratch memory as 32-bit values; everything else
       * keeps the original destination bit size.
       */
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        is_bool ? 32 : intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def *value = &load->dest.ssa;
      if (is_bool)
         value = nir_i2b(b, value);

      /* Rewrite uses with the converted value, not the raw load result, so
       * boolean consumers see a proper 1-bit boolean.
       */
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(value));
   } else {
      assert(intrin->intrinsic == nir_intrinsic_store_deref);

      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      /* Widen 1-bit booleans to 32 bits for the scratch write. */
      if (glsl_type_is_boolean(deref->type))
         value = nir_b2i32(b, value);

      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_scratch);
      store->num_components = intrin->num_components;
      store->src[0] = nir_src_for_ssa(value);
      store->src[1] = nir_src_for_ssa(offset);
      nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
      nir_intrinsic_set_align(store, align, 0);
      nir_builder_instr_insert(b, &store->instr);
   }

   nir_instr_remove(&intrin->instr);
   nir_deref_instr_remove_if_unused(deref);
}
/**
 * Attempt to flag only the accessed portion of a varying as used, rather
 * than the entire variable.  The caller must ensure `var` is a shader input
 * or output.
 *
 * Returns false (marking nothing) whenever the access cannot be narrowed
 * down — a non-constant index, an unsupported type (structs, compact
 * arrays), or a constant-folded out-of-range index — so the caller can fall
 * back to marking the whole variable as used.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   /* Per-vertex IO carries one extra outer array level; peel it off so the
    * checks below see the per-vertex element type.
    */
   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* Only two shapes are handled here:
    *
    *  - indexing into a matrix
    *  - indexing into a (non-compact) array whose innermost element is a
    *    numeric or boolean scalar/vector/matrix
    *
    * Varying structs and arrays of structs are punted back to the caller,
    * which marks the entire variable as used.
    */
   const struct glsl_type *inner = glsl_without_array(type);
   const bool handled =
      glsl_type_is_matrix(type) ||
      (glsl_type_is_array(type) && !var->data.compact &&
       (glsl_type_is_numeric(inner) || glsl_type_is_boolean(inner)));
   if (!handled)
      return false;

   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(inner))
         mat_cols = glsl_get_matrix_columns(inner);
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* Dual-slot (two-slot) element types occupy twice the width. */
   if (glsl_type_is_dual_slot(inner))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This can
       * arise from constant folding of a legal GLSL program.
       *
       * The spec makes out-of-bounds indexing undefined behaviour, but we
       * still must not hand an out-of-range value to set_io_mask() (that
       * could mark nonexistent slots as used), so bail and let the caller
       * mark the whole variable instead.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}