/* Per-block worker for the IO-lowering pass: rewrites variable-based IO
 * intrinsics into offset-based ones.
 *
 *  - load_var on a shader input or uniform becomes
 *    load_input / load_uniform (or the *_indirect form when the deref
 *    contains a non-constant index).
 *  - store_var on a shader output becomes store_output / store_output_indirect.
 *
 * The constant part of the offset (including the variable's
 * driver_location) goes in const_index[0]; any indirect part becomes an
 * extra source.  The old intrinsic is removed after the replacement is
 * inserted in its place.  Always returns true so block iteration continues.
 */
static bool
nir_lower_io_block(nir_block *block, void *void_state)
{
   struct lower_io_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         nir_variable_mode mode = intrin->variables[0]->var->data.mode;

         /* Only inputs and uniforms are lowered here; leave everything
          * else (e.g. shader-local variables) untouched.
          */
         if (mode != nir_var_shader_in && mode != nir_var_uniform)
            continue;

         bool is_indirect = deref_has_indirect(intrin->variables[0]);

         /* Pick the replacement opcode from the mode and indirectness.
          * The guard above guarantees mode is one of these two.
          */
         nir_intrinsic_op load_op;
         if (mode == nir_var_shader_in) {
            load_op = is_indirect ? nir_intrinsic_load_input_indirect
                                  : nir_intrinsic_load_input;
         } else {
            load_op = is_indirect ? nir_intrinsic_load_uniform_indirect
                                  : nir_intrinsic_load_uniform;
         }

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(state->mem_ctx, load_op);
         load->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         load->const_index[0] = offset;

         if (is_indirect)
            load->src[0] = indirect;

         if (intrin->dest.is_ssa) {
            /* Give the new load an SSA destination and point all users of
             * the old value at it.
             */
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa),
                                     state->mem_ctx);
         } else {
            nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      case nir_intrinsic_store_var: {
         /* Only shader outputs are lowered on the store side. */
         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
            continue;

         bool is_indirect = deref_has_indirect(intrin->variables[0]);

         nir_intrinsic_op store_op = is_indirect
            ? nir_intrinsic_store_output_indirect
            : nir_intrinsic_store_output;

         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx, store_op);
         store->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         store->const_index[0] = offset;

         /* src[0] is the value being written; the indirect offset, if
          * any, rides along as src[1].
          */
         nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);

         if (is_indirect)
            store->src[1] = indirect;

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}
/**
 * Try to mark only the accessed portion of the given varying as used.
 * The caller must ensure the variable is a shader input or output.
 *
 * Handles constant indexing into matrices and into arrays of arrays,
 * matrices, vectors, or scalars.  For anything else (structs, compact
 * arrays, non-constant indices, out-of-bounds constant indices), nothing
 * is marked and false is returned so the caller can fall back to marking
 * the whole variable as used.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   /* Per-vertex IO is wrapped in an outer per-vertex array; peel it off
    * so we reason about the per-vertex element type.
    */
   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const struct glsl_type *elem_type = glsl_without_array(type);

   /* We only know how to handle indexing into matrices, or into arrays
    * whose innermost element is numeric/boolean (vectors, scalars,
    * matrices).  Varying structs and arrays of structs are punted on:
    * returning false makes the caller mark the entire variable as used.
    */
   const bool supported_shape =
      glsl_type_is_matrix(type) ||
      (glsl_type_is_array(type) && !var->data.compact &&
       (glsl_type_is_numeric(elem_type) || glsl_type_is_boolean(elem_type)));

   if (!supported_shape)
      return false;

   /* get_io_offset() signals "not a constant index" (or some other
    * problem) with an all-ones sentinel.
    */
   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   /* Work out the total slot extent: element count, columns per matrix
    * element, and a width multiplier for dual-slot (64-bit) elements.
    */
   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(elem_type))
         mat_cols = glsl_get_matrix_columns(elem_type);
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* Double types occupying two slots double the element width. */
   if (glsl_type_is_dual_slot(elem_type))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* A constant index past the end of the matrix/array can legally
       * arise from constant folding of a valid GLSL program.  Indexing
       * out of bounds is undefined behaviour per the spec, but we must
       * not feed out-of-range values to set_io_mask() (that could mark
       * nonexistent slots as used), so punt and let the caller mark the
       * whole variable as used instead.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}