static void
lower_load_const_instr_scalar(nir_load_const_instr *lower)
{
   if (lower->def.num_components == 1)
      return;

   nir_builder b;
   nir_builder_init(&b, nir_cf_node_get_function(&lower->instr.block->cf_node));
   b.cursor = nir_before_instr(&lower->instr);

   /* Emit the individual loads. */
   nir_ssa_def *loads[4];
   for (unsigned i = 0; i < lower->def.num_components; i++) {
      nir_load_const_instr *load_comp = nir_load_const_instr_create(b.shader, 1);
      load_comp->value.u[0] = lower->value.u[i];
      nir_builder_instr_insert(&b, &load_comp->instr);
      loads[i] = &load_comp->def;
   }

   /* Batch things back together into a vector. */
   nir_ssa_def *vec = nir_vec(&b, loads, lower->def.num_components);

   /* Replace the old load with a reference to our reconstructed vector. */
   nir_ssa_def_rewrite_uses(&lower->def, nir_src_for_ssa(vec));
   nir_instr_remove(&lower->instr);
}
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[4];

   if (!instr->dest.dest.is_ssa)
      return false;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i); j++) {
         src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx, instr->dest.dest.ssa.num_components);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def),
                            mem_ctx);

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
/* Lower the *_var atomic counter intrinsics to their indexed forms, turning
 * the variable deref into a uniform index plus a (possibly indirect) byte
 * offset source.
 */
static void
lower_instr(nir_intrinsic_instr *instr, lower_atomic_state *state)
{
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   default:
      return;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform &&
       instr->variables[0]->var->data.mode != nir_var_shader_storage)
      return; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);
   unsigned uniform_loc = instr->variables[0]->var->data.location;

   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   new_instr->const_index[0] =
      state->shader_program->UniformStorage[uniform_loc].opaque[state->shader->stage].index;

   nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
   offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

   nir_deref *tail = &instr->variables[0]->deref;
   while (tail->child != NULL) {
      assert(tail->child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);
      tail = tail->child;

      unsigned child_array_elements = tail->child != NULL ?
         glsl_get_aoa_size(tail->type) : 1;

      offset_const->value.u[0] += deref_array->base_offset *
         child_array_elements * ATOMIC_COUNTER_SIZE;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_load_const_instr *atomic_counter_size =
            nir_load_const_instr_create(mem_ctx, 1);
         atomic_counter_size->value.u[0] =
            child_array_elements * ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = offset_def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa));
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);
}
/* Build a register source for a local-variable deref, turning array derefs
 * into a constant base offset plus an optional indirect offset.
 */
static nir_src
get_deref_reg_src(nir_deref_var *deref, nir_instr *instr,
                  struct locals_to_regs_state *state)
{
   nir_src src;

   src.is_ssa = false;
   src.reg.reg = get_reg_for_deref(deref, state);
   src.reg.base_offset = 0;
   src.reg.indirect = NULL;

   /* It is possible for a user to create a shader that has an array with a
    * single element and then proceed to access it indirectly.  Indirectly
    * accessing a non-array register is not allowed in NIR.  In order to
    * handle this case we just convert it to a direct reference.
    */
   if (src.reg.reg->num_array_elems == 0)
      return src;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *deref_array = nir_deref_as_array(tail);

      src.reg.base_offset *= glsl_get_length(parent_type);
      src.reg.base_offset += deref_array->base_offset;

      if (src.reg.indirect) {
         nir_load_const_instr *load_const =
            nir_load_const_instr_create(state->shader, 1, 32);
         load_const->value.u32[0] = glsl_get_length(parent_type);
         nir_instr_insert_before(instr, &load_const->instr);

         nir_alu_instr *mul = nir_alu_instr_create(state->shader, nir_op_imul);
         mul->src[0].src = *src.reg.indirect;
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &load_const->def;
         mul->dest.write_mask = 1;
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
         nir_instr_insert_before(instr, &mul->instr);

         src.reg.indirect->is_ssa = true;
         src.reg.indirect->ssa = &mul->dest.dest.ssa;
      }

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         if (src.reg.indirect == NULL) {
            src.reg.indirect = ralloc(state->shader, nir_src);
            nir_src_copy(src.reg.indirect, &deref_array->indirect,
                         state->shader);
         } else {
            nir_alu_instr *add = nir_alu_instr_create(state->shader,
                                                      nir_op_iadd);
            add->src[0].src = *src.reg.indirect;
            nir_src_copy(&add->src[1].src, &deref_array->indirect, add);
            add->dest.write_mask = 1;
            nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL);
            nir_instr_insert_before(instr, &add->instr);

            src.reg.indirect->is_ssa = true;
            src.reg.indirect->ssa = &add->dest.dest.ssa;
         }
      }
   }

   return src;
}
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* In the case that any outputs/inputs have unsized types, then we need to
    * guess the bit-size.  In this case, the validator ensures that all
    * bit-sizes match so we can just take the bit-size from the first
    * output/input with an unsized type.  If all the outputs/inputs are sized
    * then we don't need to guess the bit-size at all because the code we
    * generate for constant opcodes in this case already knows the sizes of
    * the types involved and does not need the provided bit-size for anything
    * (although it still requires a valid bit-size to be passed in).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_sizes[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i); j++) {
         switch (load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         default:
            unreachable("Invalid bit size");
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
static void
lower_instr(nir_intrinsic_instr *instr, nir_function_impl *impl)
{
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   default:
      return;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform)
      return; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);

   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   new_instr->const_index[0] =
      (int) instr->variables[0]->var->data.atomic.buffer_index;

   nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
   offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

   if (instr->variables[0]->deref.child != NULL) {
      assert(instr->variables[0]->deref.child->deref_type ==
             nir_deref_type_array);
      nir_deref_array *deref_array =
         nir_deref_as_array(instr->variables[0]->deref.child);
      assert(deref_array->deref.child == NULL);

      offset_const->value.u[0] +=
         deref_array->base_offset * ATOMIC_COUNTER_SIZE;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_load_const_instr *atomic_counter_size =
            nir_load_const_instr_create(mem_ctx, 1);
         atomic_counter_size->value.u[0] = ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mem_ctx);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = &offset_const->def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa),
                               mem_ctx);
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);
}
/* Walk an I/O variable deref chain and return the constant part of the
 * offset; any indirect array indexing is accumulated into *indirect via
 * imul/iadd instructions emitted before 'instr'.
 */
static unsigned
get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
              struct lower_io_state *state)
{
   bool found_indirect = false;
   unsigned base_offset = 0;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         base_offset += size * deref_array->base_offset;

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_load_const_instr *load_const =
               nir_load_const_instr_create(state->mem_ctx, 1);
            load_const->value.u[0] = size;
            nir_instr_insert_before(instr, &load_const->instr);

            nir_alu_instr *mul = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_imul);
            mul->src[0].src.is_ssa = true;
            mul->src[0].src.ssa = &load_const->def;
            nir_src_copy(&mul->src[1].src, &deref_array->indirect,
                         state->mem_ctx);
            mul->dest.write_mask = 1;
            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
            nir_instr_insert_before(instr, &mul->instr);

            if (found_indirect) {
               nir_alu_instr *add = nir_alu_instr_create(state->mem_ctx,
                                                         nir_op_iadd);
               add->src[0].src = *indirect;
               add->src[1].src.is_ssa = true;
               add->src[1].src.ssa = &mul->dest.dest.ssa;
               add->dest.write_mask = 1;
               nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
               nir_instr_insert_before(instr, &add->instr);

               indirect->is_ssa = true;
               indirect->ssa = &add->dest.dest.ssa;
            } else {
               indirect->is_ssa = true;
               indirect->ssa = &mul->dest.dest.ssa;
               found_indirect = true;
            }
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         for (unsigned i = 0; i < deref_struct->index; i++)
            base_offset += type_size(glsl_get_struct_field(parent_type, i));
      }
   }

   return base_offset;
}
/* Build the NIR instructions for a replacement value from the search
 * expression tree and return the result wrapped in an ALU source.
 */
static nir_alu_src
construct_value(const nir_search_value *value, nir_alu_type type,
                unsigned num_components, struct match_state *state,
                nir_instr *instr, void *mem_ctx)
{
   switch (value->type) {
   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(value);

      if (nir_op_infos[expr->opcode].output_size != 0)
         num_components = nir_op_infos[expr->opcode].output_size;

      nir_alu_instr *alu = nir_alu_instr_create(mem_ctx, expr->opcode);
      nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components, NULL);
      alu->dest.write_mask = (1 << num_components) - 1;
      alu->dest.saturate = false;

      for (unsigned i = 0; i < nir_op_infos[expr->opcode].num_inputs; i++) {
         /* If the source is an explicitly sized source, then we need to reset
          * the number of components to match.
          */
         if (nir_op_infos[alu->op].input_sizes[i] != 0)
            num_components = nir_op_infos[alu->op].input_sizes[i];

         alu->src[i] = construct_value(expr->srcs[i],
                                       nir_op_infos[alu->op].input_types[i],
                                       num_components,
                                       state, instr, mem_ctx);
      }

      nir_instr_insert_before(instr, &alu->instr);

      nir_alu_src val;
      val.src = nir_src_for_ssa(&alu->dest.dest.ssa);
      val.negate = false;
      val.abs = false;
      memcpy(val.swizzle, identity_swizzle, sizeof val.swizzle);

      return val;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(value);
      assert(state->variables_seen & (1 << var->variable));

      nir_alu_src val = { NIR_SRC_INIT };
      nir_alu_src_copy(&val, &state->variables[var->variable], mem_ctx);

      assert(!var->is_constant);

      return val;
   }

   case nir_search_value_constant: {
      const nir_search_constant *c = nir_search_value_as_constant(value);
      nir_load_const_instr *load = nir_load_const_instr_create(mem_ctx, 1);

      switch (type) {
      case nir_type_float:
         load->def.name = ralloc_asprintf(mem_ctx, "%f", c->data.f);
         load->value.f[0] = c->data.f;
         break;
      case nir_type_int:
         load->def.name = ralloc_asprintf(mem_ctx, "%d", c->data.i);
         load->value.i[0] = c->data.i;
         break;
      case nir_type_unsigned:
      case nir_type_bool:
         load->value.u[0] = c->data.u;
         break;
      default:
         unreachable("Invalid alu source type");
      }

      nir_instr_insert_before(instr, &load->instr);

      nir_alu_src val;
      val.src = nir_src_for_ssa(&load->def);
      val.negate = false;
      val.abs = false;
      memset(val.swizzle, 0, sizeof val.swizzle);

      return val;
   }

   default:
      unreachable("Invalid search value type");
   }
}