/* Validates the destination of an ALU instruction: write-mask bounds and
 * the rule that saturate is only legal on float-typed results.
 */
static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   unsigned dest_size = dest->dest.is_ssa ? dest->dest.ssa.num_components
                                          : dest->dest.reg.reg->num_components;
   /* Packed registers have no fixed component count, so the write-mask
    * bound cannot be checked for them.
    */
   bool is_packed = !dest->dest.is_ssa && dest->dest.reg.reg->is_packed;
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, is_packed ||
                   !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   /* Note: use instr directly rather than re-deriving the ALU instruction
    * from state->instr — they are the same instruction here.
    */
   validate_assert(state,
          (nir_alu_type_get_base_type(nir_op_infos[instr->op].output_type) ==
           nir_type_float) ||
          !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}
/* Walks a deref chain from its head to its tail, checking that each link's
 * type is consistent with its parent's type and that indirect array indices
 * are valid sources.
 */
static void
validate_deref_chain(nir_deref *deref, validate_state *state)
{
   /* Each child deref must be ralloc'd off its parent so the chain is freed
    * as a unit.
    */
   validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         /* parent is non-NULL here: a chain starts with a nir_deref_var, so
          * an array deref can never be the first link.
          */
         validate_assert(state, deref->type == glsl_get_array_element(parent->type));
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state);
         break;

      case nir_deref_type_struct:
         assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
         validate_assert(state, deref->type ==
                glsl_get_struct_field(parent->type,
                                      nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         validate_assert(state, !"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
/* Validates one SSA source: the value must be defined in the current
 * function, and the use is recorded for the later def/use cross-check.
 */
static void
validate_ssa_src(nir_src *src, validate_state *state)
{
   validate_assert(state, src->ssa != NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->ssa_defs, src->ssa);
   validate_assert(state, entry);

   /* Without a recorded definition there is nothing further to check. */
   if (!entry)
      return;

   ssa_def_validate_state *def_state = (ssa_def_validate_state *) entry->data;

   validate_assert(state, def_state->where_defined == state->impl &&
                   "using an SSA value defined in a different function");

   if (state->instr == NULL) {
      /* Not inside an instruction, so this must be an if-condition use. */
      validate_assert(state, state->if_stmt);
      _mesa_set_add(def_state->if_uses, src);
   } else {
      _mesa_set_add(def_state->uses, src);
   }

   /* TODO validate that the use is dominated by the definition */
}
/* Checks that a referenced local variable was declared in the function
 * currently being validated.
 */
static void
validate_var_use(nir_variable *var, validate_state *state)
{
   /* Only function-local variables are tracked in var_defs. */
   if (var->data.mode != nir_var_local)
      return;

   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}
/* Validates a variable deref: it must exist, be allocated off the given
 * instruction (its memory context), and its head type must match the
 * variable's declared type; then the variable use and the rest of the
 * chain are validated.
 */
static void
validate_deref_var(void *parent_mem_ctx, nir_deref_var *deref, validate_state *state)
{
   validate_assert(state, deref != NULL);
   /* The deref must be ralloc'd off the instruction that owns it. */
   validate_assert(state, ralloc_parent(deref) == parent_mem_ctx);
   validate_assert(state, deref->deref.type == deref->var->type);

   validate_var_use(deref->var, state);

   validate_deref_chain(&deref->deref, state);
}
/* Validates a generic source: its back-pointer must reference the
 * instruction (or if-statement) currently being validated, then the
 * SSA- or register-specific checks run.
 */
static void
validate_src(nir_src *src, validate_state *state)
{
   /* Inside an instruction the src points back at it; otherwise we must be
    * validating an if-condition.
    */
   validate_assert(state, state->instr
                          ? src->parent_instr == state->instr
                          : src->parent_if == state->if_stmt);

   if (!src->is_ssa) {
      validate_reg_src(src, state);
      return;
   }

   validate_ssa_src(src, state);
}
/* Validates a generic source and, when non-zero, its required bit size and
 * component count (0 means "don't check").
 */
static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_size, unsigned num_components)
{
   /* The src's back-pointer must reference whatever we are validating:
    * the current instruction, or else the current if-statement.
    */
   validate_assert(state, state->instr
                          ? src->parent_instr == state->instr
                          : src->parent_if == state->if_stmt);

   if (!src->is_ssa) {
      validate_reg_src(src, state, bit_size, num_components);
      return;
   }

   validate_ssa_src(src, state, bit_size, num_components);
}
/* Validates a generic destination and, when non-zero, its required bit
 * size and component count (0 means "don't check").
 */
static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_size, unsigned num_components)
{
   if (!dest->is_ssa) {
      validate_reg_dest(&dest->reg, state, bit_size, num_components);
      return;
   }

   /* A bit_size/num_components of 0 means the caller imposes no
    * constraint on that property.
    */
   if (bit_size)
      validate_assert(state, dest->ssa.bit_size == bit_size);
   if (num_components)
      validate_assert(state, dest->ssa.num_components == num_components);

   validate_ssa_def(&dest->ssa, state);
}
/* Validates a single instruction: it must live in the block currently
 * being walked, and is dispatched to the per-type validator.  state->instr
 * is set for the duration so src/dest validators can check back-pointers.
 */
static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      break;

   default:
      /* Fixed copy-paste error: this dispatcher handles every instruction
       * type, not only ALU instructions.
       */
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}
/* Validates a call: the return deref (or lack of one) must match the
 * callee's return type, and every argument deref must match the
 * corresponding declared parameter type.
 */
static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   if (instr->return_deref != NULL) {
      validate_assert(state, instr->return_deref->deref.type ==
                      instr->callee->return_type);
      validate_deref_var(instr, instr->return_deref, state);
   } else {
      /* No return deref is only legal for void-returning callees. */
      validate_assert(state, glsl_type_is_void(instr->callee->return_type));
   }

   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned p = 0; p < instr->num_params; p++) {
      validate_assert(state, instr->callee->params[p].type ==
                      instr->params[p]->deref.type);
      validate_deref_var(instr, instr->params[p], state);
   }
}
/* Validates a register destination: the register must be known, declared in
 * the current function (unless global), and any array access must be in
 * bounds with at most one level of indirection.  The def is recorded for
 * the later def/use cross-check.
 */
static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state)
{
   validate_assert(state, dest->reg != NULL);

   /* The destination must point back at the instruction being validated. */
   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   if (!dest->reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
             "writing to a register declared in a different function");
   }

   /* num_array_elems == 0 means the register is not an array. */
   validate_assert(state, (dest->reg->num_array_elems == 0 ||
          dest->base_offset < dest->reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      /* An indirect index may itself be a register source, but that inner
       * source must not be indirect again.
       */
      validate_assert(state, (dest->indirect->is_ssa ||
              dest->indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(dest->indirect, state);
   }
}
/* Validates a register source: the register must be known, declared in the
 * current function (unless global), and any array access must be in bounds
 * with at most one level of indirection.  The use is recorded for the later
 * def/use cross-check.
 */
static void
validate_reg_src(nir_src *src, validate_state *state)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);

   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      /* Outside an instruction this must be an if-condition use. */
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   if (!src->reg.reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
             "using a register declared in a different function");
   }

   /* num_array_elems == 0 means the register is not an array. */
   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
          src->reg.base_offset < src->reg.reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      /* The indirect index itself must not be indirect again. */
      validate_assert(state, (src->reg.indirect->is_ssa ||
              src->reg.indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state);
   }
}
/* Validates one ALU source: swizzle components used by the op must be in
 * range, and the source's bit size must agree with the opcode's declared
 * input type (or, for unsized ops, with the destination's bit size).
 */
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components;
   unsigned src_bit_size;
   if (src->src.is_ssa) {
      src_bit_size = src->src.ssa->bit_size;
      num_components = src->src.ssa->num_components;
   } else {
      src_bit_size = src->src.reg.reg->bit_size;

      if (src->src.reg.reg->is_packed)
         num_components = 4; /* can't check anything */
      else
         num_components = src->src.reg.reg->num_components;
   }
   for (unsigned i = 0; i < 4; i++) {
      validate_assert(state, src->swizzle[i] < 4);

      /* Only channels the opcode actually reads need a valid component. */
      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   nir_alu_type src_type = nir_op_infos[instr->op].input_types[index];

   /* 8-bit float isn't a thing */
   if (nir_alu_type_get_base_type(src_type) == nir_type_float)
      validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                             src_bit_size == 64);

   if (nir_alu_type_get_type_size(src_type)) {
      /* This source has an explicit bit size */
      validate_assert(state, nir_alu_type_get_type_size(src_type) == src_bit_size);
   } else {
      /* Unsized input: if the output type is also unsized, the source must
       * match the destination's bit size.
       */
      if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type)) {
         unsigned dest_bit_size =
            instr->dest.dest.is_ssa ? instr->dest.dest.ssa.bit_size
                                    : instr->dest.dest.reg.reg->bit_size;
         validate_assert(state, dest_bit_size == src_bit_size);
      }
   }

   validate_src(&src->src, state);
}
/* Validates an ALU instruction: the opcode must be in range, then each
 * source and the destination are validated in turn.
 */
static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   const unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
   for (unsigned src_idx = 0; src_idx < num_inputs; src_idx++)
      validate_alu_src(instr, src_idx, state);

   validate_alu_dest(instr, state);
}
/* Validates the phi source corresponding to predecessor block `pred`.
 * Called while walking predecessors so that each SSA use is only validated
 * after its definition has been seen.  Aborts if no source matches pred.
 */
static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred != pred)
         continue;

      validate_assert(state, src->src.is_ssa);
      /* Every phi source must match the destination's size exactly. */
      validate_src(&src->src, state, instr->dest.ssa.bit_size,
                   instr->dest.ssa.num_components);
      state->instr = NULL;
      return;
   }

   /* A phi must have exactly one source per predecessor. */
   abort();
}
/* Validates a phi's destination and source count.  The sources themselves
 * are deliberately left for validate_phi_src, reached from each predecessor
 * block, so no SSA use is validated before its definition.
 */
static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   validate_dest(&instr->dest, state);

   exec_list_validate(&instr->srcs);

   /* One source per predecessor, no more, no less. */
   const unsigned num_preds = state->block->predecessors->entries;
   validate_assert(state, exec_list_length(&instr->srcs) == num_preds);
}
/* Validates one ALU source's swizzle: every component read by the opcode
 * must index a real component of the source value.
 */
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components;
   if (src->src.is_ssa) {
      num_components = src->src.ssa->num_components;
   } else if (src->src.reg.reg->is_packed) {
      num_components = 4; /* can't check anything */
   } else {
      num_components = src->src.reg.reg->num_components;
   }

   for (unsigned chan = 0; chan < 4; chan++) {
      validate_assert(state, src->swizzle[chan] < 4);

      /* Only channels the opcode actually reads need a valid component. */
      if (nir_alu_instr_channel_used(instr, index, chan))
         validate_assert(state, src->swizzle[chan] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}
/* Validates an SSA definition and registers it in state->ssa_defs so later
 * uses can be checked against it.
 */
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   /* Each SSA index may be defined exactly once; test before setting the
    * bit so a duplicate definition is caught.
    */
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   /* The def must point back at the instruction being validated. */
   validate_assert(state, def->parent_instr == state->instr);

   validate_assert(state, def->num_components <= 4);

   list_validate(&def->uses);
   list_validate(&def->if_uses);

   /* Record where the def lives and create empty use sets; uses discovered
    * later are added by validate_ssa_src and cross-checked afterwards.
    */
   ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
                                              ssa_def_validate_state);
   def_state->where_defined = state->impl;
   def_state->uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   def_state->if_uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);
   _mesa_hash_table_insert(state->ssa_defs, def, def_state);
}
/* Validates an ALU instruction, including bit-size consistency: explicitly
 * sized opcode types must be matched exactly, and all unsized sources (and
 * an unsized destination) must agree on one common instruction bit size.
 */
static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   /* Common bit size of the unsized operands; 0 until the first unsized
    * source fixes it.
    */
   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         /* Explicitly sized input type: source must match it exactly. */
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         /* First unsized source establishes the instruction bit size. */
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}
/* Validates a texture instruction: each source type may appear at most
 * once, and the texture/sampler derefs (when present) must be valid.
 */
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   /* Zero-initialize; unlisted elements of a partial initializer are 0. */
   bool src_type_seen[nir_num_tex_src_types] = { false };

   for (unsigned s = 0; s < instr->num_srcs; s++) {
      /* A given source type (coord, lod, bias, ...) may only occur once. */
      validate_assert(state, !src_type_seen[instr->src[s].src_type]);
      src_type_seen[instr->src[s].src_type] = true;
      validate_src(&instr->src[s].src, state);
   }

   if (instr->texture)
      validate_deref_var(instr, instr->texture, state);
   if (instr->sampler)
      validate_deref_var(instr, instr->sampler, state);

   validate_dest(&instr->dest, state);
}
/* Walks a deref chain from head to tail, checking each link's type against
 * its parent's type.  The variable mode matters because shared variables
 * are allowed array derefs on vectors while the rest of NIR is not.
 */
static void
validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
                     validate_state *state)
{
   /* Each child deref must be ralloc'd off its parent so the chain is
    * freed as a unit.
    */
   validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         if (mode == nir_var_shared) {
            /* Shared variables have a bit more relaxed rules because we need
             * to be able to handle array derefs on vectors.  Fortunately,
             * nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type) ||
                            glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type));
         }
         validate_assert(state, deref->type == glsl_get_array_element(parent->type));
         /* Indirect array indices are always 32-bit scalars. */
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1);
         break;

      case nir_deref_type_struct:
         assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
         validate_assert(state, deref->type ==
                glsl_get_struct_field(parent->type,
                                      nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         validate_assert(state, !"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
/* Validates an intrinsic instruction against its declared info table entry:
 * per-source component counts, variable derefs, destination component
 * count, plus extra per-intrinsic checks for the variable load/store/copy
 * intrinsics.
 */
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      /* A declared component count of 0 means "uses instr->num_components". */
      unsigned components_read =
         nir_intrinsic_infos[instr->intrinsic].src_components[i];
      if (components_read == 0)
         components_read = instr->num_components;

      validate_assert(state, components_read > 0);

      /* Packed registers have no fixed component count to check against. */
      if (instr->src[i].is_ssa) {
         validate_assert(state, components_read <=
                         instr->src[i].ssa->num_components);
      } else if (!instr->src[i].reg.reg->is_packed) {
         validate_assert(state, components_read <=
                         instr->src[i].reg.reg->num_components);
      }

      validate_src(&instr->src[i], state);
   }

   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      validate_deref_var(instr, instr->variables[i], state);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      /* Same 0-means-num_components convention as for sources. */
      unsigned components_written =
         nir_intrinsic_infos[instr->intrinsic].dest_components;
      if (components_written == 0)
         components_written = instr->num_components;

      validate_assert(state, components_written > 0);

      if (instr->dest.is_ssa) {
         validate_assert(state, components_written <=
                         instr->dest.ssa.num_components);
      } else if (!instr->dest.reg.reg->is_packed) {
         validate_assert(state, components_written <=
                         instr->dest.reg.reg->num_components);
      }

      validate_dest(&instr->dest, state);
   }

   /* Intrinsic-specific rules. */
   switch (instr->intrinsic) {
   case nir_intrinsic_load_var: {
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      /* Loads must be of a vector/scalar, except uniform subroutine
       * handles which are opaque.
       */
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
             (instr->variables[0]->var->data.mode == nir_var_uniform &&
              glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(type));
      break;
   }
   case nir_intrinsic_store_var: {
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
             (instr->variables[0]->var->data.mode == nir_var_uniform &&
              glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(type));
      /* Stores to read-only storage classes are illegal. */
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      /* The write mask may only cover components that exist. */
      validate_assert(state, (nir_intrinsic_write_mask(instr) &
                              ~((1 << instr->num_components) - 1)) == 0);
      break;
   }
   case nir_intrinsic_copy_var:
      /* Source and destination derefs must resolve to the same type. */
      validate_assert(state, nir_deref_tail(&instr->variables[0]->deref)->type ==
             nir_deref_tail(&instr->variables[1]->deref)->type);
      /* The destination (variables[0]) must be writable. */
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      break;
   default:
      break;
   }
}