/* Walks a deref chain and checks its internal consistency: each link must be
 * ralloc-parented to its predecessor, and each array/struct deref's type must
 * match what its parent's type implies.
 *
 * Fix: the array and struct cases dereferenced parent->type without first
 * checking that a parent exists.  A malformed chain that does not begin with
 * a nir_deref_type_var would crash the validator with a NULL dereference
 * instead of reporting the invalid IR; assert the invariant explicitly.
 */
static void
validate_deref_chain(nir_deref *deref, validate_state *state)
{
   assert(deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         /* An array deref can never start a chain, so a parent must exist. */
         assert(parent != NULL);
         assert(deref->type == glsl_get_array_element(parent->type));
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state);
         break;

      case nir_deref_type_struct:
         /* Likewise, a struct deref always has a parent. */
         assert(parent != NULL);
         assert(deref->type ==
                glsl_get_struct_field(parent->type,
                                      nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         assert(!"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
/* Validates a register source: the register must be registered with the
 * current validation state, the use is recorded in the appropriate use set,
 * and any array access must be statically in bounds with at most one level
 * of indirection.
 */
static void
validate_reg_src(nir_src *src, validate_state *state)
{
   assert(src->reg.reg != NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->regs, src->reg.reg);
   assert(entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   /* Record the use against the instruction or if-statement consuming it. */
   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      assert(state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   /* Local (non-global) registers may only be read inside the function that
    * declared them.
    */
   if (!src->reg.reg->is_global) {
      assert(reg_state->where_defined == state->impl &&
             "using a register declared in a different function");
   }

   assert((src->reg.reg->num_array_elems == 0 ||
           src->reg.base_offset < src->reg.reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      /* Indirect addressing only makes sense on register arrays. */
      assert(src->reg.reg->num_array_elems != 0);
      assert((src->reg.indirect->is_ssa ||
              src->reg.indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state);
   }
}
/* Validates a register destination: the register must be known to the
 * validation state, the def is recorded, and any array write must be
 * statically in bounds with at most one level of indirection.
 */
static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state)
{
   assert(dest->reg != NULL);

   /* The dest must point back at the instruction currently being checked. */
   assert(dest->parent_instr == state->instr);

   struct hash_entry *entry2 =
      _mesa_hash_table_search(state->regs, dest->reg);
   assert(entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   /* Local (non-global) registers may only be written inside the function
    * that declared them.
    */
   if (!dest->reg->is_global) {
      assert(reg_state->where_defined == state->impl &&
             "writing to a register declared in a different function");
   }

   assert((dest->reg->num_array_elems == 0 ||
           dest->base_offset < dest->reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (dest->indirect) {
      /* Indirect addressing only makes sense on register arrays. */
      assert(dest->reg->num_array_elems != 0);
      assert((dest->indirect->is_ssa ||
              dest->indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(dest->indirect, state);
   }
}
/* Walks a deref chain and checks its internal consistency: each link must be
 * ralloc-parented to its predecessor, and each array/struct deref's type must
 * agree with its parent's type.  The variable mode matters because shared
 * variables permit array derefs on vectors (nir_lower_io handles those).
 */
static void
validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
                     validate_state *state)
{
   validate_assert(state, deref->child == NULL ||
                          ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         if (mode == nir_var_shared) {
            /* Shared variables have a bit more relaxed rules because we need
             * to be able to handle array derefs on vectors.  Fortunately,
             * nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }

         validate_assert(state,
                         deref->type == glsl_get_array_element(parent->type));

         nir_deref_array *arr = nir_deref_as_array(deref);
         if (arr->deref_array_type == nir_deref_array_type_indirect)
            validate_src(&arr->indirect, state, 32, 1);
         break;
      }

      case nir_deref_type_struct:
         assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
         validate_assert(state,
                         deref->type ==
                         glsl_get_struct_field(parent->type,
                                               nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         validate_assert(state, !"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
/* Validates a texture instruction: the destination, each source (each source
 * type may appear at most once), and the sampler deref if present.
 */
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   validate_dest(&instr->dest, state);

   /* Track which source types we have already seen; duplicates are invalid. */
   bool src_type_seen[nir_num_tex_src_types] = { false };

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      assert(!src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state);
   }

   if (instr->sampler != NULL)
      validate_deref_var(instr, instr->sampler, state);
}
/* Validates one ALU source: swizzle components must stay inside the source's
 * component count, and the source bit size must be consistent with the
 * opcode's declared input type (or, for unsized opcodes, with the
 * destination's bit size).
 */
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components;
   unsigned src_bit_size;
   if (src->src.is_ssa) {
      src_bit_size = src->src.ssa->bit_size;
      num_components = src->src.ssa->num_components;
   } else {
      src_bit_size = src->src.reg.reg->bit_size;
      /* Packed registers have no meaningful component count to check. */
      num_components = src->src.reg.reg->is_packed ?
                       4 : src->src.reg.reg->num_components;
   }

   /* Every swizzle slot must be a valid channel; only the channels the
    * opcode actually reads need to exist in the source.
    */
   for (unsigned i = 0; i < 4; i++) {
      validate_assert(state, src->swizzle[i] < 4);
      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   nir_alu_type src_type = nir_op_infos[instr->op].input_types[index];

   /* 8-bit float isn't a thing */
   if (nir_alu_type_get_base_type(src_type) == nir_type_float)
      validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                             src_bit_size == 64);

   if (nir_alu_type_get_type_size(src_type)) {
      /* This source has an explicit bit size */
      validate_assert(state,
                      nir_alu_type_get_type_size(src_type) == src_bit_size);
   } else if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type)) {
      /* Both source and destination are unsized: they must agree. */
      unsigned dest_bit_size;
      if (instr->dest.dest.is_ssa)
         dest_bit_size = instr->dest.dest.ssa.bit_size;
      else
         dest_bit_size = instr->dest.dest.reg.reg->bit_size;
      validate_assert(state, dest_bit_size == src_bit_size);
   }

   validate_src(&src->src, state);
}
/* Validates the phi source corresponding to the given predecessor block.
 * Exactly one source must match the predecessor; if none does, the IR is
 * structurally broken and we abort outright.
 */
static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred,
                 validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred != pred)
         continue;

      /* Phi sources are always SSA and must match the dest's size/width. */
      validate_assert(state, src->src.is_ssa);
      validate_src(&src->src, state, instr->dest.ssa.bit_size,
                   instr->dest.ssa.num_components);
      state->instr = NULL;
      return;
   }

   /* No source for this predecessor: the phi is malformed. */
   abort();
}
/* Validates one ALU source: each swizzle slot must name a valid channel, and
 * every channel the opcode actually reads must exist in the source.
 */
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components;
   if (src->src.is_ssa) {
      num_components = src->src.ssa->num_components;
   } else {
      /* Packed registers have no meaningful component count to check. */
      num_components = src->src.reg.reg->is_packed ?
                       4 : src->src.reg.reg->num_components;
   }

   for (unsigned i = 0; i < 4; i++) {
      assert(src->swizzle[i] < 4);
      if (nir_alu_instr_channel_used(instr, index, i))
         assert(src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state);
}
/* Validates an intrinsic instruction against its static info table: each
 * source must provide at least as many components as the intrinsic reads,
 * the destination must hold at least as many as it writes, every variable
 * deref must be well-formed, and variable load/store/copy intrinsics must
 * respect variable-mode read/write restrictions.
 */
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

   for (unsigned i = 0; i < info->num_srcs; i++) {
      /* A table entry of 0 means "uses instr->num_components". */
      unsigned components_read = info->src_components[i];
      if (components_read == 0)
         components_read = instr->num_components;

      assert(components_read > 0);

      if (instr->src[i].is_ssa) {
         assert(components_read <= instr->src[i].ssa->num_components);
      } else if (!instr->src[i].reg.reg->is_packed) {
         assert(components_read <= instr->src[i].reg.reg->num_components);
      }

      validate_src(&instr->src[i], state);
   }

   if (info->has_dest) {
      /* A table entry of 0 means "uses instr->num_components". */
      unsigned components_written = info->dest_components;
      if (components_written == 0)
         components_written = instr->num_components;

      assert(components_written > 0);

      if (instr->dest.is_ssa) {
         assert(components_written <= instr->dest.ssa.num_components);
      } else if (!instr->dest.reg.reg->is_packed) {
         assert(components_written <= instr->dest.reg.reg->num_components);
      }

      validate_dest(&instr->dest, state);
   }

   for (unsigned i = 0; i < info->num_variables; i++)
      validate_deref_var(instr, instr->variables[i], state);

   /* Mode restrictions: outputs can't be read, inputs/uniforms can't be
    * written.
    */
   switch (instr->intrinsic) {
   case nir_intrinsic_load_var:
      assert(instr->variables[0]->var->data.mode != nir_var_shader_out);
      break;
   case nir_intrinsic_store_var:
      assert(instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      break;
   case nir_intrinsic_copy_var:
      assert(instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      assert(instr->variables[1]->var->data.mode != nir_var_shader_out);
      break;
   default:
      break;
   }
}
/* Validates an intrinsic instruction against its static info table: each
 * source must provide at least as many components as the intrinsic reads,
 * every variable deref must be well-formed, and the destination must hold
 * at least as many components as the intrinsic writes.  Variable
 * load/store/copy intrinsics additionally get type, component-count,
 * write-mask, and variable-mode checks.
 */
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

   for (unsigned i = 0; i < info->num_srcs; i++) {
      /* A table entry of 0 means "uses instr->num_components". */
      unsigned components_read = info->src_components[i];
      if (components_read == 0)
         components_read = instr->num_components;

      validate_assert(state, components_read > 0);

      if (instr->src[i].is_ssa) {
         validate_assert(state, components_read <= instr->src[i].ssa->num_components);
      } else if (!instr->src[i].reg.reg->is_packed) {
         validate_assert(state, components_read <= instr->src[i].reg.reg->num_components);
      }

      validate_src(&instr->src[i], state);
   }

   for (unsigned i = 0; i < info->num_variables; i++)
      validate_deref_var(instr, instr->variables[i], state);

   if (info->has_dest) {
      /* A table entry of 0 means "uses instr->num_components". */
      unsigned components_written = info->dest_components;
      if (components_written == 0)
         components_written = instr->num_components;

      validate_assert(state, components_written > 0);

      if (instr->dest.is_ssa) {
         validate_assert(state, components_written <= instr->dest.ssa.num_components);
      } else if (!instr->dest.reg.reg->is_packed) {
         validate_assert(state, components_written <= instr->dest.reg.reg->num_components);
      }

      validate_dest(&instr->dest, state);
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_load_var: {
      /* Loads must be from vector/scalar derefs (or subroutine uniforms)
       * and read exactly the deref's component count.
       */
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
             (instr->variables[0]->var->data.mode == nir_var_uniform &&
              glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(type));
      break;
   }

   case nir_intrinsic_store_var: {
      /* Same shape rules as loads, plus: read-only modes may not be
       * written, and the write mask may only cover real components.
       */
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
             (instr->variables[0]->var->data.mode == nir_var_uniform &&
              glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(type));
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_var:
      /* Source and destination deref tails must have identical types, and
       * the destination must be writable.
       */
      validate_assert(state, nir_deref_tail(&instr->variables[0]->deref)->type ==
             nir_deref_tail(&instr->variables[1]->deref)->type);
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
             instr->variables[0]->var->data.mode != nir_var_uniform &&
             instr->variables[0]->var->data.mode != nir_var_shader_storage);
      break;

   default:
      break;
   }
}