/* Per-block callback for the constant-folding pass.  Visits every
 * instruction in the block, attempts to fold the kinds we know how to
 * handle, and accumulates whether anything changed in state->progress.
 * Always returns true so the block walk continues.
 */
static bool
constant_fold_block(nir_block *block, void *void_state)
{
   struct constant_fold_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      bool folded = false;

      if (instr->type == nir_instr_type_alu) {
         folded = constant_fold_alu_instr(nir_instr_as_alu(instr),
                                          state->mem_ctx);
      } else if (instr->type == nir_instr_type_intrinsic) {
         folded = constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
      } else if (instr->type == nir_instr_type_tex) {
         folded = constant_fold_tex_instr(nir_instr_as_tex(instr));
      }
      /* any other instruction type: don't know how to constant fold */

      state->progress |= folded;
   }

   return true;
}
/* Rewrite multi-plane (e.g. YUV) texture fetches in this block: for each
 * tex instruction carrying a nir_tex_src_plane source, retarget the
 * texture/sampler index to the per-plane sampler recorded in
 * state->sampler_map, then remove the plane source entirely. */
nir_foreach_instr(instr, block) {
   if (instr->type != nir_instr_type_tex)
      continue;

   nir_tex_instr *tex = nir_instr_as_tex(instr);

   /* plane_index < 0 means this tex instruction has no plane source,
    * so there is nothing to lower. */
   int plane_index = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_index < 0)
      continue;

   /* The plane selector must be a compile-time constant. */
   nir_const_value *plane = nir_src_as_const_value(tex->src[plane_index].src);
   assume(plane);

   /* Plane 0 keeps the original (Y) sampler; planes 1..N are remapped. */
   if (plane->i32[0] > 0) {
      unsigned y_samp = tex->texture_index;

      /* This lowering assumes combined texture/sampler indices. */
      assume(tex->texture_index == tex->sampler_index);
      /* 3-plane formats allow plane indices 0..2; everything else 0..1. */
      assume(((state->lower_3plane & (1 << y_samp)) && plane->i32[0] < 3) ||
             (plane->i32[0] < 2));

      tex->texture_index = tex->sampler_index =
         state->sampler_map[y_samp][plane->i32[0] - 1];
   }

   /* The plane source has been consumed; drop it from the instruction. */
   nir_tex_instr_remove_src(tex, plane_index);
}
/* Accumulate per-shader info from every instruction in this block;
 * only intrinsics and texture instructions contribute anything. */
nir_foreach_instr(instr, block) {
   if (instr->type == nir_instr_type_intrinsic)
      gather_intrinsic_info(nir_instr_as_intrinsic(instr), info);
   else if (instr->type == nir_instr_type_tex)
      gather_tex_info(nir_instr_as_tex(instr), info);
   /* all other instruction types are ignored */
}
/* Per-block callback for the tex-lowering pass.  For each tex
 * instruction, applies (in this order) projector lowering, rect
 * coordinate lowering, coordinate saturation, and result swizzling,
 * as requested by the pass options.  Sets state->progress whenever
 * any lowering was performed.  Always returns true to keep walking. */
static bool
nir_lower_tex_block(nir_block *block, void *void_state)
{
   lower_tex_state *state = void_state;
   nir_builder *b = &state->b;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_tex)
         continue;

      nir_tex_instr *tex = nir_instr_as_tex(instr);

      /* Projector lowering is requested per sampler dimensionality. */
      bool lower_txp = !!(state->options->lower_txp & (1 << tex->sampler_dim));

      /* mask of src coords to saturate (clamp): */
      unsigned sat_mask = 0;

      /* Each saturate_{r,t,s} option is a bitmask over sampler indices;
       * translate the per-sampler request into a per-component mask. */
      if ((1 << tex->sampler_index) & state->options->saturate_r)
         sat_mask |= (1 << 2);    /* .z */
      if ((1 << tex->sampler_index) & state->options->saturate_t)
         sat_mask |= (1 << 1);    /* .y */
      if ((1 << tex->sampler_index) & state->options->saturate_s)
         sat_mask |= (1 << 0);    /* .x */

      /* If we are clamping any coords, we must lower projector first
       * as clamping happens *after* projection:
       */
      if (lower_txp || sat_mask) {
         project_src(b, tex);
         state->progress = true;
      }

      if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) &&
          state->options->lower_rect) {
         lower_rect(b, tex);
         state->progress = true;
      }

      if (sat_mask) {
         saturate_src(b, tex, sat_mask);
         state->progress = true;
      }

      /* Don't swizzle query results, or shadow comparisons whose result
       * is already a single comparison value (new-style shadow). */
      if (((1 << tex->texture_index) & state->options->swizzle_result) &&
          !nir_tex_instr_is_query(tex) &&
          !(tex->is_shadow && tex->is_new_style_shadow)) {
         swizzle_result(b, tex,
                        state->options->swizzles[tex->texture_index]);
         state->progress = true;
      }
   }

   return true;
}
/* Accumulate shader info from each instruction in this block.  Call
 * instructions are invalid here: info gathering requires all functions
 * to have been inlined already. */
nir_foreach_instr(block, instr) {
   if (instr->type == nir_instr_type_intrinsic) {
      gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader);
   } else if (instr->type == nir_instr_type_tex) {
      gather_tex_info(nir_instr_as_tex(instr), shader);
   } else if (instr->type == nir_instr_type_call) {
      assert(!"nir_shader_gather_info only works if functions are inlined");
   }
   /* remaining instruction types contribute nothing */
}
/* Per-block callback for the sampler-lowering pass: hands every tex
 * instruction in the block to lower_sampler().  Always returns true
 * so the walk continues. */
static bool
lower_block_cb(nir_block *block, void *_state)
{
   lower_state *state = (lower_state *) _state;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_tex)
         continue;

      lower_sampler(nir_instr_as_tex(instr), state->shader_program,
                    state->stage, &state->builder);
   }

   return true;
}
/* Seeds the dead-code-elimination worklist for one instruction.  Any
 * instruction with unremovable side effects, or whose destination is
 * not SSA (so its uses cannot be tracked), is considered live up front
 * and pushed onto the worklist. */
static void
init_instr(nir_instr *instr, struct exec_list *worklist)
{
   nir_alu_instr *alu_instr;
   nir_intrinsic_instr *intrin_instr;
   nir_tex_instr *tex_instr;

   /* We use the pass_flags to store the live/dead information. In DCE, we
    * just treat it as a zero/non-zero boolean for whether or not the
    * instruction is live.
    */
   instr->pass_flags = 0;

   switch (instr->type) {
   case nir_instr_type_call:
   case nir_instr_type_jump:
      /* Calls and jumps affect control flow; they are always live. */
      worklist_push(worklist, instr);
      break;

   case nir_instr_type_alu:
      alu_instr = nir_instr_as_alu(instr);
      /* Non-SSA destinations can't have their uses tracked, so keep them. */
      if (!alu_instr->dest.dest.is_ssa)
         worklist_push(worklist, instr);
      break;

   case nir_instr_type_intrinsic:
      intrin_instr = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin_instr->intrinsic].flags &
          NIR_INTRINSIC_CAN_ELIMINATE) {
         /* Even an eliminable intrinsic must stay live if it writes a
          * non-SSA destination we can't track. */
         if (nir_intrinsic_infos[intrin_instr->intrinsic].has_dest &&
             !intrin_instr->dest.is_ssa) {
            worklist_push(worklist, instr);
         }
      } else {
         /* Intrinsics with side effects are always live. */
         worklist_push(worklist, instr);
      }
      break;

   case nir_instr_type_tex:
      tex_instr = nir_instr_as_tex(instr);
      if (!tex_instr->dest.is_ssa)
         worklist_push(worklist, instr);
      break;

   default:
      /* Everything else starts out dead until proven live. */
      break;
   }
}
/* Dispatches per-instruction validation based on the instruction type.
 * state->instr tracks the instruction currently being validated so that
 * failures elsewhere in the validator can report which instruction was
 * being checked. */
static void
validate_instr(nir_instr *instr, validate_state *state)
{
   /* The instruction must actually live in the block being walked. */
   assert(instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      /* Jumps have no operands to validate here. */
      break;

   default:
      /* Fix: this default covers every instruction type, but the message
       * previously claimed "Invalid ALU instruction type". */
      assert(!"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}
/* Per-block callback: lowers every multisample texel-fetch (txf_ms)
 * candidate tex instruction in the block.  Always returns true so the
 * block walk continues. */
static bool
vc4_nir_lower_txf_ms_block(nir_block *block, void *arg)
{
   struct vc4_compile *c = arg;
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);

   /* Builder is set up per block; the lowering inserts new instructions. */
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_tex)
         continue;

      vc4_nir_lower_txf_ms_instr(c, &b, nir_instr_as_tex(instr));
   }

   return true;
}
/* Propagates the "invariant" requirement backwards through a single
 * instruction: if an instruction's destination must be invariant, then
 * its sources (and, for phis, the controlling CF nodes) must be too. */
static void
propagate_invariant_instr(nir_instr *instr, struct set *invariants)
{
   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      if (!dest_is_invariant(&alu->dest.dest, invariants))
         break;

      /* An invariant result must be computed exactly as written. */
      alu->exact = true;
      nir_foreach_src(instr, add_src_cb, invariants);
      break;
   }

   case nir_instr_type_tex: {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      if (dest_is_invariant(&tex->dest, invariants))
         nir_foreach_src(instr, add_src_cb, invariants);
      break;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_copy_var:
         /* If the destination is invariant then so is the source */
         if (var_is_invariant(intrin->variables[0]->var, invariants))
            add_var(intrin->variables[1]->var, invariants);
         break;

      case nir_intrinsic_load_var:
         if (dest_is_invariant(&intrin->dest, invariants))
            add_var(intrin->variables[0]->var, invariants);
         break;

      case nir_intrinsic_store_var:
         if (var_is_invariant(intrin->variables[0]->var, invariants))
            add_src(&intrin->src[0], invariants);
         break;

      default:
         /* Nothing to do */
         break;
      }
   }
   /* NOTE(review): the intrinsic case above has no break and falls
    * through into the cases below.  That is currently harmless (those
    * cases only break), but it looks unintentional — confirm intent and
    * add a break or an explicit fallthrough marker. */

   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_load_const:
      break; /* Nothing to do */

   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (!dest_is_invariant(&phi->dest, invariants))
         break;

      /* Every phi source and each predecessor's control flow must be
       * invariant for the phi result to be invariant. */
      nir_foreach_phi_src(src, phi) {
         add_src(&src->src, invariants);
         add_cf_node(&src->pred->cf_node, invariants);
      }
      break;
   }

   case nir_instr_type_call:
      unreachable("This pass must be run after function inlining");

   case nir_instr_type_parallel_copy:
   default:
      unreachable("Cannot have this instruction type");
   }