static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}
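/* For orientation, a sketch of what the two sizing callbacks compute
 * (hypothetical example_* names; arrays and structs omitted for brevity,
 * whereas the real type_size_scalar_bytes/type_size_vec4_bytes helpers
 * handle aggregate types too).  The scalar backend charges 4 bytes per
 * component, while the vec4 backend rounds every element up to a full
 * 16-byte vec4 slot.
 */
static int
example_type_size_scalar_bytes(const struct glsl_type *type)
{
   /* e.g. a vec3 costs 12 bytes: components at byte offsets 0, 4 and 8 */
   return glsl_get_components(type) * 4;
}

static int
example_type_size_vec4_bytes(const struct glsl_type *type)
{
   /* e.g. a vec3 still occupies one full 16-byte slot; a mat4 takes four */
   return glsl_get_matrix_columns(type) * 16;
}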
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   struct gl_shader *shader =
      shader_prog ? shader_prog->_LinkedShaders[stage] : NULL;
   bool debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader, options);
   } else {
      nir = prog_to_nir(prog, options);
      nir_convert_to_ssa(nir); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_lower_tex_projector(nir);
   nir_validate_shader(nir);

   nir_normalize_cubemap_coords(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

   nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   nir_lower_var_copies(nir);
   nir_validate_shader(nir);

   /* Get rid of split copies */
   nir_optimize(nir, is_scalar);

   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      nir_lower_io(nir, type_size_scalar);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_vec4);

      foreach_list_typed(nir_variable, var, node, &nir->outputs)
         var->data.driver_location = var->data.location;

      nir_lower_io(nir, type_size_vec4);
   }
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   if (shader_prog) {
      nir_lower_samplers(nir, shader_prog);
      nir_validate_shader(nir);
   }

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);

   nir_optimize(nir, is_scalar);

   if (brw->gen >= 6) {
      /* Try and fuse multiply-adds */
      nir_opt_peephole_ffma(nir);
      nir_validate_shader(nir);
   }

   nir_opt_algebraic_late(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_lower_to_source_mods(nir);
   nir_validate_shader(nir);

   nir_copy_prop(nir);
   nir_validate_shader(nir);

   nir_opt_dce(nir);
   nir_validate_shader(nir);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   nir_convert_from_ssa(nir, is_scalar);
   nir_validate_shader(nir);

   if (!is_scalar) {
      nir_lower_vec_to_movs(nir);
      nir_validate_shader(nir);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (brw->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}
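/* Usage sketch (a hypothetical wrapper, not part of the original file): an
 * ARB fragment program has no GLSL shader program, so the caller passes
 * NULL for shader_prog and brw_create_nir() takes the prog_to_nir() path
 * above.  The FS backend is scalar on every generation, hence true for
 * is_scalar; callers for other stages would consult the compiler's
 * per-stage scalar settings instead.
 */
static nir_shader *
example_create_fp_nir(struct brw_context *brw, struct gl_program *prog)
{
   return brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);
}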