static nir_register *
get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   uint32_t hash = hash_deref(deref);

   assert(nir_deref_instr_get_variable(deref)->constant_initializer == NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(state->regs_table, hash, deref);
   if (entry)
      return entry->data;

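   /* Walk up the deref chain and multiply the lengths of every enclosing
    * array to get the total number of array elements behind this deref.
    */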
   unsigned array_size = 1;
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array)
         array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   assert(glsl_type_is_vector_or_scalar(deref->type));

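   /* No register exists for this deref yet; create one large enough to hold
    * the whole (possibly arrayed) value and cache it for later lookups.
    */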
   nir_register *reg = nir_local_reg_create(state->builder.impl);
   reg->num_components = glsl_get_vector_elements(deref->type);
   reg->num_array_elems = array_size > 1 ? array_size : 0;
   reg->bit_size = glsl_get_bit_size(deref->type);

   _mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg);

   return reg;
}
Example #2
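/* Replace a load_deref/store_deref of a variable that has been moved to
 * scratch with a load_scratch/store_scratch at the corresponding byte
 * offset.
 */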
static void
lower_load_store(nir_builder *b,
                 nir_intrinsic_instr *intrin,
                 glsl_type_size_align_func size_align)
{
   b->cursor = nir_before_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

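   /* Compute the byte offset of this access within the scratch area: the
    * offset of the deref inside the variable plus the variable's assigned
    * scratch location (stashed in var->data.location).
    */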
   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_build_deref_offset(b, deref, size_align),
                      var->data.location);

   unsigned align, UNUSED size;
   size_align(deref->type, &size, &align);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_scratch);
      load->num_components = intrin->num_components;
      load->src[0] = nir_src_for_ssa(offset);
      nir_intrinsic_set_align(load, align, 0);
      /* Booleans are stored in scratch as 32-bit integers, so load a 32-bit
       * value and convert it back to a boolean below.
       */
      unsigned bit_size = intrin->dest.ssa.bit_size;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        bit_size == 1 ? 32 : bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def *value = &load->dest.ssa;
      if (glsl_type_is_boolean(deref->type))
         value = nir_i2b(b, value);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(value));
   } else {
      assert(intrin->intrinsic == nir_intrinsic_store_deref);

      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      /* Booleans are stored in scratch as 32-bit integers */
      if (glsl_type_is_boolean(deref->type))
         value = nir_b2i32(b, value);

      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_scratch);
      store->num_components = intrin->num_components;
      store->src[0] = nir_src_for_ssa(value);
      store->src[1] = nir_src_for_ssa(offset);
      nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
      nir_intrinsic_set_align(store, align, 0);
      nir_builder_instr_insert(b, &store->instr);
   }

   nir_instr_remove(&intrin->instr);
   nir_deref_instr_remove_if_unused(deref);
}
Example #3
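         /* Find variables in the requested modes that are accessed through
          * indirect derefs and exceed the size threshold, and assign each
          * one a byte location in the shader's scratch area.
          */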
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref &&
                intrin->intrinsic != nir_intrinsic_store_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (!(deref->mode & modes))
               continue;

            if (!deref_has_indirect(deref))
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);

            /* We set var->mode to 0 to indicate that a variable will be moved
             * to scratch.  Don't assign a scratch location twice.
             */
            if (var->data.mode == 0)
               continue;

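            /* Only move variables larger than the size threshold. */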
            unsigned var_size, var_align;
            size_align(var->type, &var_size, &var_align);
            if (var_size <= size_threshold)
               continue;

            /* Remove it from its list */
            exec_node_remove(&var->node);
            /* Invalid mode used to flag "moving to scratch" */
            var->data.mode = 0;

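            /* Reserve an aligned slice of the scratch area for this
             * variable and grow the shader's scratch size accordingly.
             */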
            var->data.location = ALIGN_POT(shader->scratch_size, var_align);
            shader->scratch_size = var->data.location + var_size;
         }
Example #4
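   /* Presumably part of a window-position (y-flip) lowering pass: rewrite
    * loads of gl_FragCoord, gl_SamplePosition and gl_PointCoord, and
    * y-direction derivatives, so they respect the desired window origin.
    */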
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_intrinsic) {
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic == nir_intrinsic_load_deref) {
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            if ((var->data.mode == nir_var_shader_in &&
                 var->data.location == VARYING_SLOT_POS) ||
                (var->data.mode == nir_var_system_value &&
                 var->data.location == SYSTEM_VALUE_FRAG_COORD)) {
               /* gl_FragCoord should not have array/struct derefs: */
               lower_fragcoord(state, intr, var);
            } else if (var->data.mode == nir_var_system_value &&
                       var->data.location == SYSTEM_VALUE_SAMPLE_POS) {
               lower_load_sample_pos(state, intr);
            } else if (var->data.mode == nir_var_shader_in &&
                       var->data.location == VARYING_SLOT_PNTC &&
                       state->shader->options->lower_wpos_pntc) {
               lower_load_pointcoord(state, intr);
            }
         } else if (intr->intrinsic == nir_intrinsic_load_frag_coord) {
            lower_fragcoord(state, intr, NULL);
         } else if (intr->intrinsic == nir_intrinsic_load_sample_pos) {
            lower_load_sample_pos(state, intr);
         } else if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
            lower_interp_deref_at_offset(state, intr);
         }
      } else if (instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (alu->op == nir_op_fddy ||
             alu->op == nir_op_fddy_fine ||
             alu->op == nir_op_fddy_coarse)
            lower_fddy(state, alu);
      }
   }
Example #5
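/* Record in shader->info what this intrinsic reads or writes: discard
 * usage, input/output variables, system values, and geometry-shader
 * primitive/stream usage.
 */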
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      assert(shader->info.stage == MESA_SHADER_FRAGMENT);
      shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      nir_variable *var = nir_deref_instr_get_variable(deref);

      if (var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out) {
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

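         /* Try to mark only the slots this deref actually touches; if the
          * partial mask can't be determined, mark the whole variable.
          */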
         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, is_output_read);

         /* We need to track which input_reads bits correspond to a
          * dvec3/dvec4 input attribute */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      break;
   }

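   /* These intrinsics correspond directly to system values; record them in
    * system_values_read.
    */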
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = true;
      /* fall through */

   case nir_intrinsic_emit_vertex:
      if (nir_intrinsic_stream_id(instr) > 0)
         shader->info.gs.uses_streams = true;

      break;

   default:
      break;
   }
}