Example #1
static unsigned
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return glsl_get_components(type);
   case GLSL_TYPE_ARRAY:
      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < glsl_get_length(type); i++) {
         size += type_size(glsl_get_struct_field(type, i));
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_INTERFACE:
      return 0;
   case GLSL_TYPE_IMAGE:
      return 0;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return 0;
}
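
A worked illustration of the counting above (the struct below is invented):
type_size() sums components, so struct { vec3 v; float a[4]; } measures
3 + 1 * 4 = 7.  A standalone sketch of the same arithmetic:

#include <stdio.h>

/* Mirrors type_size() on the hypothetical struct { vec3 v; float a[4]; }. */
int main(void)
{
   unsigned vec3_size  = 3;       /* GLSL_TYPE_FLOAT: glsl_get_components() */
   unsigned array_size = 1 * 4;   /* GLSL_TYPE_ARRAY: element size * length */
   printf("%u\n", vec3_size + array_size);  /* GLSL_TYPE_STRUCT: prints 7 */
   return 0;
}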
static nir_register *
get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   uint32_t hash = hash_deref(deref);

   assert(nir_deref_instr_get_variable(deref)->constant_initializer == NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(state->regs_table, hash, deref);
   if (entry)
      return entry->data;

   unsigned array_size = 1;
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array)
         array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   assert(glsl_type_is_vector_or_scalar(deref->type));

   nir_register *reg = nir_local_reg_create(state->builder.impl);
   reg->num_components = glsl_get_vector_elements(deref->type);
   reg->num_array_elems = array_size > 1 ? array_size : 0;
   reg->bit_size = glsl_get_bit_size(deref->type);

   _mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg);

   return reg;
}
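
A hedged note on the array_size walk above (lengths invented): for a chain
var[i][j] with var declared float[4][3], each enclosing array multiplies in
its length, so the register gets 12 array elements:

/* Illustrative trace only:
 *    d = array deref [j], parent type float[3]    -> array_size *= 3
 *    d = array deref [i], parent type float[4][3] -> array_size *= 4
 * num_components and bit_size come from the innermost (scalar) type, so
 * the result is a 12-element scalar register.
 */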
Example #3
static nir_register *
get_reg_for_deref(nir_deref_var *deref, struct locals_to_regs_state *state)
{
   uint32_t hash = hash_deref(deref);

   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(state->regs_table, hash, deref);
   if (entry)
      return entry->data;

   unsigned array_size = 1;
   nir_deref *tail = &deref->deref;
   while (tail->child) {
      if (tail->child->deref_type == nir_deref_type_array)
         array_size *= glsl_get_length(tail->type);
      tail = tail->child;
   }

   assert(glsl_type_is_vector(tail->type) || glsl_type_is_scalar(tail->type));

   nir_register *reg = nir_local_reg_create(state->impl);
   reg->num_components = glsl_get_vector_elements(tail->type);
   reg->num_array_elems = array_size > 1 ? array_size : 0;
   reg->bit_size = glsl_get_bit_size(glsl_get_base_type(tail->type));

   _mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg);
   nir_array_add(&state->derefs_array, nir_deref_var *, deref);

   return reg;
}
Example #4
const glsl_type *
glsl_channel_type(const glsl_type *t)
{
   switch (glsl_get_base_type(t)) {
   case GLSL_TYPE_ARRAY: {
      const glsl_type *base = glsl_channel_type(glsl_get_array_element(t));
      return glsl_array_type(base, glsl_get_length(t));
   }
   case GLSL_TYPE_UINT:
      return glsl_uint_type();
   case GLSL_TYPE_INT:
      return glsl_int_type();
   case GLSL_TYPE_FLOAT:
      return glsl_float_type();
   case GLSL_TYPE_BOOL:
      return glsl_bool_type();
   case GLSL_TYPE_DOUBLE:
      return glsl_double_type();
   case GLSL_TYPE_UINT64:
      return glsl_uint64_t_type();
   case GLSL_TYPE_INT64:
      return glsl_int64_t_type();
   case GLSL_TYPE_FLOAT16:
      return glsl_float16_t_type();
   case GLSL_TYPE_UINT16:
      return glsl_uint16_t_type();
   case GLSL_TYPE_INT16:
      return glsl_int16_t_type();
   default:
      unreachable("Unhandled base type glsl_channel_type()");
   }
}
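
A usage sketch (not from the original source, assuming the same glsl_*
helpers): the channel type drops vector width but preserves array shape.

/* glsl_channel_type(uvec4)    -> glsl_uint_type()          i.e. uint
 * glsl_channel_type(uvec4[8]) -> glsl_array_type(uint, 8)  i.e. uint[8]
 * The array case recurses on the element type and rebuilds an array of
 * the same length around the result.
 */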
Example #5
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         assert(type->stride > 0);
         return type->stride * cols;
      } else if (base_type == GLSL_TYPE_DOUBLE) {
         return glsl_get_vector_elements(type->type) * 8;
      } else {
         return glsl_get_vector_elements(type->type) * 4;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      assert(type->stride > 0);
      assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      assert(!"Invalid block type");
      return 0;
   }
}
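
A worked example of the block-size computation (offsets and strides
invented): for struct { vec4 a; float b[4]; } with a at offset 0 and b at
offset 16 with array stride 16, the struct case takes the furthest field
end, MAX2(0 + 16, 16 + 4 * 16) = 80 bytes.  A standalone sketch:

#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
   unsigned size = 0;
   size = MAX2(size, 0 + 4 * 4);     /* vec4 a:     offset 0,  16 bytes  */
   size = MAX2(size, 16 + 16 * 4);   /* float b[4]: offset 16, stride 16 */
   printf("%u\n", size);             /* prints 80 */
   return 0;
}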
Example #6
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_access_chain *chain,
                         struct vtn_type *tail_type,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_access_chain_to_deref(b, chain));
      } else {
         vtn_local_store(b, *inout, vtn_access_chain_to_deref(b, chain));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain *new_chain =
         vtn_access_chain_extend(b, chain, 1);
      new_chain->link[chain->length].mode = vtn_access_mode_literal;
      unsigned elems = glsl_get_length(tail_type->type);
      if (load) {
         assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = tail_type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }
      for (unsigned i = 0; i < elems; i++) {
         new_chain->link[chain->length].id = i;
         struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
            tail_type->array_element : tail_type->members[i];
         _vtn_variable_load_store(b, load, new_chain, elem_type,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      unreachable("Invalid access chain type");
   }
}
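
A hedged sketch of the recursion above (the type is invented): loading a
value of type struct { mat2 m; float f[3]; } splits at struct and array
levels but stops at the matrix, keeping whole-matrix loads optimal even
for row-major UBO layouts.

/* Illustrative expansion only:
 *    struct           -> recurse per member via a literal access link
 *       m : mat2      -> base type FLOAT: one vtn_local_load of the matrix
 *       f : float[3]  -> recurse per element: three scalar loads
 */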
static nir_src
get_deref_reg_src(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   nir_builder *b = &state->builder;

   nir_src src;

   src.is_ssa = false;
   src.reg.reg = get_reg_for_deref(deref, state);
   src.reg.base_offset = 0;
   src.reg.indirect = NULL;

   /* It is possible for a user to create a shader that has an array with a
    * single element and then proceed to access it indirectly.  Indirectly
    * accessing a non-array register is not allowed in NIR.  In order to
    * handle this case we just convert it to a direct reference.
    */
   if (src.reg.reg->num_array_elems == 0)
      return src;

   unsigned inner_array_size = 1;
   for (const nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      if (nir_src_is_const(d->arr.index) && !src.reg.indirect) {
         src.reg.base_offset += nir_src_as_uint(d->arr.index) *
                                inner_array_size;
      } else {
         if (src.reg.indirect) {
            assert(src.reg.base_offset == 0);
         } else {
            src.reg.indirect = ralloc(b->shader, nir_src);
            *src.reg.indirect =
               nir_src_for_ssa(nir_imm_int(b, src.reg.base_offset));
            src.reg.base_offset = 0;
         }

         assert(src.reg.indirect->is_ssa);
         nir_ssa_def *index = nir_i2i(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
         src.reg.indirect->ssa =
            nir_iadd(b, src.reg.indirect->ssa,
                        nir_imul(b, index, nir_imm_int(b, inner_array_size)));
      }

      inner_array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   return src;
}
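
To make the offset arithmetic concrete (lengths invented): for var[i][j]
with var declared float[4][3], the walk starts at the innermost array
deref, so the linear offset comes out as j + 3 * i.

/* Hypothetical trace of the loop above:
 *    d = [j]: adds j * 1, then inner_array_size becomes 3
 *    d = [i]: adds i * 3, then inner_array_size becomes 12
 * Constant indices fold into base_offset; the first non-constant index
 * moves any accumulated base_offset into a fresh SSA indirect.
 */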
Example #8
/* Calculate the sampler index based on array indices and also
 * calculate the base uniform location for struct members.
 */
static void
calc_sampler_offsets(nir_deref *tail, nir_tex_instr *instr,
                     unsigned *array_elements, nir_ssa_def **indirect,
                     nir_builder *b, unsigned *location)
{
   if (tail->child == NULL)
      return;

   switch (tail->child->deref_type) {
   case nir_deref_type_array: {
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);

      assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);

      calc_sampler_offsets(tail->child, instr, array_elements,
                           indirect, b, location);
      instr->sampler_index += deref_array->base_offset * *array_elements;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_ssa_def *mul =
            nir_imul(b, nir_imm_int(b, *array_elements),
                     nir_ssa_for_src(b, deref_array->indirect, 1));

         nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
                               NIR_SRC_INIT);

         if (*indirect) {
            *indirect = nir_iadd(b, *indirect, mul);
         } else {
            *indirect = mul;
         }
      }

      *array_elements *= glsl_get_length(tail->type);
      break;
   }

   case nir_deref_type_struct: {
      nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
      *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
      calc_sampler_offsets(tail->child, instr, array_elements,
                           indirect, b, location);
      break;
   }

   default:
      unreachable("Invalid deref type");
      break;
   }
}
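
A hedged trace of the linearization (array lengths invented): for s[1][2]
with s declared sampler2D[4][3], the recursion reaches the innermost deref
first, so the multiplier grows outward.

/* Illustrative only:
 *    innermost call: sampler_index += 2 * 1;  *array_elements becomes 3
 *    outer call:     sampler_index += 1 * 3;  *array_elements becomes 12
 * Indirect indices are instead scaled by *array_elements and summed
 * into *indirect as SSA values.
 */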
Example #9
static struct deref_node *
deref_node_create(struct deref_node *parent,
                  const struct glsl_type *type, nir_shader *shader)
{
   size_t size = sizeof(struct deref_node) +
                 glsl_get_length(type) * sizeof(struct deref_node *);

   struct deref_node *node = rzalloc_size(shader, size);
   node->type = type;
   node->parent = parent;
   node->deref = NULL;
   exec_node_init(&node->direct_derefs_link);

   return node;
}
static void
emit_load_store(nir_builder *b, nir_intrinsic_instr *orig_instr,
                nir_deref_var *deref, nir_deref *tail,
                nir_ssa_def **dest, nir_ssa_def *src)
{
   for (; tail->child; tail = tail->child) {
      if (tail->child->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail->child);
      if (arr->deref_array_type != nir_deref_array_type_indirect)
         continue;

      int length = glsl_get_length(tail->type);

      emit_indirect_load_store(b, orig_instr, deref, tail, -arr->base_offset,
                               length - arr->base_offset, dest, src);
      return;
   }

   assert(tail && tail->child == NULL);

   /* We reached the end of the deref chain.  Emit the instruction */

   if (src == NULL) {
      /* This is a load instruction */
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
      load->num_components = orig_instr->num_components;
      load->variables[0] =
         nir_deref_as_var(nir_copy_deref(load, &deref->deref));
      unsigned bit_size = orig_instr->dest.ssa.bit_size;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);
      *dest = &load->dest.ssa;
   } else {
      /* This is a store instruction */
      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
      store->num_components = orig_instr->num_components;
      nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(orig_instr));
      store->variables[0] =
         nir_deref_as_var(nir_copy_deref(store, &deref->deref));
      store->src[0] = nir_src_for_ssa(src);
      nir_builder_instr_insert(b, &store->instr);
   }
}
Example #11
/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, is_output_read);
}
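
Two hedged slot-count examples (types invented) for the computation above:

/* compact float[7] (e.g. clip distances) -> DIV_ROUND_UP(7, 4) = 2 slots
 * non-compact mat4                       -> glsl_count_attribute_slots()
 *                                           reports 4 slots
 */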
Example #12
static void
vtn_build_subgroup_instr(struct vtn_builder *b,
                         nir_intrinsic_op nir_op,
                         struct vtn_ssa_value *dst,
                         struct vtn_ssa_value *src0,
                         nir_ssa_def *index,
                         unsigned const_idx0,
                         unsigned const_idx1)
{
   /* Some of the subgroup operations take an index.  SPIR-V allows this to be
    * any integer type.  To make things simpler for drivers, we only support
    * 32-bit indices.
    */
   if (index && index->bit_size != 32)
      index = nir_u2u32(&b->nb, index);

   vtn_assert(dst->type == src0->type);
   if (!glsl_type_is_vector_or_scalar(dst->type)) {
      for (unsigned i = 0; i < glsl_get_length(dst->type); i++) {
         vtn_build_subgroup_instr(b, nir_op, dst->elems[i],
                                  src0->elems[i], index,
                                  const_idx0, const_idx1);
      }
      return;
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->nb.shader, nir_op);
   nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest,
                              dst->type, NULL);
   intrin->num_components = intrin->dest.ssa.num_components;

   intrin->src[0] = nir_src_for_ssa(src0->def);
   if (index)
      intrin->src[1] = nir_src_for_ssa(index);

   intrin->const_index[0] = const_idx0;
   intrin->const_index[1] = const_idx1;

   nir_builder_instr_insert(&b->nb, &intrin->instr);

   dst->def = &intrin->dest.ssa;
}
Example #13
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_access_chain *dest,
                   struct vtn_access_chain *src, struct vtn_type *tail_type)
{
   enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain *new_src, *new_dest;
      new_src = vtn_access_chain_extend(b, src, 1);
      new_dest = vtn_access_chain_extend(b, dest, 1);
      new_src->link[src->length].mode = vtn_access_mode_literal;
      new_dest->link[dest->length].mode = vtn_access_mode_literal;
      unsigned elems = glsl_get_length(tail_type->type);
      for (unsigned i = 0; i < elems; i++) {
         new_src->link[src->length].id = i;
         new_dest->link[dest->length].id = i;
         struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
            tail_type->array_element : tail_type->members[i];
         _vtn_variable_copy(b, new_dest, new_src, elem_type);
      }
      return;
   }

   default:
      unreachable("Invalid access chain type");
   }
}
Example #14
/* Gets the deref_node for the given deref chain and creates it if it
 * doesn't yet exist.  If the deref is fully-qualified and direct and
 * state->add_to_direct_deref_nodes is true, it will be added to the hash
 * table of fully-qualified direct derefs.
 */
static struct deref_node *
get_deref_node(nir_deref_var *deref, struct lower_variables_state *state)
{
   bool is_direct = true;

   /* Start at the base of the chain. */
   struct deref_node *node = get_deref_node_for_var(deref->var, state);
   assert(deref->deref.type == node->type);

   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      switch (tail->deref_type) {
      case nir_deref_type_struct: {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         assert(deref_struct->index < glsl_get_length(node->type));

         if (node->children[deref_struct->index] == NULL)
            node->children[deref_struct->index] =
               deref_node_create(node, tail->type, state->dead_ctx);

         node = node->children[deref_struct->index];
         break;
      }

      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail);

         switch (arr->deref_array_type) {
         case nir_deref_array_type_direct:
            /* This is possible if a loop unrolls and generates an
             * out-of-bounds offset.  We need to handle this at least
             * somewhat gracefully.
             */
            if (arr->base_offset >= glsl_get_length(node->type))
               return NULL;

            if (node->children[arr->base_offset] == NULL)
               node->children[arr->base_offset] =
                  deref_node_create(node, tail->type, state->dead_ctx);

            node = node->children[arr->base_offset];
            break;

         case nir_deref_array_type_indirect:
            if (node->indirect == NULL)
               node->indirect = deref_node_create(node, tail->type,
                                                  state->dead_ctx);

            node = node->indirect;
            is_direct = false;
            break;

         case nir_deref_array_type_wildcard:
            if (node->wildcard == NULL)
               node->wildcard = deref_node_create(node, tail->type,
                                                  state->dead_ctx);

            node = node->wildcard;
            is_direct = false;
            break;

         default:
            unreachable("Invalid array deref type");
         }
         break;
      }
      default:
         unreachable("Invalid deref type");
      }
   }

   assert(node);

   /* Only insert if it isn't already in the list. */
   if (is_direct && state->add_to_direct_deref_nodes &&
       node->direct_derefs_link.next == NULL) {
      node->deref = deref;
      assert(deref->var != NULL);
      exec_list_push_tail(&state->direct_deref_nodes,
                          &node->direct_derefs_link);
   }

   return node;
}
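
A hedged sketch of how chains map to nodes (derefs invented): a direct
chain lands in per-offset children; any indirect or wildcard link routes
through a shared node and disqualifies the chain from the direct list.

/* Illustrative mapping only:
 *    var.f[2] : var node -> children[f] -> children[2]  (is_direct = true)
 *    var.f[i] : var node -> children[f] -> indirect     (is_direct = false)
 * Only fully-direct chains are pushed onto state->direct_deref_nodes.
 */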
Example #15
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpVariable: {
      struct vtn_variable *var = rzalloc(b, struct vtn_variable);
      var->type = vtn_value(b, w[1], vtn_value_type_type)->type;

      var->chain.var = var;
      var->chain.length = 0;

      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);
      val->access_chain = &var->chain;

      struct vtn_type *without_array = var->type;
      while (glsl_type_is_array(without_array->type))
         without_array = without_array->array_element;

      nir_variable_mode nir_mode;
      switch ((SpvStorageClass)w[3]) {
      case SpvStorageClassUniform:
      case SpvStorageClassUniformConstant:
         if (without_array->block) {
            var->mode = vtn_variable_mode_ubo;
            b->shader->info.num_ubos++;
         } else if (without_array->buffer_block) {
            var->mode = vtn_variable_mode_ssbo;
            b->shader->info.num_ssbos++;
         } else if (glsl_type_is_image(without_array->type)) {
            var->mode = vtn_variable_mode_image;
            nir_mode = nir_var_uniform;
            b->shader->info.num_images++;
         } else if (glsl_type_is_sampler(without_array->type)) {
            var->mode = vtn_variable_mode_sampler;
            nir_mode = nir_var_uniform;
            b->shader->info.num_textures++;
         } else {
            assert(!"Invalid uniform variable type");
         }
         break;
      case SpvStorageClassPushConstant:
         var->mode = vtn_variable_mode_push_constant;
         assert(b->shader->num_uniforms == 0);
         b->shader->num_uniforms = vtn_type_block_size(var->type) * 4;
         break;
      case SpvStorageClassInput:
         var->mode = vtn_variable_mode_input;
         nir_mode = nir_var_shader_in;
         break;
      case SpvStorageClassOutput:
         var->mode = vtn_variable_mode_output;
         nir_mode = nir_var_shader_out;
         break;
      case SpvStorageClassPrivate:
         var->mode = vtn_variable_mode_global;
         nir_mode = nir_var_global;
         break;
      case SpvStorageClassFunction:
         var->mode = vtn_variable_mode_local;
         nir_mode = nir_var_local;
         break;
      case SpvStorageClassWorkgroup:
         var->mode = vtn_variable_mode_workgroup;
         nir_mode = nir_var_shared;
         break;
      case SpvStorageClassCrossWorkgroup:
      case SpvStorageClassGeneric:
      case SpvStorageClassAtomicCounter:
      default:
         unreachable("Unhandled variable storage class");
      }

      switch (var->mode) {
      case vtn_variable_mode_local:
      case vtn_variable_mode_global:
      case vtn_variable_mode_image:
      case vtn_variable_mode_sampler:
      case vtn_variable_mode_workgroup:
         /* For these, we create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->data.mode = nir_mode;

         switch (var->mode) {
         case vtn_variable_mode_image:
         case vtn_variable_mode_sampler:
            var->var->interface_type = without_array->type;
            break;
         default:
            var->var->interface_type = NULL;
            break;
         }
         break;

      case vtn_variable_mode_input:
      case vtn_variable_mode_output: {
         /* For inputs and outputs, we immediately split structures.  This
          * is for a couple of reasons.  For one, builtins may all come in
          * a struct and we really want those split out into separate
          * variables.  For another, interpolation qualifiers can be
          * applied to members of the top-level struct and we need to be
          * able to preserve that information.
          */

         int array_length = -1;
         struct vtn_type *interface_type = var->type;
         if (b->shader->stage == MESA_SHADER_GEOMETRY &&
             glsl_type_is_array(var->type->type)) {
            /* In Geometry shaders (and some tessellation), inputs come
             * in per-vertex arrays.  However, some builtins come in
             * non-per-vertex, hence the need for the is_array check.  In
             * any case, there are no non-builtin arrays allowed so this
             * check should be sufficient.
             */
            interface_type = var->type->array_element;
            array_length = glsl_get_length(var->type->type);
         }

         if (glsl_type_is_struct(interface_type->type)) {
            /* It's a struct.  Split it. */
            unsigned num_members = glsl_get_length(interface_type->type);
            var->members = ralloc_array(b, nir_variable *, num_members);

            for (unsigned i = 0; i < num_members; i++) {
               const struct glsl_type *mtype = interface_type->members[i]->type;
               if (array_length >= 0)
                  mtype = glsl_array_type(mtype, array_length);

               var->members[i] = rzalloc(b->shader, nir_variable);
               var->members[i]->name =
                  ralloc_asprintf(var->members[i], "%s.%d", val->name, i);
               var->members[i]->type = mtype;
               var->members[i]->interface_type =
                  interface_type->members[i]->type;
               var->members[i]->data.mode = nir_mode;
            }
         } else {
            var->var = rzalloc(b->shader, nir_variable);
            var->var->name = ralloc_strdup(var->var, val->name);
            var->var->type = var->type->type;
            var->var->interface_type = interface_type->type;
            var->var->data.mode = nir_mode;
         }

         /* For inputs and outputs, we need to grab locations and builtin
          * information from the interface type.
          */
         vtn_foreach_decoration(b, interface_type->val, var_decoration_cb, var);
         break;
      }

      case vtn_variable_mode_param:
         unreachable("Not created through OpVariable");

      case vtn_variable_mode_ubo:
      case vtn_variable_mode_ssbo:
      case vtn_variable_mode_push_constant:
         /* These don't need actual variables. */
         break;
      }

      if (count > 4) {
         assert(count == 5);
         nir_constant *constant =
            vtn_value(b, w[4], vtn_value_type_constant)->constant;
         var->var->constant_initializer =
            nir_constant_clone(constant, var->var);
      }

      vtn_foreach_decoration(b, val, var_decoration_cb, var);

      if (var->mode == vtn_variable_mode_image ||
          var->mode == vtn_variable_mode_sampler) {
         /* XXX: We still need the binding information in the nir_variable
          * for these. We should fix that.
          */
         var->var->data.binding = var->binding;
         var->var->data.descriptor_set = var->descriptor_set;

         if (var->mode == vtn_variable_mode_image)
            var->var->data.image.format = without_array->image_format;
      }

      if (var->mode == vtn_variable_mode_local) {
         assert(var->members == NULL && var->var != NULL);
         nir_function_impl_add_variable(b->impl, var->var);
      } else if (var->var) {
         nir_shader_add_variable(b->shader, var->var);
      } else if (var->members) {
         unsigned count = glsl_get_length(without_array->type);
         for (unsigned i = 0; i < count; i++) {
            assert(var->members[i]->data.mode != nir_var_local);
            nir_shader_add_variable(b->shader, var->members[i]);
         }
      } else {
         assert(var->mode == vtn_variable_mode_ubo ||
                var->mode == vtn_variable_mode_ssbo ||
                var->mode == vtn_variable_mode_push_constant);
      }
      break;
   }
Example #16
static nir_src
get_deref_reg_src(nir_deref_var *deref, nir_instr *instr,
                  struct locals_to_regs_state *state)
{
   nir_src src;

   src.is_ssa = false;
   src.reg.reg = get_reg_for_deref(deref, state);
   src.reg.base_offset = 0;
   src.reg.indirect = NULL;

   /* It is possible for a user to create a shader that has an array with a
    * single element and then proceed to access it indirectly.  Indirectly
    * accessing a non-array register is not allowed in NIR.  In order to
    * handle this case we just convert it to a direct reference.
    */
   if (src.reg.reg->num_array_elems == 0)
      return src;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *deref_array = nir_deref_as_array(tail);

      src.reg.base_offset *= glsl_get_length(parent_type);
      src.reg.base_offset += deref_array->base_offset;

      if (src.reg.indirect) {
         nir_load_const_instr *load_const =
            nir_load_const_instr_create(state->shader, 1, 32);
         load_const->value.u32[0] = glsl_get_length(parent_type);
         nir_instr_insert_before(instr, &load_const->instr);

         nir_alu_instr *mul = nir_alu_instr_create(state->shader, nir_op_imul);
         mul->src[0].src = *src.reg.indirect;
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &load_const->def;
         mul->dest.write_mask = 1;
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
         nir_instr_insert_before(instr, &mul->instr);

         src.reg.indirect->is_ssa = true;
         src.reg.indirect->ssa = &mul->dest.dest.ssa;
      }

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         if (src.reg.indirect == NULL) {
            src.reg.indirect = ralloc(state->shader, nir_src);
            nir_src_copy(src.reg.indirect, &deref_array->indirect,
                         state->shader);
         } else {
            nir_alu_instr *add = nir_alu_instr_create(state->shader,
                                                      nir_op_iadd);
            add->src[0].src = *src.reg.indirect;
            nir_src_copy(&add->src[1].src, &deref_array->indirect, add);
            add->dest.write_mask = 1;
            nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL);
            nir_instr_insert_before(instr, &add->instr);

            src.reg.indirect->is_ssa = true;
            src.reg.indirect->ssa = &add->dest.dest.ssa;
         }
      }
   }

   return src;
}
Example #17
/* Recursively constructs deref chains to split a copy instruction into
 * multiple (if needed) copy instructions with full-length deref chains.
 * External callers of this function should pass the tail and head of the
 * deref chains found as the source and destination of the copy instruction
 * into this function.
 *
 * \param  old_copy  The copy instruction we are splitting
 * \param  dest_head The head of the destination deref chain we are building
 * \param  src_head  The head of the source deref chain we are building
 * \param  dest_tail The tail of the destination deref chain we are building
 * \param  src_tail  The tail of the source deref chain we are building
 * \param  state     The current split_var_copies_state object
 */
static void
split_var_copy_instr(nir_intrinsic_instr *old_copy,
                     nir_deref *dest_head, nir_deref *src_head,
                     nir_deref *dest_tail, nir_deref *src_tail,
                     struct split_var_copies_state *state)
{
   assert(src_tail->type == dest_tail->type);

   /* Make sure these really are the tails of the deref chains */
   assert(dest_tail->child == NULL);
   assert(src_tail->child == NULL);

   switch (glsl_get_base_type(src_tail->type)) {
   case GLSL_TYPE_ARRAY: {
      /* Make a wildcard dereference */
      nir_deref_array *deref = nir_deref_array_create(state->dead_ctx);
      deref->deref.type = glsl_get_array_element(src_tail->type);
      deref->deref_array_type = nir_deref_array_type_wildcard;

      /* Set the tail of both as the newly created wildcard deref.  It is
       * safe to use the same wildcard in both places because a) we will be
       * copying it before we put it in an actual instruction and b)
       * everything that will potentially add another link in the deref
       * chain will also add the same thing to both chains.
       */
      src_tail->child = &deref->deref;
      dest_tail->child = &deref->deref;

      split_var_copy_instr(old_copy, dest_head, src_head,
                           dest_tail->child, src_tail->child, state);

      /* Set it back to the way we found it */
      src_tail->child = NULL;
      dest_tail->child = NULL;
      break;
   }

   case GLSL_TYPE_STRUCT:
      /* This is the only part that actually does any interesting
       * splitting.  For array types, we just use wildcards and resolve
       * them later.  For structure types, we need to emit one copy
       * instruction for every structure element.  Because we may have
       * structs inside structs, we just recurse and let the next level
       * take care of any additional structures.
       */
      for (unsigned i = 0; i < glsl_get_length(src_tail->type); i++) {
         nir_deref_struct *deref = nir_deref_struct_create(state->dead_ctx, i);
         deref->deref.type = glsl_get_struct_field(src_tail->type, i);

         /* Set the tail of both as the newly created structure deref.  It
          * is safe to use the same wildcard in both places because a) we
          * will be copying it before we put it in an actual instruction
          * and b) everything that will potentially add another link in the
          * deref chain will also add the same thing to both chains.
          */
         src_tail->child = &deref->deref;
         dest_tail->child = &deref->deref;

         split_var_copy_instr(old_copy, dest_head, src_head,
                              dest_tail->child, src_tail->child, state);
      }
      /* Set it back to the way we found it */
      src_tail->child = NULL;
      dest_tail->child = NULL;
      break;

   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_matrix(src_tail->type)) {
         nir_deref_array *deref = nir_deref_array_create(state->dead_ctx);
         deref->deref.type = glsl_get_column_type(src_tail->type);
         deref->deref_array_type = nir_deref_array_type_wildcard;

         /* Set the tail of both as the newly created wildcard deref.  It
          * is safe to use the same wildcard in both places because a) we
          * will be copying it before we put it in an actual instruction
          * and b) everything that will potentially add another link in the
          * deref chain will also add the same thing to both chains.
          */
         src_tail->child = &deref->deref;
         dest_tail->child = &deref->deref;

         split_var_copy_instr(old_copy, dest_head, src_head,
                              dest_tail->child, src_tail->child, state);

         /* Set it back to the way we found it */
         src_tail->child = NULL;
         dest_tail->child = NULL;
      } else {
         /* At this point, we have fully built our deref chains and can
          * actually add the new copy instruction.
          */
         nir_intrinsic_instr *new_copy =
            nir_intrinsic_instr_create(state->mem_ctx, nir_intrinsic_copy_var);

         /* We need to make copies because a) this deref chain actually
          * belongs to the copy instruction and b) the deref chains may
          * have some of the same links due to the way we constructed them
          */
         nir_deref *src = nir_copy_deref(new_copy, src_head);
         nir_deref *dest = nir_copy_deref(new_copy, dest_head);

         new_copy->variables[0] = nir_deref_as_var(dest);
         new_copy->variables[1] = nir_deref_as_var(src);

         /* Emit the copy instruction after the old instruction.  We'll
          * remove the old one later.
          */
         nir_instr_insert_after(&old_copy->instr, &new_copy->instr);
         state->progress = true;
      }
      break;

   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_ATOMIC_UINT:
   case GLSL_TYPE_INTERFACE:
   default:
      unreachable("Cannot copy these types");
   }
}
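
A hedged expansion example (the type is invented): splitting a copy of
struct { mat3 m; float f; } emits one copy per member, with the matrix
member reached through a column wildcard.

/* Illustrative only:
 *    copy_var dest.m[*], src.m[*]   (matrix -> wildcard over columns)
 *    copy_var dest.f,    src.f
 * Array members likewise get a shared wildcard deref instead of
 * per-element copies; copy lowering resolves those later.
 */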
Example #18
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
                      nir_deref *tail, struct vtn_ssa_value *inout)
{
   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = tail->child;

   if (glsl_type_is_vector_or_scalar(tail->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      tail->child = NULL;

      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
                                   nir_intrinsic_store_var;

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
      intrin->variables[0] =
         nir_deref_as_var(nir_copy_deref(intrin, &deref->deref));
      intrin->num_components = glsl_get_vector_elements(tail->type);

      if (load) {
         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                           intrin->num_components,
                           glsl_get_bit_size(glsl_get_base_type(tail->type)),
                           NULL);
         inout->def = &intrin->dest.ssa;
      } else {
         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
         intrin->src[0] = nir_src_for_ssa(inout->def);
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(tail->type)) {
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_array *deref_arr = nir_deref_array_create(b);
      deref_arr->deref_array_type = nir_deref_array_type_direct;
      deref_arr->deref.type = glsl_get_array_element(tail->type);
      tail->child = &deref_arr->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_arr->base_offset = i;
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   } else {
      assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
      tail->child = &deref_struct->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_struct->index = i;
         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   }

   tail->child = old_child;
}
Example #19
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      struct vtn_access_chain *chain, unsigned chain_idx,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (chain && chain_idx >= chain->length)
      chain = NULL;

   if (load && chain == NULL && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         if (chain == NULL) {
            /* Loading the whole matrix */
            struct vtn_ssa_value *transpose;
            unsigned num_ops, vec_width;
            if (type->row_major) {
               num_ops = glsl_get_vector_elements(type->type);
               vec_width = glsl_get_matrix_columns(type->type);
               if (load) {
                  const struct glsl_type *transpose_type =
                     glsl_matrix_type(base_type, vec_width, num_ops);
                  *inout = vtn_create_ssa_value(b, transpose_type);
               } else {
                  transpose = vtn_ssa_transpose(b, *inout);
                  inout = &transpose;
               }
            } else {
               num_ops = glsl_get_matrix_columns(type->type);
               vec_width = glsl_get_vector_elements(type->type);
            }

            for (unsigned i = 0; i < num_ops; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    &(*inout)->elems[i],
                                    glsl_vector_type(base_type, vec_width));
            }

            if (load && type->row_major)
               *inout = vtn_ssa_transpose(b, *inout);
         } else if (type->row_major) {
            /* Row-major but with an access chain. */
            nir_ssa_def *col_offset =
               vtn_access_link_as_ssa(b, chain->link[chain_idx],
                                      type->array_element->stride);
            offset = nir_iadd(&b->nb, offset, col_offset);

            if (chain_idx + 1 < chain->length) {
               /* Picking off a single element */
               nir_ssa_def *row_offset =
                  vtn_access_link_as_ssa(b, chain->link[chain_idx + 1],
                                         type->stride);
               offset = nir_iadd(&b->nb, offset, row_offset);
               if (load)
                  *inout = vtn_create_ssa_value(b, glsl_scalar_type(base_type));
               _vtn_load_store_tail(b, op, load, index, offset, inout,
                                    glsl_scalar_type(base_type));
            } else {
               /* Grabbing a column; picking one element off each row */
               unsigned num_comps = glsl_get_vector_elements(type->type);
               const struct glsl_type *column_type =
                  glsl_get_column_type(type->type);

               nir_ssa_def *comps[4];
               for (unsigned i = 0; i < num_comps; i++) {
                  nir_ssa_def *elem_offset =
                     nir_iadd(&b->nb, offset,
                              nir_imm_int(&b->nb, i * type->stride));

                  struct vtn_ssa_value *comp, temp_val;
                  if (!load) {
                     temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                     temp_val.type = glsl_scalar_type(base_type);
                  }
                  comp = &temp_val;
                  _vtn_load_store_tail(b, op, load, index, elem_offset,
                                       &comp, glsl_scalar_type(base_type));
                  comps[i] = comp->def;
               }

               if (load) {
                  if (*inout == NULL)
                     *inout = vtn_create_ssa_value(b, column_type);

                  (*inout)->def = nir_vec(&b->nb, comps, num_comps);
               }
            }
         } else {
            /* Column-major with a deref. Fall through to array case. */
            nir_ssa_def *col_offset =
               vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
            offset = nir_iadd(&b->nb, offset, col_offset);

            _vtn_block_load_store(b, op, load, index, offset,
                                  chain, chain_idx + 1,
                                  type->array_element, inout);
         }
      } else if (chain == NULL) {
         /* Single whole vector */
         assert(glsl_type_is_vector_or_scalar(type->type));
         _vtn_load_store_tail(b, op, load, index, offset, inout, type->type);
      } else {
         /* Single component of a vector. Fall through to array case. */
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);

         _vtn_block_load_store(b, op, load, index, offset, NULL, 0,
                               type->array_element, inout);
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      unreachable("Invalid block member type");
   }
}
Example #20
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationNonWritable:
      /* Do nothing with this for now */
      return;
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;

   case SpvDecorationLocation: {
      unsigned location = dec->literals[0];
      bool is_vertex_input;
      if (b->shader->stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += VARYING_SLOT_VAR0;
      } else {
         assert(!"Location must be on input or output variable");
      }

      if (vtn_var->var) {
         vtn_var->var->data.location = location;
         vtn_var->var->data.explicit_location = true;
      } else {
         assert(vtn_var->members);
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++) {
            vtn_var->members[i]->data.location = location;
            vtn_var->members[i]->data.explicit_location = true;
            location +=
               glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
                                          is_vertex_input);
         }
      }
      return;
   }

   default:
      break;
   }

   /* Now we handle decorations that apply to a particular nir_variable */
   nir_variable *nir_var = vtn_var->var;
   if (val->value_type == vtn_value_type_access_chain) {
      assert(val->access_chain->length == 0);
      assert(val->access_chain->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
      if (member != -1)
         nir_var = vtn_var->members[member];
   }

   if (nir_var == NULL)
      return;

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      nir_var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      nir_var->data.interpolation = INTERP_QUALIFIER_FLAT;
      break;
   case SpvDecorationCentroid:
      nir_var->data.centroid = true;
      break;
   case SpvDecorationSample:
      nir_var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      nir_var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      assert(nir_var->constant_initializer != NULL);
      nir_var->data.read_only = true;
      break;
   case SpvDecorationNonWritable:
      nir_var->data.read_only = true;
      break;
   case SpvDecorationComponent:
      nir_var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      nir_var->data.explicit_index = true;
      nir_var->data.index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      if (builtin == SpvBuiltInWorkgroupSize) {
         /* This shouldn't be a builtin.  It's actually a constant. */
         nir_var->data.mode = nir_var_global;
         nir_var->data.read_only = true;

         nir_constant *c = rzalloc(nir_var, nir_constant);
         c->value.u[0] = b->shader->info.cs.local_size[0];
         c->value.u[1] = b->shader->info.cs.local_size[1];
         c->value.u[2] = b->shader->info.cs.local_size[2];
         nir_var->constant_initializer = c;
         break;
      }

      nir_variable_mode mode = nir_var->data.mode;
      vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
      nir_var->data.explicit_location = true;
      nir_var->data.mode = mode;

      if (builtin == SpvBuiltInFragCoord || builtin == SpvBuiltInSamplePosition)
         nir_var->data.origin_upper_left = b->origin_upper_left;
      break;
   }
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationGLSLShared:
   case SpvDecorationPatch:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
      /* This is really nice but we have no use for it right now. */
   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationSpecId:
      break;
   default:
      unreachable("Unhandled variable decoration");
   }
}
Example #21
/* This function recursively walks the given deref chain and replaces the
 * given copy instruction with an equivalent sequence of load/store
 * operations.
 *
 * @copy_instr    The copy instruction to replace; new instructions will be
 *                inserted before this one
 *
 * @dest_head     The head of the destination variable deref chain
 *
 * @src_head      The head of the source variable deref chain
 *
 * @dest_tail     The current tail of the destination variable deref chain;
 *                this is used for recursion and external callers of this
 *                function should call it with tail == head
 *
 * @src_tail      The current tail of the source variable deref chain;
 *                this is used for recursion and external callers of this
 *                function should call it with tail == head
 *
 * @state         The current variable lowering state
 */
static void
emit_copy_load_store(nir_intrinsic_instr *copy_instr,
                     nir_deref_var *dest_head, nir_deref_var *src_head,
                     nir_deref *dest_tail, nir_deref *src_tail, void *mem_ctx)
{
    /* Find the next pair of wildcards */
    nir_deref *src_arr_parent = deref_next_wildcard_parent(src_tail);
    nir_deref *dest_arr_parent = deref_next_wildcard_parent(dest_tail);

    if (src_arr_parent || dest_arr_parent) {
        /* Wildcards had better come in matched pairs */
        assert(src_arr_parent && dest_arr_parent);

        nir_deref_array *src_arr = nir_deref_as_array(src_arr_parent->child);
        nir_deref_array *dest_arr = nir_deref_as_array(dest_arr_parent->child);

        unsigned length = glsl_get_length(src_arr_parent->type);
        /* The wildcards should represent the same number of elements */
        assert(length == glsl_get_length(dest_arr_parent->type));
        assert(length > 0);

        /* Walk over all of the elements that this wildcard refers to and
         * call emit_copy_load_store on each one of them */
        src_arr->deref_array_type = nir_deref_array_type_direct;
        dest_arr->deref_array_type = nir_deref_array_type_direct;
        for (unsigned i = 0; i < length; i++) {
            src_arr->base_offset = i;
            dest_arr->base_offset = i;
            emit_copy_load_store(copy_instr, dest_head, src_head,
                                 &dest_arr->deref, &src_arr->deref, mem_ctx);
        }
        src_arr->deref_array_type = nir_deref_array_type_wildcard;
        dest_arr->deref_array_type = nir_deref_array_type_wildcard;
    } else {
        /* In this case, we have no wildcards anymore, so all we have to do
         * is just emit the load and store operations. */
        src_tail = nir_deref_tail(src_tail);
        dest_tail = nir_deref_tail(dest_tail);

        assert(src_tail->type == dest_tail->type);

        unsigned num_components = glsl_get_vector_elements(src_tail->type);

        nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_load_var);
        load->num_components = num_components;
        load->variables[0] = nir_deref_as_var(nir_copy_deref(load, &src_head->deref));
        nir_ssa_dest_init(&load->instr, &load->dest, num_components, NULL);

        nir_instr_insert_before(&copy_instr->instr, &load->instr);

        nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_store_var);
        store->num_components = num_components;
        store->const_index[0] = (1 << num_components) - 1;
        store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &dest_head->deref));

        store->src[0].is_ssa = true;
        store->src[0].ssa = &load->dest.ssa;

        nir_instr_insert_before(&copy_instr->instr, &store->instr);
    }
}
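
A hedged illustration of the unrolling (array length invented): a copy
whose chains share one wildcard pair over float[3] becomes three direct
load/store pairs.

/* Illustrative only:
 *    copy_var dest[*], src[*]          with both arrays of length 3
 * is emitted as
 *    load_var src[0];  store_var dest[0]
 *    load_var src[1];  store_var dest[1]
 *    load_var src[2];  store_var dest[2]
 * by temporarily rewriting the wildcard derefs to direct offsets.
 */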
Example #22
static void
lower_sampler(nir_tex_instr *instr, struct gl_shader_program *shader_program,
              const struct gl_program *prog, void *mem_ctx)
{
   if (instr->sampler == NULL)
      return;

   /* Get the name and the offset */
   instr->sampler_index = 0;
   bool has_indirect = false;
   char *name = ralloc_strdup(mem_ctx, instr->sampler->var->name);

   for (nir_deref *deref = &instr->sampler->deref;
        deref->child; deref = deref->child) {
      switch (deref->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *deref_array = nir_deref_as_array(deref->child);

         /* XXX: We're assuming here that the indirect is the last array
          * thing we have.  This should be ok for now as we don't support
          * arrays_of_arrays yet.
          */
         assert(!has_indirect);

         instr->sampler_index *= glsl_get_length(deref->type);
         switch (deref_array->deref_array_type) {
         case nir_deref_array_type_direct:
            instr->sampler_index += deref_array->base_offset;
            if (deref_array->deref.child)
               ralloc_asprintf_append(&name, "[%u]", deref_array->base_offset);
            break;
         case nir_deref_array_type_indirect: {
            assert(!has_indirect);

            instr->src = reralloc(mem_ctx, instr->src, nir_tex_src,
                                  instr->num_srcs + 1);
            memset(&instr->src[instr->num_srcs], 0, sizeof *instr->src);
            instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
            instr->num_srcs++;

            nir_instr_rewrite_src(&instr->instr,
                                  &instr->src[instr->num_srcs - 1].src,
                                  deref_array->indirect);

            instr->sampler_array_size = glsl_get_length(deref->type);

            nir_src empty;
            memset(&empty, 0, sizeof empty);
            nir_instr_rewrite_src(&instr->instr, &deref_array->indirect, empty);

            if (deref_array->deref.child)
               ralloc_strcat(&name, "[0]");
            break;
         }

         case nir_deref_array_type_wildcard:
            unreachable("Cannot copy samplers");
         default:
            unreachable("Invalid deref array type");
         }
         break;
      }

      case nir_deref_type_struct: {
         nir_deref_struct *deref_struct = nir_deref_as_struct(deref->child);
         const char *field = glsl_get_struct_elem_name(deref->type,
                                                       deref_struct->index);
         ralloc_asprintf_append(&name, ".%s", field);
         break;
      }

      default:
         unreachable("Invalid deref type");
         break;
      }
   }

   instr->sampler_index += get_sampler_index(shader_program, name, prog);

   instr->sampler = NULL;
}
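
Two hedged traces of the index/name bookkeeping above (declarations
invented):

/* sampler2D s[4], access s[2]:
 *    name stays "s"; sampler_index = 2 is added to the unit that
 *    get_sampler_index() finds for "s".
 * sampler2D s[4], access s[i] (indirect):
 *    the index becomes a new nir_tex_src_sampler_offset source and
 *    sampler_array_size records the array length 4.
 */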