Example #1
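/* Returns the type of a single channel of t: the scalar base type for
 * scalars and vectors, or, for arrays, an array of the same length whose
 * element type is the channel type of the original element.
 */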
const glsl_type *
glsl_channel_type(const glsl_type *t)
{
   switch (glsl_get_base_type(t)) {
   case GLSL_TYPE_ARRAY: {
      const glsl_type *base = glsl_channel_type(glsl_get_array_element(t));
      return glsl_array_type(base, glsl_get_length(t));
   }
   case GLSL_TYPE_UINT:
      return glsl_uint_type();
   case GLSL_TYPE_INT:
      return glsl_int_type();
   case GLSL_TYPE_FLOAT:
      return glsl_float_type();
   case GLSL_TYPE_BOOL:
      return glsl_bool_type();
   case GLSL_TYPE_DOUBLE:
      return glsl_double_type();
   case GLSL_TYPE_UINT64:
      return glsl_uint64_t_type();
   case GLSL_TYPE_INT64:
      return glsl_int64_t_type();
   case GLSL_TYPE_FLOAT16:
      return glsl_float16_t_type();
   case GLSL_TYPE_UINT16:
      return glsl_uint16_t_type();
   case GLSL_TYPE_INT16:
      return glsl_int16_t_type();
   default:
      unreachable("Unhandled base type glsl_channel_type()");
   }
}
Example #2
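/* Walks a deref chain from the variable deref down to its tail, checking
 * that each link's type matches its parent (array element or struct field)
 * and validating the index source of any indirect array deref.
 */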
static void
validate_deref_chain(nir_deref *deref, validate_state *state)
{
   assert(deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         assert(deref->type == glsl_get_array_element(parent->type));
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state);
         break;

      case nir_deref_type_struct:
         assert(deref->type ==
                glsl_get_struct_field(parent->type,
                                      nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         assert(!"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
Example #3
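/* Returns the number of scalar components a type occupies: scalars and
 * vectors count their components, arrays and structs are summed
 * recursively, and opaque types (samplers, images, atomic counters,
 * interfaces) contribute nothing.
 */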
static unsigned
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return glsl_get_components(type);
   case GLSL_TYPE_ARRAY:
      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < glsl_get_length(type); i++) {
         size += type_size(glsl_get_struct_field(type, i));
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_INTERFACE:
      return 0;
   case GLSL_TYPE_IMAGE:
      return 0;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return 0;
}
Example #4
/* Crawls a chain of array derefs and rewrites the types so that the
 * lengths stay the same but the terminal type is the one given by the
 * type parameter.  This is useful for split structures.
 */
static void
rewrite_deref_types(nir_deref *deref, const struct glsl_type *type)
{
   deref->type = type;
   if (deref->child) {
      assert(deref->child->deref_type == nir_deref_type_array);
      assert(glsl_type_is_array(deref->type));
      rewrite_deref_types(deref->child, glsl_get_array_element(type));
   }
}
Example #5
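/* Walks a deref chain and checks each link's type against its parent.
 * For nir_var_shared variables, array derefs on vectors are additionally
 * allowed, since nir_lower_io can handle them.
 */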
static void
validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
                     validate_state *state)
{
   validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         if (mode == nir_var_shared) {
            /* Shared variables have a bit more relaxed rules because we need
             * to be able to handle array derefs on vectors.  Fortunately,
             * nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state, deref->type == glsl_get_array_element(parent->type));
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1);
         break;

      case nir_deref_type_struct:
         assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
         validate_assert(state, deref->type ==
                glsl_get_struct_field(parent->type,
                                      nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         validate_assert(state, !"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
Example #6
/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, is_output_read);
}
Example #7
/* Recursively constructs deref chains to split a copy instruction into
 * multiple (if needed) copy instructions with full-length deref chains.
 * External callers of this function should pass the tail and head of the
 * deref chains found as the source and destination of the copy instruction
 * into this function.
 *
 * \param  old_copy  The copy instruction we are splitting
 * \param  dest_head The head of the destination deref chain we are building
 * \param  src_head  The head of the source deref chain we are building
 * \param  dest_tail The tail of the destination deref chain we are building
 * \param  src_tail  The tail of the source deref chain we are building
 * \param  state     The current split_var_copies_state object
 */
static void
split_var_copy_instr(nir_intrinsic_instr *old_copy,
                     nir_deref *dest_head, nir_deref *src_head,
                     nir_deref *dest_tail, nir_deref *src_tail,
                     struct split_var_copies_state *state)
{
   assert(src_tail->type == dest_tail->type);

   /* Make sure these really are the tails of the deref chains */
   assert(dest_tail->child == NULL);
   assert(src_tail->child == NULL);

   switch (glsl_get_base_type(src_tail->type)) {
   case GLSL_TYPE_ARRAY: {
      /* Make a wildcard dereference */
      nir_deref_array *deref = nir_deref_array_create(state->dead_ctx);
      deref->deref.type = glsl_get_array_element(src_tail->type);
      deref->deref_array_type = nir_deref_array_type_wildcard;

      /* Set the tail of both as the newly created wildcard deref.  It is
       * safe to use the same wildcard in both places because a) we will be
       * copying it before we put it in an actual instruction and b)
       * everything that will potentially add another link in the deref
       * chain will also add the same thing to both chains.
       */
      src_tail->child = &deref->deref;
      dest_tail->child = &deref->deref;

      split_var_copy_instr(old_copy, dest_head, src_head,
                           dest_tail->child, src_tail->child, state);

      /* Set it back to the way we found it */
      src_tail->child = NULL;
      dest_tail->child = NULL;
      break;
   }

   case GLSL_TYPE_STRUCT:
      /* This is the only part that actually does any interesting
       * splitting.  For array types, we just use wildcards and resolve
       * them later.  For structure types, we need to emit one copy
       * instruction for every structure element.  Because we may have
       * structs inside structs, we just recurse and let the next level
       * take care of any additional structures.
       */
      for (unsigned i = 0; i < glsl_get_length(src_tail->type); i++) {
         nir_deref_struct *deref = nir_deref_struct_create(state->dead_ctx, i);
         deref->deref.type = glsl_get_struct_field(src_tail->type, i);

         /* Set the tail of both as the newly created structure deref.  It
          * is safe to use the same struct deref in both places because a) we
          * will be copying it before we put it in an actual instruction
          * and b) everything that will potentially add another link in the
          * deref chain will also add the same thing to both chains.
          */
         src_tail->child = &deref->deref;
         dest_tail->child = &deref->deref;

         split_var_copy_instr(old_copy, dest_head, src_head,
                              dest_tail->child, src_tail->child, state);
      }
      /* Set it back to the way we found it */
      src_tail->child = NULL;
      dest_tail->child = NULL;
      break;

   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_matrix(src_tail->type)) {
         nir_deref_array *deref = nir_deref_array_create(state->dead_ctx);
         deref->deref.type = glsl_get_column_type(src_tail->type);
         deref->deref_array_type = nir_deref_array_type_wildcard;

         /* Set the tail of both as the newly created wildcard deref.  It
          * is safe to use the same wildcard in both places because a) we
          * will be copying it before we put it in an actual instruction
          * and b) everything that will potentially add another link in the
          * deref chain will also add the same thing to both chains.
          */
         src_tail->child = &deref->deref;
         dest_tail->child = &deref->deref;

         split_var_copy_instr(old_copy, dest_head, src_head,
                              dest_tail->child, src_tail->child, state);

         /* Set it back to the way we found it */
         src_tail->child = NULL;
         dest_tail->child = NULL;
      } else {
         /* At this point, we have fully built our deref chains and can
          * actually add the new copy instruction.
          */
         nir_intrinsic_instr *new_copy =
            nir_intrinsic_instr_create(state->mem_ctx, nir_intrinsic_copy_var);

         /* We need to make copies because a) this deref chain actually
          * belongs to the copy instruction and b) the deref chains may
          * have some of the same links due to the way we constructed them
          */
         nir_deref *src = nir_copy_deref(new_copy, src_head);
         nir_deref *dest = nir_copy_deref(new_copy, dest_head);

         new_copy->variables[0] = nir_deref_as_var(dest);
         new_copy->variables[1] = nir_deref_as_var(src);

         /* Emit the copy instruction after the old instruction.  We'll
          * remove the old one later.
          */
         nir_instr_insert_after(&old_copy->instr, &new_copy->instr);
         state->progress = true;
      }
      break;

   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_ATOMIC_UINT:
   case GLSL_TYPE_INTERFACE:
   default:
      unreachable("Cannot copy these types");
   }
}
Example #8
/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of structs
    * here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {

      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double element width for double types that take two slots */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}
Example #9
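/* Recursively loads or stores the SSA values in inout through the given
 * variable deref: vectors and scalars become a single load_var/store_var
 * intrinsic, while arrays, matrices, and structs recurse over their
 * elements by extending the deref chain one link at a time.
 */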
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
                      nir_deref *tail, struct vtn_ssa_value *inout)
{
   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = tail->child;

   if (glsl_type_is_vector_or_scalar(tail->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      tail->child = NULL;

      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
                                   nir_intrinsic_store_var;

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
      intrin->variables[0] =
         nir_deref_as_var(nir_copy_deref(intrin, &deref->deref));
      intrin->num_components = glsl_get_vector_elements(tail->type);

      if (load) {
         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                           intrin->num_components,
                           glsl_get_bit_size(glsl_get_base_type(tail->type)),
                           NULL);
         inout->def = &intrin->dest.ssa;
      } else {
         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
         intrin->src[0] = nir_src_for_ssa(inout->def);
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(tail->type)) {
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_array *deref_arr = nir_deref_array_create(b);
      deref_arr->deref_array_type = nir_deref_array_type_direct;
      deref_arr->deref.type = glsl_get_array_element(tail->type);
      tail->child = &deref_arr->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_arr->base_offset = i;
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   } else {
      assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
      tail->child = &deref_struct->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_struct->index = i;
         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   }

   tail->child = old_child;
}