Example #1
    nir_foreach_instr_safe(instr, block) {
        if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_load_var) {
                nir_deref_var *dvar = intr->variables[0];
                nir_variable *var = dvar->var;

                if (var->data.mode == nir_var_shader_in &&
                        var->data.location == VARYING_SLOT_POS) {
                    /* gl_FragCoord should not have array/struct derefs: */
                    assert(dvar->deref.child == NULL);
                    lower_fragcoord(state, intr);
                } else if (var->data.mode == nir_var_system_value &&
                           var->data.location == SYSTEM_VALUE_SAMPLE_POS) {
                    assert(dvar->deref.child == NULL);
                    lower_load_sample_pos(state, intr);
                }
            } else if (intr->intrinsic == nir_intrinsic_interp_var_at_offset) {
                lower_interp_var_at_offset(state, intr);
            }
        } else if (instr->type == nir_instr_type_alu) {
            nir_alu_instr *alu = nir_instr_as_alu(instr);
            if (alu->op == nir_op_fddy ||
                    alu->op == nir_op_fddy_fine ||
                    alu->op == nir_op_fddy_coarse)
                lower_fddy(state, alu);
        }
    }
Example #2
static bool
constant_fold_block(nir_block *block, void *void_state)
{
   struct constant_fold_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_alu:
         state->progress |= constant_fold_alu_instr(nir_instr_as_alu(instr),
                                                    state->mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         state->progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         state->progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return true;
}
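
Callbacks like constant_fold_block() follow the old callback-style iterator API, in which nir_foreach_block() walks every block of a nir_function_impl and threads the user state through a void pointer. The driving wrapper is not shown above; here is a minimal sketch of what it could look like, assuming a constant_fold_state struct with the mem_ctx and progress fields used in the callback (the constant_fold_impl name is hypothetical):

static bool
constant_fold_impl(nir_function_impl *impl, void *mem_ctx)
{
   struct constant_fold_state state;
   state.mem_ctx = mem_ctx;
   state.progress = false;

   /* Callback-style iteration: every block in the impl is handed to
    * constant_fold_block() along with our state. */
   nir_foreach_block(impl, constant_fold_block, &state);

   return state.progress;
}
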
Example #3
static bool
lower_vec_to_movs_block(nir_block *block, void *void_state)
{
   struct vec_to_movs_state *state = void_state;
   nir_function_impl *impl = state->impl;
   nir_shader *shader = impl->overload->function->shader;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *vec = nir_instr_as_alu(instr);

      switch (vec->op) {
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         break;
      default:
         continue; /* The loop */
      }

      /* Since we insert multiple MOVs, we have to be non-SSA. */
      assert(!vec->dest.dest.is_ssa);

      unsigned finished_write_mask = 0;

      /* First, emit a MOV for all the src channels that are in the
       * destination reg, in case other values we're populating in the dest
       * might overwrite them.
       */
      for (unsigned i = 0, src_idx = 0; i < 4; i++) {
         if (!(vec->dest.write_mask & (1 << i)))
            continue;

         if (src_matches_dest_reg(&vec->dest.dest, &vec->src[src_idx].src)) {
            finished_write_mask |= insert_mov(vec, i, src_idx, shader);
            break;
         }
         src_idx++;
      }

      /* Now, emit MOVs for all the other src channels. */
      for (unsigned i = 0, src_idx = 0; i < 4; i++) {
         if (!(vec->dest.write_mask & (1 << i)))
            continue;

         if (!(finished_write_mask & (1 << i)))
            finished_write_mask |= insert_mov(vec, i, src_idx, shader);

         src_idx++;
      }

      nir_instr_remove(&vec->instr);
      ralloc_free(vec);
      state->progress = true;
   }

   return true;
}
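
The src_matches_dest_reg() helper used above is not shown. A plausible sketch, assuming it only has to handle the non-SSA registers this pass operates on: a source "matches" the destination when it reads the same register at the same offset, which is what forces that channel's MOV to be emitted first so later MOVs don't clobber its input:

static bool
src_matches_dest_reg(nir_dest *dest, nir_src *src)
{
   /* An SSA value can never alias a register destination. */
   if (dest->is_ssa || src->is_ssa)
      return false;

   return dest->reg.reg == src->reg.reg &&
          dest->reg.base_offset == src->reg.base_offset &&
          !dest->reg.indirect &&
          !src->reg.indirect;
}
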
Example #4
static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   unsigned dest_size =
      dest->dest.is_ssa ? dest->dest.ssa.num_components
                        : dest->dest.reg.reg->num_components;
   bool is_packed = !dest->dest.is_ssa && dest->dest.reg.reg->is_packed;
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, is_packed || !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
          (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
           nir_type_float) ||
          !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}
Example #5
static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   /* Don't know what to do with non-ssa sources */
   if (!src->src.is_ssa)
      return false;

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_var:
         return src_intrin->variables[0]->var->data.mode == nir_var_shader_in ||
                src_intrin->variables[0]->var->data.mode == nir_var_uniform;

      case nir_intrinsic_interp_var_at_centroid:
      case nir_intrinsic_interp_var_at_sample:
      case nir_intrinsic_interp_var_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_uniform_indirect:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ubo_indirect:
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_input_indirect:
         return true;
      default:
         break;
      }
   }

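   /* Fall through: any intrinsic not matched above is rejected by the
    * default case. */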
   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}
Example #6
static nir_alu_instr *
get_parent_mov(nir_ssa_def *ssa)
{
   if (ssa->parent_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
   return (alu->op == nir_op_mov) ? alu : NULL;
}
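
A hypothetical caller built on top of get_parent_mov(): walk an SSA value back through a chain of movs to the definition that actually computes it. The skip_movs name is made up, and swizzles and source modifiers are deliberately ignored to keep the sketch short:

static nir_ssa_def *
skip_movs(nir_ssa_def *ssa)
{
   nir_alu_instr *mov = get_parent_mov(ssa);

   /* Keep chasing as long as the defining instruction is a mov whose
    * source is itself an SSA value. */
   while (mov != NULL && mov->src[0].src.is_ssa) {
      ssa = mov->src[0].src.ssa;
      mov = get_parent_mov(ssa);
   }

   return ssa;
}
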
Example #7
static bool
convert_block(nir_block *block, void *state)
{
   nir_builder *b = state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type == nir_instr_type_alu)
         convert_instr(b, nir_instr_as_alu(instr));
   }

   return true;
}
Example #8
static bool
opt_undef_block(nir_block *block, void *data)
{
   bool *progress = data;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type == nir_instr_type_alu)
         if (opt_undef_alu(nir_instr_as_alu(instr)))
             (*progress) = true;
   }

   return true;
}
Example #9
static void
init_instr(nir_instr *instr, struct exec_list *worklist)
{
   nir_alu_instr *alu_instr;
   nir_intrinsic_instr *intrin_instr;
   nir_tex_instr *tex_instr;

   /* We use the pass_flags to store the live/dead information.  In DCE, we
    * just treat it as a zero/non-zero boolean for whether or not the
    * instruction is live.
    */
   instr->pass_flags = 0;

   switch (instr->type) {
   case nir_instr_type_call:
   case nir_instr_type_jump:
      worklist_push(worklist, instr);
      break;

   case nir_instr_type_alu:
      alu_instr = nir_instr_as_alu(instr);
      if (!alu_instr->dest.dest.is_ssa)
         worklist_push(worklist, instr);
      break;

   case nir_instr_type_intrinsic:
      intrin_instr = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin_instr->intrinsic].flags &
          NIR_INTRINSIC_CAN_ELIMINATE) {
         if (nir_intrinsic_infos[intrin_instr->intrinsic].has_dest &&
             !intrin_instr->dest.is_ssa) {
            worklist_push(worklist, instr);
         }
      } else {
         worklist_push(worklist, instr);
      }
      break;

   case nir_instr_type_tex:
      tex_instr = nir_instr_as_tex(instr);
      if (!tex_instr->dest.is_ssa)
         worklist_push(worklist, instr);
      break;

   default:
      break;
   }
}
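
init_instr() only seeds the worklist with instructions that are live by themselves: calls, jumps, intrinsics with side effects, and anything writing a non-SSA destination. A rough sketch of the mark phase that would drain that worklist, assuming a worklist_pop() counterpart to the worklist_push() used above (both the pop helper and the callback name here are assumptions):

static bool
mark_srcs_live_cb(nir_src *src, void *void_worklist)
{
   struct exec_list *worklist = void_worklist;

   /* Queue the defining instruction the first time we see it live. */
   if (src->is_ssa && src->ssa->parent_instr->pass_flags == 0) {
      src->ssa->parent_instr->pass_flags = 1;
      worklist_push(worklist, src->ssa->parent_instr);
   }

   return true;
}

static void
mark_live(struct exec_list *worklist)
{
   nir_instr *instr;

   /* Pop live instructions and propagate liveness to their sources. */
   while ((instr = worklist_pop(worklist)) != NULL) {
      instr->pass_flags = 1;
      nir_foreach_src(instr, mark_srcs_live_cb, worklist);
   }
}
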
Example #10
static void
validate_instr(nir_instr *instr, validate_state *state)
{
   assert(instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      break;

   default:
      assert(!"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}
Example #11
 nir_foreach_instr(instr, block) {
    switch (instr->type) {
    case nir_instr_type_alu:
       gather_alu_info(nir_instr_as_alu(instr), shader);
       break;
    case nir_instr_type_intrinsic:
       gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
       break;
    case nir_instr_type_tex:
       gather_tex_info(nir_instr_as_tex(instr), shader);
       break;
    case nir_instr_type_call:
       assert(!"nir_shader_gather_info only works if functions are inlined");
       break;
    default:
       break;
    }
 }
Example #12
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}
Example #13
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_intrinsic) {
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic == nir_intrinsic_load_deref) {
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            if ((var->data.mode == nir_var_shader_in &&
                 var->data.location == VARYING_SLOT_POS) ||
                (var->data.mode == nir_var_system_value &&
                 var->data.location == SYSTEM_VALUE_FRAG_COORD)) {
               /* gl_FragCoord should not have array/struct derefs: */
               lower_fragcoord(state, intr, var);
            } else if (var->data.mode == nir_var_system_value &&
                       var->data.location == SYSTEM_VALUE_SAMPLE_POS) {
               lower_load_sample_pos(state, intr);
            } else if (var->data.mode == nir_var_shader_in &&
                       var->data.location == VARYING_SLOT_PNTC &&
                       state->shader->options->lower_wpos_pntc) {
               lower_load_pointcoord(state, intr);
            }
         } else if (intr->intrinsic == nir_intrinsic_load_frag_coord) {
            lower_fragcoord(state, intr, NULL);
         } else if (intr->intrinsic == nir_intrinsic_load_sample_pos) {
            lower_load_sample_pos(state, intr);
         } else if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
            lower_interp_deref_at_offset(state, intr);
         }
      } else if (instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (alu->op == nir_op_fddy ||
             alu->op == nir_op_fddy_fine ||
             alu->op == nir_op_fddy_coarse)
            lower_fddy(state, alu);
      }
   }
Example #14
 nir_foreach_instr_safe(block, instr) {
    if (instr->type == nir_instr_type_alu)
       lower_alu_instr_scalar(nir_instr_as_alu(instr), data);
 }
Example #15
static void
propagate_invariant_instr(nir_instr *instr, struct set *invariants)
{
   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      if (!dest_is_invariant(&alu->dest.dest, invariants))
         break;

      alu->exact = true;
      nir_foreach_src(instr, add_src_cb, invariants);
      break;
   }

   case nir_instr_type_tex: {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      if (dest_is_invariant(&tex->dest, invariants))
         nir_foreach_src(instr, add_src_cb, invariants);
      break;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_copy_var:
         /* If the destination is invariant then so is the source */
         if (var_is_invariant(intrin->variables[0]->var, invariants))
            add_var(intrin->variables[1]->var, invariants);
         break;

      case nir_intrinsic_load_var:
         if (dest_is_invariant(&intrin->dest, invariants))
            add_var(intrin->variables[0]->var, invariants);
         break;

      case nir_intrinsic_store_var:
         if (var_is_invariant(intrin->variables[0]->var, invariants))
            add_src(&intrin->src[0], invariants);
         break;

      default:
         /* Nothing to do */
         break;
      }
      break;
   }

   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_load_const:
      break; /* Nothing to do */

   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (!dest_is_invariant(&phi->dest, invariants))
         break;

      nir_foreach_phi_src(src, phi) {
         add_src(&src->src, invariants);
         add_cf_node(&src->pred->cf_node, invariants);
      }
      break;
   }

   case nir_instr_type_call:
      unreachable("This pass must be run after function inlining");

   case nir_instr_type_parallel_copy:
   default:
      unreachable("Cannot have this instruction type");
   }
}
Example #16
   nir_foreach_instr(block, instr) {
      switch (instr->type) {
      case nir_instr_type_alu: {
         /* For ALU instructions, the resolve status is handled in a
          * three-step process.
          *
          * 1) Look at the instruction type and sources and determine if it
          *    can be left unresolved.
          *
          * 2) Look at the destination and see if we have to resolve
          *    anyway.  (This is the case if this instruction is not the
          *    only instruction writing to a given register.)
          *
          * 3) If the instruction has a resolve status other than
          *    BOOL_UNRESOLVED or BOOL_NEEDS_RESOLVE then we walk through
          *    the sources and ensure that they are also resolved.  This
          *    ensures that we don't end up with any stray unresolved
          *    booleans going into ADDs or something like that.
          */

         uint8_t resolve_status;
         nir_alu_instr *alu = nir_instr_as_alu(instr);
         switch (alu->op) {
         case nir_op_bany2:
         case nir_op_bany3:
         case nir_op_bany4:
         case nir_op_ball_fequal2:
         case nir_op_ball_iequal2:
         case nir_op_ball_fequal3:
         case nir_op_ball_iequal3:
         case nir_op_ball_fequal4:
         case nir_op_ball_iequal4:
         case nir_op_bany_fnequal2:
         case nir_op_bany_inequal2:
         case nir_op_bany_fnequal3:
         case nir_op_bany_inequal3:
         case nir_op_bany_fnequal4:
         case nir_op_bany_inequal4:
            /* These are only implemented by the vec4 backend and its
             * implementation emits resolved booleans.  At some point in the
             * future, this may change and we'll have to remove some of the
             * above cases.
             */
            resolve_status = BRW_NIR_BOOLEAN_NO_RESOLVE;
            break;

         case nir_op_imov:
         case nir_op_inot:
            /* This is a single-source instruction.  Just copy the resolve
             * status from the source.
             */
            resolve_status = get_resolve_status_for_src(&alu->src[0].src);
            break;

         case nir_op_iand:
         case nir_op_ior:
         case nir_op_ixor: {
            uint8_t src0_status = get_resolve_status_for_src(&alu->src[0].src);
            uint8_t src1_status = get_resolve_status_for_src(&alu->src[1].src);

            if (src0_status == src1_status) {
               resolve_status = src0_status;
            } else if (src0_status == BRW_NIR_NON_BOOLEAN ||
                       src1_status == BRW_NIR_NON_BOOLEAN) {
               /* If one of the sources is a non-boolean then the whole
                * thing is a non-boolean.
                */
               resolve_status = BRW_NIR_NON_BOOLEAN;
            } else {
               /* At this point one of them is a true boolean and one is a
                * boolean that needs a resolve.  We could either resolve the
                * unresolved source or we could resolve here.  If we resolve
                * the unresolved source then we get two resolves for the price
                * of one.  Just set this one to BOOLEAN_NO_RESOLVE and we'll
                * let the code below force a resolve on the unresolved source.
                */
               resolve_status = BRW_NIR_BOOLEAN_NO_RESOLVE;
            }
            break;
         }

         default:
            if (nir_op_infos[alu->op].output_type == nir_type_bool) {
               /* These instructions will turn into a CMP when we actually emit
                * them so the result will have to be resolved before it can be
                * used.
                */
               resolve_status = BRW_NIR_BOOLEAN_UNRESOLVED;

               /* Even though the destination is allowed to be left
                * unresolved, the sources are treated as regular integers or
                * floats so they need to be resolved.
                */
               nir_foreach_src(instr, src_mark_needs_resolve, NULL);
            } else {
               resolve_status = BRW_NIR_NON_BOOLEAN;
            }
         }

          /* If the destination is SSA, go ahead and allow unresolved booleans.
          * If the destination register doesn't have a well-defined parent_instr
          * we need to resolve immediately.
          */
         if (!alu->dest.dest.is_ssa &&
             resolve_status == BRW_NIR_BOOLEAN_UNRESOLVED) {
            resolve_status = BRW_NIR_BOOLEAN_NEEDS_RESOLVE;
         }

         instr->pass_flags = (instr->pass_flags & ~BRW_NIR_BOOLEAN_MASK) |
                             resolve_status;

         /* Finally, resolve sources if it's needed */
         switch (resolve_status) {
         case BRW_NIR_BOOLEAN_NEEDS_RESOLVE:
         case BRW_NIR_BOOLEAN_UNRESOLVED:
            /* This instruction is either unresolved or we're doing the
             * resolve here; leave the sources alone.
             */
            break;

         case BRW_NIR_BOOLEAN_NO_RESOLVE:
         case BRW_NIR_NON_BOOLEAN:
            nir_foreach_src(instr, src_mark_needs_resolve, NULL);
            break;

         default:
            unreachable("Invalid boolean flag");
         }

         break;
      }

      case nir_instr_type_load_const: {
         nir_load_const_instr *load = nir_instr_as_load_const(instr);

         /* For load_const instructions, it's a boolean exactly when it holds
          * one of the values NIR_TRUE or NIR_FALSE.
          *
          * Since load_const instructions don't have any sources, we don't
          * have to worry about resolving them.
          */
         instr->pass_flags &= ~BRW_NIR_BOOLEAN_MASK;
         if (load->value.u[0] == NIR_TRUE || load->value.u[0] == NIR_FALSE) {
            instr->pass_flags |= BRW_NIR_BOOLEAN_NO_RESOLVE;
         } else {
            instr->pass_flags |= BRW_NIR_NON_BOOLEAN;
         }
         continue;
      }

      default:
         /* Everything else is an unknown non-boolean value and needs to
          * have all sources resolved.
          */
         instr->pass_flags = (instr->pass_flags & ~BRW_NIR_BOOLEAN_MASK) |
                             BRW_NIR_NON_BOOLEAN;
         nir_foreach_src(instr, src_mark_needs_resolve, NULL);
         continue;
      }
   }
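
The get_resolve_status_for_src() helper referenced throughout the ALU case is not shown. A minimal sketch, assuming the status is read straight back out of the pass_flags that this pass writes (a real implementation may need to handle registers and mov chains more carefully):

static uint8_t
get_resolve_status_for_src(nir_src *src)
{
   if (src->is_ssa) {
      /* The analysis stores each instruction's status in pass_flags, so
       * read it from the instruction that produced this value. */
      return src->ssa->parent_instr->pass_flags & BRW_NIR_BOOLEAN_MASK;
   }

   /* A register has no single well-defined producer; be conservative. */
   return BRW_NIR_NON_BOOLEAN;
}
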
Example #17
static bool
match_value(const nir_search_value *value, nir_alu_instr *instr, unsigned src,
            unsigned num_components, const uint8_t *swizzle,
            struct match_state *state)
{
    uint8_t new_swizzle[4];

    /* If the source is an explicitly sized source, then we need to reset
     * both the number of components and the swizzle.
     */
    if (nir_op_infos[instr->op].input_sizes[src] != 0) {
        num_components = nir_op_infos[instr->op].input_sizes[src];
        swizzle = identity_swizzle;
    }

    for (unsigned i = 0; i < num_components; ++i)
        new_swizzle[i] = instr->src[src].swizzle[swizzle[i]];

    switch (value->type) {
    case nir_search_value_expression:
        if (!instr->src[src].src.is_ssa)
            return false;

        if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
            return false;

        return match_expression(nir_search_value_as_expression(value),
                                nir_instr_as_alu(instr->src[src].src.ssa->parent_instr),
                                num_components, new_swizzle, state);

    case nir_search_value_variable: {
        nir_search_variable *var = nir_search_value_as_variable(value);
        assert(var->variable < NIR_SEARCH_MAX_VARIABLES);

        if (state->variables_seen & (1 << var->variable)) {
            if (!nir_srcs_equal(state->variables[var->variable].src,
                                instr->src[src].src))
                return false;

            assert(!instr->src[src].abs && !instr->src[src].negate);

            for (unsigned i = 0; i < num_components; ++i) {
                if (state->variables[var->variable].swizzle[i] != new_swizzle[i])
                    return false;
            }

            return true;
        } else {
            if (var->is_constant &&
                    instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
                return false;

            if (var->type != nir_type_invalid) {
                if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
                    return false;

                nir_alu_instr *src_alu =
                    nir_instr_as_alu(instr->src[src].src.ssa->parent_instr);

                if (nir_op_infos[src_alu->op].output_type != var->type &&
                        !(var->type == nir_type_bool && alu_instr_is_bool(src_alu)))
                    return false;
            }

            state->variables_seen |= (1 << var->variable);
            state->variables[var->variable].src = instr->src[src].src;
            state->variables[var->variable].abs = false;
            state->variables[var->variable].negate = false;

            for (unsigned i = 0; i < 4; ++i) {
                if (i < num_components)
                    state->variables[var->variable].swizzle[i] = new_swizzle[i];
                else
                    state->variables[var->variable].swizzle[i] = 0;
            }

            return true;
        }
    }

    case nir_search_value_constant: {
        nir_search_constant *const_val = nir_search_value_as_constant(value);

        if (!instr->src[src].src.is_ssa)
            return false;

        if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
            return false;

        nir_load_const_instr *load =
            nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);

        switch (nir_op_infos[instr->op].input_types[src]) {
        case nir_type_float:
            for (unsigned i = 0; i < num_components; ++i) {
                if (load->value.f[new_swizzle[i]] != const_val->data.f)
                    return false;
            }
            return true;
        case nir_type_int:
        case nir_type_unsigned:
        case nir_type_bool:
            for (unsigned i = 0; i < num_components; ++i) {
                if (load->value.i[new_swizzle[i]] != const_val->data.i)
                    return false;
            }
            return true;
        default:
            unreachable("Invalid alu source type");
        }
    }

    default:
        unreachable("Invalid search value type");
    }
}