static void
lower_load_const_instr_scalar(nir_load_const_instr *lower)
{
   if (lower->def.num_components == 1)
      return;

   nir_builder b;
   nir_builder_init(&b, nir_cf_node_get_function(&lower->instr.block->cf_node));
   b.cursor = nir_before_instr(&lower->instr);

   /* Emit the individual loads. */
   nir_ssa_def *loads[4];
   for (unsigned i = 0; i < lower->def.num_components; i++) {
      nir_load_const_instr *load_comp = nir_load_const_instr_create(b.shader, 1);
      load_comp->value.u[0] = lower->value.u[i];
      nir_builder_instr_insert(&b, &load_comp->instr);
      loads[i] = &load_comp->def;
   }

   /* Batch things back together into a vector. */
   nir_ssa_def *vec = nir_vec(&b, loads, lower->def.num_components);

   /* Replace the old load with a reference to our reconstructed vector. */
   nir_ssa_def_rewrite_uses(&lower->def, nir_src_for_ssa(vec));
   nir_instr_remove(&lower->instr);
}
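/* Usage sketch (not part of the original snippet): one way to drive the
 * helper above over a whole function. The wrapper name and the iterator
 * macros in this argument order (nir_foreach_block(block, impl),
 * nir_foreach_instr_safe(instr, block)) are assumptions based on the
 * modern NIR API; older trees used callback-style iteration instead.
 */
static void
lower_load_const_to_scalar_impl(nir_function_impl *impl)
{
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type == nir_instr_type_load_const)
            lower_load_const_instr_scalar(nir_instr_as_load_const(instr));
      }
   }
}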
static void
lower_load_store(nir_builder *b,
                 nir_intrinsic_instr *intrin,
                 glsl_type_size_align_func size_align)
{
   b->cursor = nir_before_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_build_deref_offset(b, deref, size_align),
                      var->data.location);

   unsigned align, UNUSED size;
   size_align(deref->type, &size, &align);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_scratch);
      load->num_components = intrin->num_components;
      load->src[0] = nir_src_for_ssa(offset);
      nir_intrinsic_set_align(load, align, 0);
      /* Booleans live as 32-bit integers in scratch memory. */
      unsigned bit_size = glsl_type_is_boolean(deref->type) ?
                          32 : intrin->dest.ssa.bit_size;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components, bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def *value = &load->dest.ssa;
      if (glsl_type_is_boolean(deref->type))
         value = nir_i2b(b, value);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(value));
   } else {
      assert(intrin->intrinsic == nir_intrinsic_store_deref);

      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      if (glsl_type_is_boolean(deref->type))
         value = nir_b2i32(b, value);

      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_scratch);
      store->num_components = intrin->num_components;
      store->src[0] = nir_src_for_ssa(value);
      store->src[1] = nir_src_for_ssa(offset);
      nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
      nir_intrinsic_set_align(store, align, 0);
      nir_builder_instr_insert(b, &store->instr);
   }

   nir_instr_remove(&intrin->instr);
   nir_deref_instr_remove_if_unused(deref);
}
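/* Effect sketch (hand-written pseudo-IR, not tool output): a load through a
 * function_temp deref becomes an explicit offset computation plus a scratch
 * load. Offsets and alignment are illustrative.
 *
 *    ssa_2 = intrinsic load_deref (ssa_1) ()
 * =>
 *    ssa_3 = iadd ssa_deref_offset, var_base   // nir_build_deref_offset + location
 *    ssa_4 = intrinsic load_scratch (ssa_3) (align_mul=4)
 */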
static bool
remove_phis_block(nir_block *block, void *state)
{
   bool *progress = state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      nir_ssa_def *def = NULL;
      bool srcs_same = true;

      nir_foreach_phi_src(phi, src) {
         assert(src->src.is_ssa);

         /* For phi nodes at the beginning of loops, we may encounter some
          * sources from backedges that point back to the destination of the
          * same phi, i.e. something like:
          *
          * a = phi(a, b, ...)
          *
          * We can safely ignore these sources, since if all of the normal
          * sources point to the same definition, then that definition must
          * still dominate the phi node, and the phi will still always take
          * the value of that definition.
          */
         if (src->src.ssa == &phi->dest.ssa)
            continue;
         
         if (def == NULL) {
            def = src->src.ssa;
         } else if (src->src.ssa != def) {
            srcs_same = false;
            break;
         }
      }

      if (!srcs_same)
         continue;

      /* We must have found at least one definition, since there must be at
       * least one forward edge.
       */
      assert(def != NULL);

      assert(phi->dest.is_ssa);
      nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(def));
      nir_instr_remove(instr);

      *progress = true;
   }

   return true;
}
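/* Illustration (hand-written pseudo-IR): a loop-header phi whose only
 * source besides its own backedge is a single definition collapses away.
 *
 *    block_0:
 *       ssa_1 = load_const (0x00000000)
 *    loop {
 *       block_1:
 *          ssa_2 = phi block_0: ssa_1, block_1: ssa_2   // backedge is itself
 *          ...
 *    }
 *
 * Every use of ssa_2 can be rewritten to ssa_1 and the phi removed.
 */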
static void
opt_constant_if(nir_if *if_stmt, bool condition)
{
   /* First, we need to remove any phi nodes after the if by rewriting uses to
    * point to the correct source.
    */
   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
   nir_block *last_block =
      nir_cf_node_as_block(condition ? nir_if_last_then_node(if_stmt)
                                     : nir_if_last_else_node(if_stmt));

   nir_foreach_instr_safe(after, instr) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      nir_ssa_def *def = NULL;
      nir_foreach_phi_src(phi, phi_src) {
         if (phi_src->pred != last_block)
            continue;

         assert(phi_src->src.is_ssa);
         def = phi_src->src.ssa;
      }

      assert(def);
      assert(phi->dest.is_ssa);
      nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(def));
      nir_instr_remove(instr);
   }

   /* The control flow list we're about to paste in may include a jump at the
    * end, and in that case we have to delete the rest of the control flow
    * list after the if since it's unreachable and the validator will balk if
    * we don't.
    */

   if (!exec_list_is_empty(&last_block->instr_list)) {
      nir_instr *last_instr = nir_block_last_instr(last_block);
      if (last_instr->type == nir_instr_type_jump)
         remove_after_cf_node(&if_stmt->cf_node);
   }

   /* Finally, actually paste in the then or else branch and delete the if. */
   struct exec_list *cf_list = condition ? &if_stmt->then_list
                                         : &if_stmt->else_list;

   nir_cf_list list;
   nir_cf_extract(&list, nir_before_cf_list(cf_list),
                  nir_after_cf_list(cf_list));
   nir_cf_reinsert(&list, nir_after_cf_node(&if_stmt->cf_node));
   nir_cf_node_remove(&if_stmt->cf_node);
}
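/* Effect sketch: with a condition known to be true, the then-branch is
 * spliced in after the if and the if itself is removed:
 *
 *    if (true) { A } else { B }
 *    C
 * =>
 *    A
 *    C
 *
 * If A ends in a jump (break/continue/return), C is deleted first as
 * unreachable, which is what remove_after_cf_node() handles above.
 */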
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[4];

   if (!instr->dest.dest.is_ssa)
      return false;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(&new_instr->def),
                            mem_ctx);

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
nir_alu_instr *
nir_replace_instr(nir_alu_instr *instr, const nir_search_expression *search,
                  const nir_search_value *replace, void *mem_ctx)
{
    uint8_t swizzle[4] = { 0, 0, 0, 0 };

    for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
        swizzle[i] = i;

    assert(instr->dest.dest.is_ssa);

    struct match_state state;
    state.variables_seen = 0;

    if (!match_expression(search, instr, instr->dest.dest.ssa.num_components,
                          swizzle, &state))
        return NULL;

    /* Inserting a mov may be unnecessary.  However, it's much easier to
     * simply let copy propagation clean this up than to try to go through
     * and rewrite swizzles ourselves.
     */
    nir_alu_instr *mov = nir_alu_instr_create(mem_ctx, nir_op_imov);
    mov->dest.write_mask = instr->dest.write_mask;
    nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                      instr->dest.dest.ssa.num_components, NULL);

    mov->src[0] = construct_value(replace, nir_op_infos[instr->op].output_type,
                                  instr->dest.dest.ssa.num_components, &state,
                                  &instr->instr, mem_ctx);
    nir_instr_insert_before(&instr->instr, &mov->instr);

    nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                             nir_src_for_ssa(&mov->dest.dest.ssa), mem_ctx);

    /* We know this one has no more uses because we just rewrote them all,
     * so we can remove it.  The rest of the matched expression, however, we
     * don't know so much about.  We'll just let dead code clean them up.
     */
    nir_instr_remove(&instr->instr);

    return mov;
}
static bool
lower_indirect_block(nir_block *block, nir_builder *b,
                     nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (intrin->intrinsic != nir_intrinsic_load_var &&
          intrin->intrinsic != nir_intrinsic_store_var)
         continue;

      if (!deref_has_indirect(intrin->variables[0]))
         continue;

      /* Only lower variables whose mode is in the mask */
      if (!(modes & intrin->variables[0]->var->data.mode))
         continue;

      b->cursor = nir_before_instr(&intrin->instr);

      if (intrin->intrinsic == nir_intrinsic_load_var) {
         nir_ssa_def *result;
         emit_load_store(b, intrin, intrin->variables[0],
                         &intrin->variables[0]->deref, &result, NULL);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(result));
      } else {
         assert(intrin->src[0].is_ssa);
         emit_load_store(b, intrin, intrin->variables[0],
                         &intrin->variables[0]->deref, NULL, intrin->src[0].ssa);
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static void
lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
                void *mem_ctx)
{
   unsigned num_components = nir_op_infos[instr->op].input_sizes[0];

   nir_ssa_def *last = NULL;
   for (unsigned i = 0; i < num_components; i++) {
      nir_alu_instr *chan = nir_alu_instr_create(mem_ctx, chan_op);
      nir_alu_ssa_dest_init(chan, 1);
      nir_alu_src_copy(&chan->src[0], &instr->src[0], mem_ctx);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &instr->src[1], mem_ctx);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }

      nir_instr_insert_before(&instr->instr, &chan->instr);

      if (i == 0) {
         last = &chan->dest.dest.ssa;
      } else {
         nir_alu_instr *merge = nir_alu_instr_create(mem_ctx, merge_op);
         nir_alu_ssa_dest_init(merge, 1);
         merge->dest.write_mask = 1;
         merge->src[0].src = nir_src_for_ssa(last);
         merge->src[1].src = nir_src_for_ssa(&chan->dest.dest.ssa);
         nir_instr_insert_before(&instr->instr, &merge->instr);
         last = &merge->dest.dest.ssa;
      }
   }

   assert(instr->dest.write_mask == 1);
   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(last),
                            mem_ctx);
   nir_instr_remove(&instr->instr);
}
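/* Standalone illustration (plain C, not NIR) of the expansion that
 * lower_reduction() builds, using fdot3 as the example: chan_op = fmul
 * per component, merge_op = fadd to fold the channels together.
 */
static float
fdot3_lowered(const float a[3], const float b[3])
{
   float last = a[0] * b[0];    /* first channel needs no merge */
   last = last + a[1] * b[1];   /* merge_op = fadd */
   last = last + a[2] * b[2];
   return last;
}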
static void
vc4_nir_lower_txf_ms_instr(struct vc4_compile *c, nir_builder *b,
                           nir_tex_instr *txf_ms)
{
    if (txf_ms->op != nir_texop_txf_ms)
        return;

    b->cursor = nir_before_instr(&txf_ms->instr);

    nir_tex_instr *txf = nir_tex_instr_create(c->s, 1);
    txf->op = nir_texop_txf;
    txf->sampler = txf_ms->sampler;
    txf->sampler_index = txf_ms->sampler_index;
    txf->coord_components = txf_ms->coord_components;
    txf->is_shadow = txf_ms->is_shadow;
    txf->is_new_style_shadow = txf_ms->is_new_style_shadow;

    nir_ssa_def *coord = NULL, *sample_index = NULL;
    for (int i = 0; i < txf_ms->num_srcs; i++) {
        assert(txf_ms->src[i].src.is_ssa);

        switch (txf_ms->src[i].src_type) {
        case nir_tex_src_coord:
            coord = txf_ms->src[i].src.ssa;
            break;
        case nir_tex_src_ms_index:
            sample_index = txf_ms->src[i].src.ssa;
            break;
        default:
            unreachable("Unknown txf_ms src\n");
        }
    }
    assert(coord);
    assert(sample_index);

    nir_ssa_def *x = nir_channel(b, coord, 0);
    nir_ssa_def *y = nir_channel(b, coord, 1);

    uint32_t tile_w = 32;
    uint32_t tile_h = 32;
    uint32_t tile_w_shift = 5;
    uint32_t tile_h_shift = 5;
    uint32_t tile_size = (tile_h * tile_w *
                          VC4_MAX_SAMPLES * sizeof(uint32_t));
    unsigned unit = txf_ms->sampler_index;
    uint32_t w = align(c->key->tex[unit].msaa_width, tile_w);
    uint32_t w_tiles = w / tile_w;

    nir_ssa_def *x_tile = nir_ushr(b, x, nir_imm_int(b, tile_w_shift));
    nir_ssa_def *y_tile = nir_ushr(b, y, nir_imm_int(b, tile_h_shift));
    nir_ssa_def *tile_addr = nir_iadd(b,
                                      nir_imul(b, x_tile,
                                              nir_imm_int(b, tile_size)),
                                      nir_imul(b, y_tile,
                                              nir_imm_int(b, (w_tiles *
                                                      tile_size))));
    nir_ssa_def *x_subspan = nir_iand(b, x,
                                      nir_imm_int(b, (tile_w - 1) & ~1));
    nir_ssa_def *y_subspan = nir_iand(b, y,
                                      nir_imm_int(b, (tile_h - 1) & ~1));
    nir_ssa_def *subspan_addr = nir_iadd(b,
                                         nir_imul(b, x_subspan,
                                                 nir_imm_int(b, 2 * VC4_MAX_SAMPLES * sizeof(uint32_t))),
                                         nir_imul(b, y_subspan,
                                                 nir_imm_int(b,
                                                         tile_w *
                                                         VC4_MAX_SAMPLES *
                                                         sizeof(uint32_t))));

    nir_ssa_def *pixel_addr = nir_ior(b,
                                      nir_iand(b,
                                              nir_ishl(b, x,
                                                      nir_imm_int(b, 2)),
                                              nir_imm_int(b, (1 << 2))),
                                      nir_iand(b,
                                              nir_ishl(b, y,
                                                      nir_imm_int(b, 3)),
                                              nir_imm_int(b, (1 << 3))));

    nir_ssa_def *sample_addr = nir_ishl(b, sample_index, nir_imm_int(b, 4));

    nir_ssa_def *addr = nir_iadd(b,
                                 nir_ior(b, sample_addr, pixel_addr),
                                 nir_iadd(b, subspan_addr, tile_addr));

    txf->src[0].src_type = nir_tex_src_coord;
    txf->src[0].src = nir_src_for_ssa(nir_vec2(b, addr, nir_imm_int(b, 0)));
    nir_ssa_dest_init(&txf->instr, &txf->dest, 4, NULL);
    nir_builder_instr_insert(b, &txf->instr);
    nir_ssa_def_rewrite_uses(&txf_ms->dest.ssa,
                             nir_src_for_ssa(&txf->dest.ssa));
    nir_instr_remove(&txf_ms->instr);
}
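/* CPU-side mirror (illustrative only) of the address computation built
 * above, assuming the same 32x32 tile layout and the VC4_MAX_SAMPLES and
 * align() definitions used by the pass:
 */
static uint32_t
vc4_msaa_texel_addr(uint32_t x, uint32_t y, uint32_t sample_index,
                    uint32_t msaa_width)
{
   const uint32_t tile_w = 32, tile_h = 32;
   const uint32_t tile_size = tile_w * tile_h * VC4_MAX_SAMPLES * sizeof(uint32_t);
   uint32_t w_tiles = align(msaa_width, tile_w) / tile_w;

   uint32_t tile_addr = (x >> 5) * tile_size +
                        (y >> 5) * w_tiles * tile_size;
   uint32_t x_subspan = x & ((tile_w - 1) & ~1);
   uint32_t y_subspan = y & ((tile_h - 1) & ~1);
   uint32_t subspan_addr =
      x_subspan * 2 * VC4_MAX_SAMPLES * sizeof(uint32_t) +
      y_subspan * tile_w * VC4_MAX_SAMPLES * sizeof(uint32_t);
   uint32_t pixel_addr = ((x << 2) & (1 << 2)) | ((y << 3) & (1 << 3));
   uint32_t sample_addr = sample_index << 4;

   return (sample_addr | pixel_addr) + (subspan_addr + tile_addr);
}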
/* see emit_wpos_adjustment() in st_mesa_to_tgsi.c */
static void
emit_wpos_adjustment(lower_wpos_ytransform_state *state,
                     nir_intrinsic_instr *intr,
                     bool invert, float adjX, float adjY[2])
{
    nir_builder *b = &state->b;
    nir_variable *fragcoord = intr->variables[0]->var;
    nir_ssa_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;

    assert(intr->dest.is_ssa);

    b->cursor = nir_before_instr(&intr->instr);

    wpostrans = get_transform(state);
    wpos_input = nir_load_var(b, fragcoord);

    /* First, apply the coordinate shift: */
    if (adjX || adjY[0] || adjY[1]) {
        if (adjY[0] != adjY[1]) {
            /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
             * depending on whether inversion is actually going to be applied
             * or not, which is determined by testing against the inversion
             * state variable used below, which will be either +1 or -1.
             */
            nir_ssa_def *adj_temp;

            adj_temp = nir_cmp(b,
                               nir_channel(b, wpostrans, invert ? 2 : 0),
                               nir_imm_vec4(b, adjX, adjY[0], 0.0f, 0.0f),
                               nir_imm_vec4(b, adjX, adjY[1], 0.0f, 0.0f));

            wpos_temp = nir_fadd(b, wpos_input, adj_temp);
        } else {
            wpos_temp = nir_fadd(b,
                                 wpos_input,
                                 nir_imm_vec4(b, adjX, adjY[0], 0.0f, 0.0f));
        }
        wpos_input = wpos_temp;
    } else {
        /* MOV wpos_temp, input[wpos]
         */
        wpos_temp = wpos_input;
    }

    /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
     * inversion/identity, or the other way around if we're drawing to an FBO.
     */
    if (invert) {
        /* wpos_temp.y = wpos_input * wpostrans.xxxx + wpostrans.yyyy */
        wpos_temp_y = nir_fadd(b, nir_fmul(b, nir_channel(b, wpos_temp, 1),
                                           nir_channel(b, wpostrans, 0)),
                               nir_channel(b, wpostrans, 1));
    } else {
        /* wpos_temp.y = wpos_input * wpostrans.zzzz + wpostrans.wwww */
        wpos_temp_y = nir_fadd(b, nir_fmul(b, nir_channel(b, wpos_temp, 1),
                                           nir_channel(b, wpostrans, 2)),
                               nir_channel(b, wpostrans, 3));
    }

    wpos_temp = nir_vec4(b,
                         nir_channel(b, wpos_temp, 0),
                         wpos_temp_y,
                         nir_channel(b, wpos_temp, 2),
                         nir_channel(b, wpos_temp, 3));

    nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(wpos_temp));
}
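/* Scalar illustration (plain C) of the conditional y transform above:
 * wpostrans packs two (scale, offset) pairs, one of which is the identity
 * and the other the y flip, selected by whether we render to an FBO.
 */
static float
wpos_transform_y(const float wpostrans[4], float y, bool invert)
{
   return invert ? y * wpostrans[0] + wpostrans[1]   /* .x/.y pair */
                 : y * wpostrans[2] + wpostrans[3];  /* .z/.w pair */
}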
static void
convert_instr(nir_builder *bld, nir_alu_instr *alu)
{
   nir_ssa_def *numer, *denom, *af, *bf, *a, *b, *q, *r;
   nir_op op = alu->op;
   bool is_signed;

   if ((op != nir_op_idiv) &&
       (op != nir_op_udiv) &&
       (op != nir_op_umod))
      return;

   is_signed = (op == nir_op_idiv);

   bld->cursor = nir_before_instr(&alu->instr);

   numer = nir_ssa_for_alu_src(bld, alu, 0);
   denom = nir_ssa_for_alu_src(bld, alu, 1);

   if (is_signed) {
      af = nir_i2f(bld, numer);
      bf = nir_i2f(bld, denom);
      af = nir_fabs(bld, af);
      bf = nir_fabs(bld, bf);
      a  = nir_iabs(bld, numer);
      b  = nir_iabs(bld, denom);
   } else {
      af = nir_u2f(bld, numer);
      bf = nir_u2f(bld, denom);
      a  = numer;
      b  = denom;
   }

   /* get first result: */
   bf = nir_frcp(bld, bf);
   bf = nir_isub(bld, bf, nir_imm_int(bld, 2));  /* yes, really */
   q  = nir_fmul(bld, af, bf);

   if (is_signed) {
      q = nir_f2i(bld, q);
   } else {
      q = nir_f2u(bld, q);
   }

   /* get error of first result: */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);
   r = nir_u2f(bld, r);
   r = nir_fmul(bld, r, bf);
   r = nir_f2u(bld, r);

   /* add quotients: */
   q = nir_iadd(bld, q, r);

   /* correction: if modulus >= divisor, add 1 */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);

   r = nir_uge(bld, r, b);
   r = nir_b2i(bld, r);

   q = nir_iadd(bld, q, r);
   if (is_signed)  {
      /* fix the sign: */
      r = nir_ixor(bld, numer, denom);
      r = nir_ushr(bld, r, nir_imm_int(bld, 31));
      r = nir_i2b(bld, r);
      b = nir_ineg(bld, q);
      q = nir_bcsel(bld, r, b, q);
   }

   if (op == nir_op_umod) {
      /* division result in q */
      r = nir_imul(bld, q, b);
      q = nir_isub(bld, a, r);
   }

   assert(alu->dest.dest.is_ssa);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(q));
}
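/* Host-side sketch (plain C) of the same divide-via-reciprocal algorithm
 * for the unsigned case. Illustrative only: the hardware's frcp rounding
 * differs from 1.0f/x on the host, which is what the bit-level "- 2"
 * nudge compensates for in the pass above.
 */
static uint32_t
udiv_via_frcp(uint32_t a, uint32_t b)
{
   union { float f; uint32_t u; } rcp = { .f = 1.0f / (float)b };
   rcp.u -= 2;                                /* bias the estimate down */

   uint32_t q = (uint32_t)((float)a * rcp.f); /* first result */
   uint32_t r = a - q * b;                    /* error of first result */
   q += (uint32_t)((float)r * rcp.f);         /* add quotients */

   r = a - q * b;                             /* correction: off by at most 1 */
   if (r >= b)
      q++;
   return q;                                  /* umod would return a - q * b */
}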
static void
lower_alu_instr_scalar(nir_alu_instr *instr, void *mem_ctx)
{
   unsigned num_src = nir_op_infos[instr->op].num_inputs;
   unsigned i, chan;

   assert(instr->dest.dest.is_ssa);
   assert(instr->dest.write_mask != 0);

#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
      lower_reduction(instr, chan, merge, mem_ctx); \
      break;

   switch (instr->op) {
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
      /* We don't need to scalarize these ops, they're the ones generated to
       * group up outputs into a value that can be SSAed.
       */
      return;

      LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
      LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
      LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
      LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
      LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
      LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fand);
      LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_for);
      LOWER_REDUCTION(nir_op_ball, nir_op_imov, nir_op_iand);
      LOWER_REDUCTION(nir_op_bany, nir_op_imov, nir_op_ior);
      LOWER_REDUCTION(nir_op_fall, nir_op_fmov, nir_op_fand);
      LOWER_REDUCTION(nir_op_fany, nir_op_fmov, nir_op_for);

   default:
      break;
   }

   if (instr->dest.dest.ssa.num_components == 1)
      return;

   unsigned num_components = instr->dest.dest.ssa.num_components;
   static const nir_op nir_op_map[] = {nir_op_vec2, nir_op_vec3, nir_op_vec4};
   nir_alu_instr *vec_instr =
      nir_alu_instr_create(mem_ctx, nir_op_map[num_components - 2]);
   nir_alu_ssa_dest_init(vec_instr, num_components);

   for (chan = 0; chan < 4; chan++) {
      if (!(instr->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(mem_ctx, instr->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[instr->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[instr->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &instr->src[i], mem_ctx);
         for (int j = 0; j < 4; j++)
            lower->src[i].swizzle[j] = instr->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1);
      lower->dest.saturate = instr->dest.saturate;
      vec_instr->src[chan].src = nir_src_for_ssa(&lower->dest.dest.ssa);

      nir_instr_insert_before(&instr->instr, &lower->instr);
   }

   nir_instr_insert_before(&instr->instr, &vec_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&vec_instr->dest.dest.ssa),
                            mem_ctx);

   nir_instr_remove(&instr->instr);
}
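/* Before/after sketch (hand-written pseudo-IR): a two-component ALU op is
 * split into per-channel ops whose results are regrouped with a vec2.
 *
 *    ssa_3 = fadd ssa_1.xy, ssa_2.xy
 * =>
 *    ssa_4 = fadd ssa_1.x, ssa_2.x
 *    ssa_5 = fadd ssa_1.y, ssa_2.y
 *    ssa_3 = vec2 ssa_4, ssa_5
 */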
static bool
lower_locals_to_regs_block(nir_block *block,
                           struct locals_to_regs_state *state)
{
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (deref->mode != nir_var_function_temp)
            continue;

         b->cursor = nir_before_instr(&intrin->instr);

         nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
         mov->src[0].src = get_deref_reg_src(deref, state);
         mov->dest.write_mask = (1 << intrin->num_components) - 1;
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                              intrin->num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&mov->dest.dest.ssa));
         } else {
            nir_dest_copy(&mov->dest.dest, &intrin->dest, &mov->instr);
         }
         nir_builder_instr_insert(b, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (deref->mode != nir_var_function_temp)
            continue;

         b->cursor = nir_before_instr(&intrin->instr);

         nir_src reg_src = get_deref_reg_src(deref, state);

         nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
         nir_src_copy(&mov->src[0].src, &intrin->src[1], mov);
         mov->dest.write_mask = nir_intrinsic_write_mask(intrin);
         mov->dest.dest.is_ssa = false;
         mov->dest.dest.reg.reg = reg_src.reg.reg;
         mov->dest.dest.reg.base_offset = reg_src.reg.base_offset;
         mov->dest.dest.reg.indirect = reg_src.reg.indirect;

         nir_builder_instr_insert(b, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_copy_deref:
         unreachable("There should be no copies whatsoever at this point");
         break;

      default:
         continue;
      }
   }

   return true;
}
static void
lower_instr(nir_intrinsic_instr *instr,
            lower_atomic_state *state)
{
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   default:
      return;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform &&
       instr->variables[0]->var->data.mode != nir_var_shader_storage)
      return; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);
   unsigned uniform_loc = instr->variables[0]->var->data.location;

   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   new_instr->const_index[0] =
      state->shader_program->UniformStorage[uniform_loc].opaque[state->shader->stage].index;

   nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
   offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

   nir_deref *tail = &instr->variables[0]->deref;
   while (tail->child != NULL) {
      assert(tail->child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);
      tail = tail->child;

      unsigned child_array_elements = tail->child != NULL ?
         glsl_get_aoa_size(tail->type) : 1;

      offset_const->value.u[0] += deref_array->base_offset *
         child_array_elements * ATOMIC_COUNTER_SIZE;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_load_const_instr *atomic_counter_size =
               nir_load_const_instr_create(mem_ctx, 1);
         atomic_counter_size->value.u[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = offset_def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa));
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);
}
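/* Plain-C sketch of the offset folding above for an array-of-arrays
 * counter such as "atomic_uint c[3][2]" indexed as c[i][j]: each level
 * contributes index * child_array_elements * ATOMIC_COUNTER_SIZE, folded
 * into the constant when the index is constant. Names are illustrative;
 * ATOMIC_COUNTER_SIZE is the macro used by the pass.
 */
static unsigned
counter_offset(unsigned base, unsigned i, unsigned j)
{
   return base + (i * 2 + j) * ATOMIC_COUNTER_SIZE;
}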
static bool
lower_locals_to_regs_block(nir_block *block,
                           struct locals_to_regs_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         if (intrin->variables[0]->var->data.mode != nir_var_local)
            continue;

         nir_alu_instr *mov = nir_alu_instr_create(state->shader, nir_op_imov);
         mov->src[0].src = get_deref_reg_src(intrin->variables[0],
                                             &intrin->instr, state);
         mov->dest.write_mask = (1 << intrin->num_components) - 1;
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                              intrin->num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&mov->dest.dest.ssa));
         } else {
            nir_dest_copy(&mov->dest.dest, &intrin->dest, &mov->instr);
         }
         nir_instr_insert_before(&intrin->instr, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_store_var: {
         if (intrin->variables[0]->var->data.mode != nir_var_local)
            continue;

         nir_src reg_src = get_deref_reg_src(intrin->variables[0],
                                             &intrin->instr, state);

         nir_alu_instr *mov = nir_alu_instr_create(state->shader, nir_op_imov);
         nir_src_copy(&mov->src[0].src, &intrin->src[0], mov);
         mov->dest.write_mask = nir_intrinsic_write_mask(intrin);
         mov->dest.dest.is_ssa = false;
         mov->dest.dest.reg.reg = reg_src.reg.reg;
         mov->dest.dest.reg.base_offset = reg_src.reg.base_offset;
         mov->dest.dest.reg.indirect = reg_src.reg.indirect;

         nir_instr_insert_before(&intrin->instr, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_copy_var:
         unreachable("There should be no copies whatsoever at this point");
         break;

      default:
         continue;
      }
   }

   return true;
}
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* In the case that any outputs/inputs have unsized types, then we need to
    * guess the bit-size. In this case, the validator ensures that all
    * bit-sizes match so we can just take the bit-size from first
    * output/input with an unsized type. If all the outputs/inputs are sized
    * then we don't need to guess the bit-size at all because the code we
    * generate for constant opcodes in this case already knows the sizes of
    * the types involved and does not need the provided bit-size for anything
    * (although it still requires to receive a valid bit-size).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         switch (load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         default:
            unreachable("Invalid bit size");
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
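/* Before/after sketch (hand-written pseudo-IR): once every source is a
 * load_const, the ALU op is evaluated at compile time and replaced by a
 * single constant.
 *
 *    ssa_1 = load_const (1.5)
 *    ssa_2 = load_const (2.5)
 *    ssa_3 = fadd ssa_1, ssa_2
 * =>
 *    ssa_3 = load_const (4.0)
 */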
static bool
remove_phis_block(nir_block *block, nir_builder *b)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      nir_ssa_def *def = NULL;
      nir_alu_instr *mov = NULL;
      bool srcs_same = true;

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);

         /* For phi nodes at the beginning of loops, we may encounter some
          * sources from backedges that point back to the destination of the
          * same phi, i.e. something like:
          *
          * a = phi(a, b, ...)
          *
          * We can safely ignore these sources, since if all of the normal
          * sources point to the same definition, then that definition must
          * still dominate the phi node, and the phi will still always take
          * the value of that definition.
          */
         if (src->src.ssa == &phi->dest.ssa)
            continue;
         
         if (def == NULL) {
            def = src->src.ssa;
            mov = get_parent_mov(def);
         } else if (src->src.ssa != def && !matching_mov(mov, src->src.ssa)) {
            srcs_same = false;
            break;
         }
      }

      if (!srcs_same)
         continue;

      /* We must have found at least one definition, since there must be at
       * least one forward edge.
       */
      assert(def != NULL);

      if (mov) {
         /* If the sources were all movs from the same source with the same
          * swizzle, then we can't just pick a random move because it may not
          * dominate the phi node. Instead, we need to emit our own move after
          * the phi which uses the shared source, and rewrite uses of the phi
          * to use the move instead. This is ok, because while the movs may
          * not all dominate the phi node, their shared source does.
          */

         b->cursor = nir_after_phis(block);
         def = nir_mov_alu(b, mov->src[0], def->num_components);
      }

      assert(phi->dest.is_ssa);
      nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(def));
      nir_instr_remove(instr);

      progress = true;
   }

   return progress;
}
static void
lower_instr(nir_intrinsic_instr *instr, nir_function_impl *impl)
{
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   default:
      return;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform)
      return; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);

   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   new_instr->const_index[0] =
      (int) instr->variables[0]->var->data.atomic.buffer_index;

   nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
   offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

   if (instr->variables[0]->deref.child != NULL) {
      assert(instr->variables[0]->deref.child->deref_type ==
             nir_deref_type_array);
      nir_deref_array *deref_array =
         nir_deref_as_array(instr->variables[0]->deref.child);
      assert(deref_array->deref.child == NULL);

      offset_const->value.u[0] +=
         deref_array->base_offset * ATOMIC_COUNTER_SIZE;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_load_const_instr *atomic_counter_size =
               nir_load_const_instr_create(mem_ctx, 1);
         atomic_counter_size->value.u[0] = ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mem_ctx);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = &offset_const->def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa),
                               mem_ctx);
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);
}
static bool
nir_lower_io_block(nir_block *block, void *void_state)
{
   struct lower_io_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         nir_variable_mode mode = intrin->variables[0]->var->data.mode;
         if (mode != nir_var_shader_in && mode != nir_var_uniform)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         /* Figure out the opcode */
         nir_intrinsic_op load_op;
         switch (mode) {
         case nir_var_shader_in:
            load_op = has_indirect ? nir_intrinsic_load_input_indirect :
                                     nir_intrinsic_load_input;
            break;
         case nir_var_uniform:
            load_op = has_indirect ? nir_intrinsic_load_uniform_indirect :
                                     nir_intrinsic_load_uniform;
            break;
         default:
            unreachable("Unknown variable mode");
         }

         nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx,
                                                                load_op);
         load->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         load->const_index[0] = offset;

         if (has_indirect)
            load->src[0] = indirect;

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa),
                                     state->mem_ctx);
         } else {
            nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      case nir_intrinsic_store_var: {
         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         nir_intrinsic_op store_op;
         if (has_indirect) {
            store_op = nir_intrinsic_store_output_indirect;
         } else {
            store_op = nir_intrinsic_store_output;
         }

         nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx,
                                                                 store_op);
         store->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         store->const_index[0] = offset;

         nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);

         if (has_indirect)
            store->src[1] = indirect;

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}
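/* Effect sketch (hand-written pseudo-IR): a direct input load turns into
 * load_input with the variable's driver_location folded into
 * const_index[0]; an indirect load additionally carries the variable part
 * of the offset in src[0].
 *
 *    ssa_1 = intrinsic load_var (in@color) ()
 * =>
 *    ssa_1 = intrinsic load_input () (4)            // 4 = driver_location
 *
 *    ssa_2 = intrinsic load_var (in@arr[ssa_i]) ()
 * =>
 *    ssa_2 = intrinsic load_input_indirect (ssa_i) (base)
 */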