Example #1
File: vtn_glsl450.c Project: etnaviv/mesa
static nir_ssa_def *
build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
{
   nir_ssa_def *abs_x = nir_fabs(b, x);
   nir_ssa_def *zero = nir_imm_float(b, 0.0f);

   /* Single-precision floating-point values are stored as
    *   1 sign bit;
    *   8 exponent bits;
    *   23 mantissa bits.
    *
    * An exponent shift of 23 will shift the mantissa out, leaving only the
    * exponent and sign bit (which itself may be zero, if the absolute value
    * was taken before the bitcast and shift).
    */
   nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
   nir_ssa_def *exponent_bias = nir_imm_int(b, -126);

   nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);

   /* Exponent of floating-point values in the range [0.5, 1.0). */
   nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);

   nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);

   *exponent =
      nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
                  nir_bcsel(b, is_not_zero, exponent_bias, zero));

   return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
                     nir_bcsel(b, is_not_zero, exponent_value, zero));
}
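The bit twiddling above reproduces the frexp() contract: a mantissa in [0.5, 1.0) and an integer exponent such that x == mantissa * 2^exponent, with zero mapped to (0, 0) by the nir_bcsel selects. A quick host-side check of the same decomposition (a standalone C sketch, not Mesa code), using frexpf() from <math.h>:

#include <math.h>
#include <stdio.h>

int main(void)
{
   const float values[] = { 12.375f, -0.15625f, 0.0f };
   for (int i = 0; i < 3; i++) {
      int e;
      float m = frexpf(values[i], &e);
      /* e.g. 12.375 = 0.773438 * 2^4; 0.0 stays 0 * 2^0 */
      printf("%g = %g * 2^%d\n", values[i], m, e);
   }
   return 0;
}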
Example #2
File: vtn_variables.c Project: menpo/mesa
nir_ssa_def *
vtn_access_chain_to_offset(struct vtn_builder *b,
                           struct vtn_access_chain *chain,
                           nir_ssa_def **index_out, struct vtn_type **type_out,
                           unsigned *end_idx_out, bool stop_at_matrix)
{
   unsigned idx = 0;
   struct vtn_type *type;
   *index_out = get_vulkan_resource_index(b, chain, &type, &idx);

   nir_ssa_def *offset = nir_imm_int(&b->nb, 0);
   for (; idx < chain->length; idx++) {
      enum glsl_base_type base_type = glsl_get_base_type(type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
         /* Some users may not want matrix or vector derefs */
         if (stop_at_matrix)
            goto end;
         /* Fall through */

      case GLSL_TYPE_ARRAY:
         offset = nir_iadd(&b->nb, offset,
                           vtn_access_link_as_ssa(b, chain->link[idx],
                                                  type->stride));

         type = type->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         assert(chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = chain->link[idx].id;
         offset = nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, type->offsets[member]));
         type = type->members[member];
         break;
      }

      default:
         unreachable("Invalid type for deref");
      }
   }

end:
   *type_out = type;
   if (end_idx_out)
      *end_idx_out = idx;

   return offset;
}
Example #3
File: vtn_variables.c Project: menpo/mesa
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      return vtn_ssa_value(b, link.id)->def;
   } else {
      return nir_imul(&b->nb, vtn_ssa_value(b, link.id)->def,
                              nir_imm_int(&b->nb, stride));
   }
}
Example #4
static nir_src
get_deref_reg_src(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   nir_builder *b = &state->builder;

   nir_src src;

   src.is_ssa = false;
   src.reg.reg = get_reg_for_deref(deref, state);
   src.reg.base_offset = 0;
   src.reg.indirect = NULL;

   /* It is possible for a user to create a shader that has an array with a
    * single element and then proceed to access it indirectly.  Indirectly
    * accessing a non-array register is not allowed in NIR.  In order to
    * handle this case we just convert it to a direct reference.
    */
   if (src.reg.reg->num_array_elems == 0)
      return src;

   unsigned inner_array_size = 1;
   for (const nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      if (nir_src_is_const(d->arr.index) && !src.reg.indirect) {
         src.reg.base_offset += nir_src_as_uint(d->arr.index) *
                                inner_array_size;
      } else {
         if (src.reg.indirect) {
            assert(src.reg.base_offset == 0);
         } else {
            src.reg.indirect = ralloc(b->shader, nir_src);
            *src.reg.indirect =
               nir_src_for_ssa(nir_imm_int(b, src.reg.base_offset));
            src.reg.base_offset = 0;
         }

         assert(src.reg.indirect->is_ssa);
         nir_ssa_def *index = nir_i2i(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
         src.reg.indirect->ssa =
            nir_iadd(b, src.reg.indirect->ssa,
                        nir_imul(b, index, nir_imm_int(b, inner_array_size)));
      }

      inner_array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   return src;
}
Example #5
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   nir_tex_instr *plane_tex =
      nir_tex_instr_create(b->shader, tex->num_srcs + 1);
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, plane_tex);
      plane_tex->src[i].src_type = tex->src[i].src_type;
   }
   plane_tex->src[tex->num_srcs].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[tex->num_srcs].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->sampler_index = tex->sampler_index;

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   return &plane_tex->dest.ssa;
}
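A hedged sketch of how a YUV lowering pass could use sample_plane(): each plane is fetched with its own nir_tex_src_plane immediate and one channel of each result is recombined. The function name and the final packing are illustrative assumptions only; a real pass would also apply a YUV-to-RGB conversion.

static nir_ssa_def *
lower_yuv_example(nir_builder *b, nir_tex_instr *tex)
{
   /* Fetch Y, U and V from planes 0, 1 and 2 (illustrative plane order). */
   nir_ssa_def *y = nir_channel(b, sample_plane(b, tex, 0), 0);
   nir_ssa_def *u = nir_channel(b, sample_plane(b, tex, 1), 0);
   nir_ssa_def *v = nir_channel(b, sample_plane(b, tex, 2), 0);

   /* A real pass would run the colorspace conversion here. */
   return nir_vec4(b, y, u, v, nir_imm_float(b, 1.0f));
}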
Example #6
static nir_ssa_def *
get_texture_size(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   /* RECT textures should not be array: */
   assert(!tex->is_array);

   nir_tex_instr *txs;

   txs = nir_tex_instr_create(b->shader, 1);
   txs->op = nir_texop_txs;
   txs->sampler_dim = GLSL_SAMPLER_DIM_RECT;
   txs->texture_index = tex->texture_index;
   txs->dest_type = nir_type_int;

   /* only single src, the lod: */
   txs->src[0].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[0].src_type = nir_tex_src_lod;

   nir_ssa_dest_init(&txs->instr, &txs->dest, 2, 32, NULL);
   nir_builder_instr_insert(b, &txs->instr);

   return nir_i2f(b, &txs->dest.ssa);
}
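A hedged sketch of one way the result might be used: RECT coordinates are unnormalized, so a lowering pass can scale them by the reciprocal of the texture size before sampling. The helper name is made up; rewriting the instruction's coordinate source is left to the caller.

static nir_ssa_def *
normalize_rect_coord_example(nir_builder *b, nir_tex_instr *tex,
                             nir_ssa_def *coord)
{
   /* get_texture_size() returns a float vec2 (see nir_i2f above). */
   nir_ssa_def *scale = nir_frcp(b, get_texture_size(b, tex));
   return nir_fmul(b, coord, scale);
}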
Example #7
static nir_ssa_def *
get_texture_size(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_tex_instr *txs;

   txs = nir_tex_instr_create(b->shader, 1);
   txs->op = nir_texop_txs;
   txs->sampler_dim = tex->sampler_dim;
   txs->is_array = tex->is_array;
   txs->is_shadow = tex->is_shadow;
   txs->is_new_style_shadow = tex->is_new_style_shadow;
   txs->texture_index = tex->texture_index;
   txs->texture = nir_deref_var_clone(tex->texture, txs);
   txs->sampler_index = tex->sampler_index;
   txs->sampler = nir_deref_var_clone(tex->sampler, txs);
   txs->dest_type = nir_type_int;

   /* only single src, the lod: */
   txs->src[0].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[0].src_type = nir_tex_src_lod;

   nir_ssa_dest_init(&txs->instr, &txs->dest, tex->coord_components, 32, NULL);
   nir_builder_instr_insert(b, &txs->instr);

   return nir_i2f(b, &txs->dest.ssa);
}
Example #8
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   nir_tex_instr *plane_tex = nir_tex_instr_create(b->shader, 2);
   nir_src_copy(&plane_tex->src[0].src, &tex->src[0].src, plane_tex);
   plane_tex->src[0].src_type = nir_tex_src_coord;
   plane_tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[1].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->texture = (nir_deref_var *)
      nir_copy_deref(plane_tex, &tex->texture->deref);
   plane_tex->sampler_index = tex->sampler_index;
   plane_tex->sampler = (nir_deref_var *)
      nir_copy_deref(plane_tex, &tex->sampler->deref);

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   return &plane_tex->dest.ssa;
}
Example #9
File: vtn_variables.c Project: menpo/mesa
static nir_ssa_def *
get_vulkan_resource_index(struct vtn_builder *b, struct vtn_access_chain *chain,
                          struct vtn_type **type, unsigned *chain_idx)
{
   /* Push constants have no explicit binding */
   if (chain->var->mode == vtn_variable_mode_push_constant) {
      *chain_idx = 0;
      *type = chain->var->type;
      return NULL;
   }

   nir_ssa_def *array_index;
   if (glsl_type_is_array(chain->var->type->type)) {
      assert(chain->length > 0);
      array_index = vtn_access_link_as_ssa(b, chain->link[0], 1);
      *chain_idx = 1;
      *type = chain->var->type->array_element;
   } else {
      array_index = nir_imm_int(&b->nb, 0);
      *chain_idx = 0;
      *type = chain->var->type;
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(array_index);
   nir_intrinsic_set_desc_set(instr, chain->var->descriptor_set);
   nir_intrinsic_set_binding(instr, chain->var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
Example #10
static void
build_color_shaders(struct nir_shader **out_vs,
                    struct nir_shader **out_fs,
                    uint32_t frag_output)
{
	nir_builder vs_b;
	nir_builder fs_b;

	nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
	nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);

	vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
	fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");

	const struct glsl_type *position_type = glsl_vec4_type();
	const struct glsl_type *color_type = glsl_vec4_type();

	nir_variable *vs_out_pos =
		nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
				    "gl_Position");
	vs_out_pos->data.location = VARYING_SLOT_POS;

	nir_intrinsic_instr *in_color_load = nir_intrinsic_instr_create(fs_b.shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(in_color_load, 0);
	nir_intrinsic_set_range(in_color_load, 16);
	in_color_load->src[0] = nir_src_for_ssa(nir_imm_int(&fs_b, 0));
	in_color_load->num_components = 4;
	nir_ssa_dest_init(&in_color_load->instr, &in_color_load->dest, 4, 32, "clear color");
	nir_builder_instr_insert(&fs_b, &in_color_load->instr);

	nir_variable *fs_out_color =
		nir_variable_create(fs_b.shader, nir_var_shader_out, color_type,
				    "f_color");
	fs_out_color->data.location = FRAG_RESULT_DATA0 + frag_output;

	nir_store_var(&fs_b, fs_out_color, &in_color_load->dest.ssa, 0xf);

	nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&vs_b);
	nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);

	const struct glsl_type *layer_type = glsl_int_type();
	nir_variable *vs_out_layer =
		nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type,
				    "v_layer");
	vs_out_layer->data.location = VARYING_SLOT_LAYER;
	vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
	nir_ssa_def *inst_id = nir_load_system_value(&vs_b, nir_intrinsic_load_instance_id, 0);
	nir_ssa_def *base_instance = nir_load_system_value(&vs_b, nir_intrinsic_load_base_instance, 0);

	nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
	nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);

	*out_vs = vs_b.shader;
	*out_fs = fs_b.shader;
}
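Several examples on this page (the clear-color fragment shader above, the resolve and buffer-fill shaders below) build push-constant loads the same way: a constant byte offset from nir_imm_int(), a base/range pair, an SSA destination, then insertion at the builder's cursor. A distilled sketch of that shared pattern, assuming an existing nir_builder; the function name is invented and the 16-byte range is borrowed from the clear-color example:

static nir_ssa_def *
load_push_constant_example(nir_builder *b, unsigned offset_bytes,
                           unsigned num_components)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);

   /* Constant byte offset into the push-constant block. */
   load->src[0] = nir_src_for_ssa(nir_imm_int(b, offset_bytes));
   load->num_components = num_components;
   nir_intrinsic_set_base(load, 0);
   nir_intrinsic_set_range(load, 16); /* size this to the bytes actually read */

   nir_ssa_dest_init(&load->instr, &load->dest, num_components, 32, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}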
Example #11
static nir_ssa_def *
get_texture_size(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_tex_instr *txs;

   unsigned num_srcs = 1; /* One for the LOD */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle)
         num_srcs++;
   }

   txs = nir_tex_instr_create(b->shader, num_srcs);
   txs->op = nir_texop_txs;
   txs->sampler_dim = tex->sampler_dim;
   txs->is_array = tex->is_array;
   txs->is_shadow = tex->is_shadow;
   txs->is_new_style_shadow = tex->is_new_style_shadow;
   txs->texture_index = tex->texture_index;
   txs->sampler_index = tex->sampler_index;
   txs->dest_type = nir_type_int;

   unsigned idx = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle) {
         nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
         txs->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
   }
   /* Add in an LOD because some back-ends require it */
   txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[idx].src_type = nir_tex_src_lod;

   nir_ssa_dest_init(&txs->instr, &txs->dest,
                     nir_tex_instr_dest_size(txs), 32, NULL);
   nir_builder_instr_insert(b, &txs->instr);

   return nir_i2f32(b, &txs->dest.ssa);
}
Example #12
/* Calculate the sampler index based on array indices and also
 * calculate the base uniform location for struct members.
 */
static void
calc_sampler_offsets(nir_deref *tail, nir_tex_instr *instr,
                     unsigned *array_elements, nir_ssa_def **indirect,
                     nir_builder *b, unsigned *location)
{
   if (tail->child == NULL)
      return;

   switch (tail->child->deref_type) {
   case nir_deref_type_array: {
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);

      assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);

      calc_sampler_offsets(tail->child, instr, array_elements,
                           indirect, b, location);
      instr->sampler_index += deref_array->base_offset * *array_elements;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_ssa_def *mul =
            nir_imul(b, nir_imm_int(b, *array_elements),
                     nir_ssa_for_src(b, deref_array->indirect, 1));

         nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
                               NIR_SRC_INIT);

         if (*indirect) {
            *indirect = nir_iadd(b, *indirect, mul);
         } else {
            *indirect = mul;
         }
      }

      *array_elements *= glsl_get_length(tail->type);
      break;
   }

   case nir_deref_type_struct: {
      nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
      *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
      calc_sampler_offsets(tail->child, instr, array_elements,
                           indirect, b, location);
      break;
   }

   default:
      unreachable("Invalid deref type");
      break;
   }
}
Example #13
static nir_shader *
build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, int samples)
{
	nir_builder b;
	char name[64];
	const struct glsl_type *vec2 = glsl_vector_type(GLSL_TYPE_FLOAT, 2);
	const struct glsl_type *vec4 = glsl_vec4_type();
	const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_MS,
								 false,
								 false,
								 GLSL_TYPE_FLOAT);

	snprintf(name, 64, "meta_resolve_fs-%d-%s", samples, is_integer ? "int" : "float");
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, name);

	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      sampler_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *fs_pos_in = nir_variable_create(b.shader, nir_var_shader_in, vec2, "fs_pos_in");
	fs_pos_in->data.location = VARYING_SLOT_POS;

	nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
						      vec4, "f_color");
	color_out->data.location = FRAG_RESULT_DATA0;

	nir_ssa_def *pos_in = nir_load_var(&b, fs_pos_in);
	nir_intrinsic_instr *src_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(src_offset, 0);
	nir_intrinsic_set_range(src_offset, 8);
	src_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_offset->num_components = 2;
	nir_ssa_dest_init(&src_offset->instr, &src_offset->dest, 2, 32, "src_offset");
	nir_builder_instr_insert(&b, &src_offset->instr);

	nir_ssa_def *pos_int = nir_f2i32(&b, pos_in);

	nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, pos_int, &src_offset->dest.ssa), 0x3);
	nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");

	radv_meta_build_resolve_shader_core(&b, is_integer, samples, input_img,
	                                    color, img_coord);

	nir_ssa_def *outval = nir_load_var(&b, color);
	nir_store_var(&b, color_out, outval, 0xf);
	return b.shader;
}
Example #14
File: brw_nir.c Project: etnaviv/mesa
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
Example #15
File: vtn_variables.c Project: menpo/mesa
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     struct vtn_ssa_value **inout, const struct glsl_type *type)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   /* We set the base and size for push constant load to the entire push
    * constant block for now.
    */
   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, 0);
      nir_intrinsic_set_range(instr, 128);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   instr->src[src++] = nir_src_for_ssa(offset);

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(glsl_get_base_type(type)), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
Example #16
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
	nir_builder b;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "meta_buffer_fill");
	b.shader->info->cs.local_size[0] = 64;
	b.shader->info->cs.local_size[1] = 1;
	b.shader->info->cs.local_size[2] = 1;

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info->cs.local_size[0],
						b.shader->info->cs.local_size[1],
						b.shader->info->cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

	nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
	offset = nir_swizzle(&b, offset, (unsigned[]) {0, 0, 0, 0}, 1, false);
Example #17
static void
emit_indirect_load_store(nir_builder *b, nir_intrinsic_instr *orig_instr,
                         nir_deref_var *deref, nir_deref *arr_parent,
                         int start, int end,
                         nir_ssa_def **dest, nir_ssa_def *src)
{
   assert(arr_parent->child &&
          arr_parent->child->deref_type == nir_deref_type_array);
   nir_deref_array *arr = nir_deref_as_array(arr_parent->child);
   assert(arr->deref_array_type == nir_deref_array_type_indirect);
   assert(arr->indirect.is_ssa);

   assert(start < end);
   if (start == end - 1) {
      /* Base case.  Just emit the load/store op */
      nir_deref_array direct = *arr;
      direct.deref_array_type = nir_deref_array_type_direct;
      direct.base_offset += start;
      direct.indirect = NIR_SRC_INIT;

      arr_parent->child = &direct.deref;
      emit_load_store(b, orig_instr, deref, &arr->deref, dest, src);
      arr_parent->child = &arr->deref;
   } else {
      int mid = start + (end - start) / 2;

      nir_ssa_def *then_dest, *else_dest;

      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(nir_ilt(b, arr->indirect.ssa,
                                                      nir_imm_int(b, mid)));
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

      b->cursor = nir_after_cf_list(&if_stmt->then_list);
      emit_indirect_load_store(b, orig_instr, deref, arr_parent,
                               start, mid, &then_dest, src);

      b->cursor = nir_after_cf_list(&if_stmt->else_list);
      emit_indirect_load_store(b, orig_instr, deref, arr_parent,
                               mid, end, &else_dest, src);

      b->cursor = nir_after_cf_node(&if_stmt->cf_node);

      if (src == NULL) {
         /* We're a load.  We need to insert a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         unsigned bit_size = then_dest->bit_size;
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           then_dest->num_components, bit_size, NULL);

         nir_phi_src *src0 = ralloc(phi, nir_phi_src);
         src0->pred = nir_cf_node_as_block(nir_if_last_then_node(if_stmt));
         src0->src = nir_src_for_ssa(then_dest);
         exec_list_push_tail(&phi->srcs, &src0->node);

         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         src1->pred = nir_cf_node_as_block(nir_if_last_else_node(if_stmt));
         src1->src = nir_src_for_ssa(else_dest);
         exec_list_push_tail(&phi->srcs, &src1->node);

         nir_builder_instr_insert(b, &phi->instr);
         *dest = &phi->dest.ssa;
      }
   }
}
Example #18
static void
convert_instr(nir_builder *bld, nir_alu_instr *alu)
{
   nir_ssa_def *numer, *denom, *af, *bf, *a, *b, *q, *r;
   nir_op op = alu->op;
   bool is_signed;

   if ((op != nir_op_idiv) &&
       (op != nir_op_udiv) &&
       (op != nir_op_umod))
      return;

   is_signed = (op == nir_op_idiv);

   bld->cursor = nir_before_instr(&alu->instr);

   numer = nir_ssa_for_alu_src(bld, alu, 0);
   denom = nir_ssa_for_alu_src(bld, alu, 1);

   if (is_signed) {
      af = nir_i2f(bld, numer);
      bf = nir_i2f(bld, denom);
      af = nir_fabs(bld, af);
      bf = nir_fabs(bld, bf);
      a  = nir_iabs(bld, numer);
      b  = nir_iabs(bld, denom);
   } else {
      af = nir_u2f(bld, numer);
      bf = nir_u2f(bld, denom);
      a  = numer;
      b  = denom;
   }

   /* get first result: */
   bf = nir_frcp(bld, bf);
   bf = nir_isub(bld, bf, nir_imm_int(bld, 2));  /* yes, really */
   q  = nir_fmul(bld, af, bf);

   if (is_signed) {
      q = nir_f2i(bld, q);
   } else {
      q = nir_f2u(bld, q);
   }

   /* get error of first result: */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);
   r = nir_u2f(bld, r);
   r = nir_fmul(bld, r, bf);
   r = nir_f2u(bld, r);

   /* add quotients: */
   q = nir_iadd(bld, q, r);

   /* correction: if modulus >= divisor, add 1 */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);

   r = nir_uge(bld, r, b);
   r = nir_b2i(bld, r);

   q = nir_iadd(bld, q, r);
   if (is_signed)  {
      /* fix the sign: */
      r = nir_ixor(bld, numer, denom);
      r = nir_ushr(bld, r, nir_imm_int(bld, 31));
      r = nir_i2b(bld, r);
      b = nir_ineg(bld, q);
      q = nir_bcsel(bld, r, b, q);
   }

   if (op == nir_op_umod) {
      /* division result in q */
      r = nir_imul(bld, q, b);
      q = nir_isub(bld, a, r);
   }

   assert(alu->dest.dest.is_ssa);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(q));
}
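The final nir_uge/nir_b2i/nir_iadd trio above exists because the reciprocal-based estimate can still be one short of the true quotient. A tiny host-side sketch of just that correction step (plain C, not Mesa code):

#include <assert.h>
#include <stdint.h>

static uint32_t
fixup_quotient(uint32_t a, uint32_t b, uint32_t q_est)
{
   uint32_t r = a - q_est * b;   /* error of the estimate (nir_imul/nir_isub) */
   if (r >= b)                   /* nir_uge + nir_b2i + nir_iadd above */
      q_est += 1;
   return q_est;
}

int main(void)
{
   assert(fixup_quotient(7, 3, 1) == 2);  /* one short: corrected */
   assert(fixup_quotient(7, 3, 2) == 2);  /* already exact: unchanged */
   return 0;
}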
Example #19
void
vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);

   /* Collect the various SSA sources */
   const unsigned num_inputs = count - 3;
   struct vtn_ssa_value *vtn_src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);

   if (glsl_type_is_matrix(vtn_src[0]->type) ||
       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
      b->nb.exact = false;
      return;
   }

   val->ssa = vtn_create_ssa_value(b, type);
   nir_ssa_def *src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
      src[i] = vtn_src[i]->def;
   }

   switch (opcode) {
   case SpvOpAny:
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_bany_inequal2; break;
         case 3:  op = nir_op_bany_inequal3; break;
         case 4:  op = nir_op_bany_inequal4; break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_FALSE),
                                       NULL, NULL);
      }
      break;

   case SpvOpAll:
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_ball_iequal2;  break;
         case 3:  op = nir_op_ball_iequal3;  break;
         case 4:  op = nir_op_ball_iequal4;  break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_TRUE),
                                       NULL, NULL);
      }
      break;

   case SpvOpOuterProduct: {
      for (unsigned i = 0; i < src[1]->num_components; i++) {
         val->ssa->elems[i]->def =
            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
      }
      break;
   }

   case SpvOpDot:
      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
      break;

   case SpvOpIAddCarry:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
      break;

   case SpvOpISubBorrow:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
      break;

   case SpvOpUMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpSMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpFwidth:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      break;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      break;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      break;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      break;

   case SpvOpIsNan:
      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
      break;

   case SpvOpIsInf:
      val->ssa->def = nir_feq(&b->nb, nir_fabs(&b->nb, src[0]),
                                      nir_imm_float(&b->nb, INFINITY));
      break;

   case SpvOpFUnordEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpFUnordLessThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpFUnordLessThanEqual:
   case SpvOpFUnordGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_ior(&b->nb,
                 nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                 nir_ior(&b->nb,
                         nir_fne(&b->nb, src[0], src[0]),
                         nir_fne(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpFOrdEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFOrdLessThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFOrdGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
                          nir_feq(&b->nb, src[0], src[0]),
                          nir_feq(&b->nb, src[1], src[1])));
      break;
   }

   default: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
      break;
   } /* default */
   }

   b->nb.exact = false;
}
Example #20
File: vtn_variables.c Project: menpo/mesa
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      struct vtn_access_chain *chain, unsigned chain_idx,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (chain && chain_idx >= chain->length)
      chain = NULL;

   if (load && chain == NULL && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         if (chain == NULL) {
            /* Loading the whole matrix */
            struct vtn_ssa_value *transpose;
            unsigned num_ops, vec_width;
            if (type->row_major) {
               num_ops = glsl_get_vector_elements(type->type);
               vec_width = glsl_get_matrix_columns(type->type);
               if (load) {
                  const struct glsl_type *transpose_type =
                     glsl_matrix_type(base_type, vec_width, num_ops);
                  *inout = vtn_create_ssa_value(b, transpose_type);
               } else {
                  transpose = vtn_ssa_transpose(b, *inout);
                  inout = &transpose;
               }
            } else {
               num_ops = glsl_get_matrix_columns(type->type);
               vec_width = glsl_get_vector_elements(type->type);
            }

            for (unsigned i = 0; i < num_ops; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    &(*inout)->elems[i],
                                    glsl_vector_type(base_type, vec_width));
            }

            if (load && type->row_major)
               *inout = vtn_ssa_transpose(b, *inout);
         } else if (type->row_major) {
            /* Row-major but with an access chain. */
            nir_ssa_def *col_offset =
               vtn_access_link_as_ssa(b, chain->link[chain_idx],
                                      type->array_element->stride);
            offset = nir_iadd(&b->nb, offset, col_offset);

            if (chain_idx + 1 < chain->length) {
               /* Picking off a single element */
               nir_ssa_def *row_offset =
                  vtn_access_link_as_ssa(b, chain->link[chain_idx + 1],
                                         type->stride);
               offset = nir_iadd(&b->nb, offset, row_offset);
               if (load)
                  *inout = vtn_create_ssa_value(b, glsl_scalar_type(base_type));
               _vtn_load_store_tail(b, op, load, index, offset, inout,
                                    glsl_scalar_type(base_type));
            } else {
               /* Grabbing a column; picking one element off each row */
               unsigned num_comps = glsl_get_vector_elements(type->type);
               const struct glsl_type *column_type =
                  glsl_get_column_type(type->type);

               nir_ssa_def *comps[4];
               for (unsigned i = 0; i < num_comps; i++) {
                  nir_ssa_def *elem_offset =
                     nir_iadd(&b->nb, offset,
                              nir_imm_int(&b->nb, i * type->stride));

                  struct vtn_ssa_value *comp, temp_val;
                  if (!load) {
                     temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                     temp_val.type = glsl_scalar_type(base_type);
                  }
                  comp = &temp_val;
                  _vtn_load_store_tail(b, op, load, index, elem_offset,
                                       &comp, glsl_scalar_type(base_type));
                  comps[i] = comp->def;
               }

               if (load) {
                  if (*inout == NULL)
                     *inout = vtn_create_ssa_value(b, column_type);

                  (*inout)->def = nir_vec(&b->nb, comps, num_comps);
               }
            }
         } else {
            /* Column-major with a deref. Fall through to array case. */
            nir_ssa_def *col_offset =
               vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
            offset = nir_iadd(&b->nb, offset, col_offset);

            _vtn_block_load_store(b, op, load, index, offset,
                                  chain, chain_idx + 1,
                                  type->array_element, inout);
         }
      } else if (chain == NULL) {
         /* Single whole vector */
         assert(glsl_type_is_vector_or_scalar(type->type));
         _vtn_load_store_tail(b, op, load, index, offset, inout, type->type);
      } else {
         /* Single component of a vector. Fall through to array case. */
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);

         _vtn_block_load_store(b, op, load, index, offset, NULL, 0,
                               type->array_element, inout);
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      unreachable("Invalid block member type");
   }
}
Example #21
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info->cs.local_size[0],
						b.shader->info->cs.local_size[1],
						b.shader->info->cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

	nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
	offset = nir_swizzle(&b, offset, (unsigned[]) {0, 0, 0, 0}, 1, false);

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	load->num_components = 1;
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, "fill_value");
	nir_builder_instr_insert(&b, &load->instr);

	nir_ssa_def *swizzled_load = nir_swizzle(&b, &load->dest.ssa, (unsigned[]) { 0, 0, 0, 0}, 4, false);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(swizzled_load);
Example #22
static nir_shader *
build_resolve_compute_shader(struct radv_device *dev, bool is_integer, int samples)
{
	nir_builder b;
	char name[64];
	nir_if *outer_if = NULL;
	const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_MS,
								 false,
								 false,
								 GLSL_TYPE_FLOAT);
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	snprintf(name, 64, "meta_resolve_cs-%d-%s", samples, is_integer ? "int" : "float");
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, name);
	b.shader->info->cs.local_size[0] = 16;
	b.shader->info->cs.local_size[1] = 16;
	b.shader->info->cs.local_size[2] = 1;

	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      sampler_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info->cs.local_size[0],
						b.shader->info->cs.local_size[1],
						b.shader->info->cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

	nir_intrinsic_instr *src_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	src_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_offset->num_components = 2;
	nir_ssa_dest_init(&src_offset->instr, &src_offset->dest, 2, 32, "src_offset");
	nir_builder_instr_insert(&b, &src_offset->instr);

	nir_intrinsic_instr *dst_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	dst_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8));
	dst_offset->num_components = 2;
	nir_ssa_dest_init(&dst_offset->instr, &dst_offset->dest, 2, 32, "dst_offset");
	nir_builder_instr_insert(&b, &dst_offset->instr);

	nir_ssa_def *img_coord = nir_iadd(&b, global_id, &src_offset->dest.ssa);
	/* do a txf_ms on each sample */
	nir_ssa_def *tmp;

	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
	tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
	tex->op = nir_texop_txf_ms;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(img_coord);
	tex->src[1].src_type = nir_tex_src_ms_index;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;
	tex->texture = nir_deref_var_create(tex, input_img);
	tex->sampler = NULL;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	tmp = &tex->dest.ssa;
	nir_variable *color =
		nir_local_variable_create(b.impl, glsl_vec4_type(), "color");

	if (!is_integer && samples > 1) {
		nir_tex_instr *tex_all_same = nir_tex_instr_create(b.shader, 1);
		tex_all_same->sampler_dim = GLSL_SAMPLER_DIM_MS;
		tex_all_same->op = nir_texop_samples_identical;
		tex_all_same->src[0].src_type = nir_tex_src_coord;
		tex_all_same->src[0].src = nir_src_for_ssa(img_coord);
		tex_all_same->dest_type = nir_type_float;
		tex_all_same->is_array = false;
		tex_all_same->coord_components = 2;
		tex_all_same->texture = nir_deref_var_create(tex_all_same, input_img);
		tex_all_same->sampler = NULL;

		nir_ssa_dest_init(&tex_all_same->instr, &tex_all_same->dest, 1, 32, "tex");
		nir_builder_instr_insert(&b, &tex_all_same->instr);

		nir_ssa_def *all_same = nir_ine(&b, &tex_all_same->dest.ssa, nir_imm_int(&b, 0));
		nir_if *if_stmt = nir_if_create(b.shader);
		if_stmt->condition = nir_src_for_ssa(all_same);
		nir_cf_node_insert(b.cursor, &if_stmt->cf_node);

		b.cursor = nir_after_cf_list(&if_stmt->then_list);
		for (int i = 1; i < samples; i++) {
			nir_tex_instr *tex_add = nir_tex_instr_create(b.shader, 2);
			tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS;
			tex_add->op = nir_texop_txf_ms;
			tex_add->src[0].src_type = nir_tex_src_coord;
			tex_add->src[0].src = nir_src_for_ssa(img_coord);
			tex_add->src[1].src_type = nir_tex_src_ms_index;
			tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(&b, i));
			tex_add->dest_type = nir_type_float;
			tex_add->is_array = false;
			tex_add->coord_components = 2;
			tex_add->texture = nir_deref_var_create(tex_add, input_img);
			tex_add->sampler = NULL;

			nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex");
			nir_builder_instr_insert(&b, &tex_add->instr);

			tmp = nir_fadd(&b, tmp, &tex_add->dest.ssa);
		}

		tmp = nir_fdiv(&b, tmp, nir_imm_float(&b, samples));
		nir_store_var(&b, color, tmp, 0xf);
		b.cursor = nir_after_cf_list(&if_stmt->else_list);
		outer_if = if_stmt;
	}
	nir_store_var(&b, color, &tex->dest.ssa, 0xf);

	if (outer_if)
		b.cursor = nir_after_cf_node(&outer_if->cf_node);

	nir_ssa_def *newv = nir_load_var(&b, color);
	nir_ssa_def *coord = nir_iadd(&b, global_id, &dst_offset->dest.ssa);
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
	store->src[0] = nir_src_for_ssa(coord);
	store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
	store->src[2] = nir_src_for_ssa(newv);
	store->variables[0] = nir_deref_var_create(store, output_img);
	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}
Example #23
static nir_shader *
create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                       const nir_shader_compiler_options *options,
                       const struct brw_tcs_prog_key *key)
{
   nir_builder b;
   nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
                                  options);
   nir_shader *nir = b.shader;
   nir_variable *var;
   nir_intrinsic_instr *load;
   nir_intrinsic_instr *store;
   nir_ssa_def *zero = nir_imm_int(&b, 0);
   nir_ssa_def *invoc_id =
      nir_load_system_value(&b, nir_intrinsic_load_invocation_id, 0);

   nir->info->inputs_read = key->outputs_written;
   nir->info->outputs_written = key->outputs_written;
   nir->info->tcs.vertices_out = key->input_vertices;
   nir->info->name = ralloc_strdup(nir, "passthrough");
   nir->num_uniforms = 8 * sizeof(uint32_t);

   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
   var->data.location = 0;
   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
   var->data.location = 1;

   /* Write the patch URB header. */
   for (int i = 0; i <= 1; i++) {
      load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);
   }

   /* Copy inputs to outputs. */
   uint64_t varyings = key->outputs_written;

   while (varyings != 0) {
      const int varying = ffsll(varyings) - 1;

      load = nir_intrinsic_instr_create(nir,
                                        nir_intrinsic_load_per_vertex_input);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(invoc_id);
      load->src[1] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, varying);
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir,
                                         nir_intrinsic_store_per_vertex_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(invoc_id);
      store->src[2] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, varying);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);

      varyings &= ~BITFIELD64_BIT(varying);
   }

   nir_validate_shader(nir);

   nir = brw_preprocess_nir(compiler, nir);

   return nir;
}
Example #24
static void
vc4_nir_lower_txf_ms_instr(struct vc4_compile *c, nir_builder *b,
                           nir_tex_instr *txf_ms)
{
    if (txf_ms->op != nir_texop_txf_ms)
        return;

    b->cursor = nir_before_instr(&txf_ms->instr);

    nir_tex_instr *txf = nir_tex_instr_create(c->s, 1);
    txf->op = nir_texop_txf;
    txf->sampler = txf_ms->sampler;
    txf->sampler_index = txf_ms->sampler_index;
    txf->coord_components = txf_ms->coord_components;
    txf->is_shadow = txf_ms->is_shadow;
    txf->is_new_style_shadow = txf_ms->is_new_style_shadow;

    nir_ssa_def *coord = NULL, *sample_index = NULL;
    for (int i = 0; i < txf_ms->num_srcs; i++) {
        assert(txf_ms->src[i].src.is_ssa);

        switch (txf_ms->src[i].src_type) {
        case nir_tex_src_coord:
            coord = txf_ms->src[i].src.ssa;
            break;
        case nir_tex_src_ms_index:
            sample_index = txf_ms->src[i].src.ssa;
            break;
        default:
            unreachable("Unknown txf_ms src\n");
        }
    }
    assert(coord);
    assert(sample_index);

    nir_ssa_def *x = nir_channel(b, coord, 0);
    nir_ssa_def *y = nir_channel(b, coord, 1);

    uint32_t tile_w = 32;
    uint32_t tile_h = 32;
    uint32_t tile_w_shift = 5;
    uint32_t tile_h_shift = 5;
    uint32_t tile_size = (tile_h * tile_w *
                          VC4_MAX_SAMPLES * sizeof(uint32_t));
    unsigned unit = txf_ms->sampler_index;
    uint32_t w = align(c->key->tex[unit].msaa_width, tile_w);
    uint32_t w_tiles = w / tile_w;

    nir_ssa_def *x_tile = nir_ushr(b, x, nir_imm_int(b, tile_w_shift));
    nir_ssa_def *y_tile = nir_ushr(b, y, nir_imm_int(b, tile_h_shift));
    nir_ssa_def *tile_addr = nir_iadd(b,
                                      nir_imul(b, x_tile,
                                              nir_imm_int(b, tile_size)),
                                      nir_imul(b, y_tile,
                                              nir_imm_int(b, (w_tiles *
                                                      tile_size))));
    nir_ssa_def *x_subspan = nir_iand(b, x,
                                      nir_imm_int(b, (tile_w - 1) & ~1));
    nir_ssa_def *y_subspan = nir_iand(b, y,
                                      nir_imm_int(b, (tile_h - 1) & ~1));
    nir_ssa_def *subspan_addr = nir_iadd(b,
                                         nir_imul(b, x_subspan,
                                                 nir_imm_int(b, 2 * VC4_MAX_SAMPLES * sizeof(uint32_t))),
                                         nir_imul(b, y_subspan,
                                                 nir_imm_int(b,
                                                         tile_w *
                                                         VC4_MAX_SAMPLES *
                                                         sizeof(uint32_t))));

    nir_ssa_def *pixel_addr = nir_ior(b,
                                      nir_iand(b,
                                              nir_ishl(b, x,
                                                      nir_imm_int(b, 2)),
                                              nir_imm_int(b, (1 << 2))),
                                      nir_iand(b,
                                              nir_ishl(b, y,
                                                      nir_imm_int(b, 3)),
                                              nir_imm_int(b, (1 << 3))));

    nir_ssa_def *sample_addr = nir_ishl(b, sample_index, nir_imm_int(b, 4));

    nir_ssa_def *addr = nir_iadd(b,
                                 nir_ior(b, sample_addr, pixel_addr),
                                 nir_iadd(b, subspan_addr, tile_addr));

    txf->src[0].src_type = nir_tex_src_coord;
    txf->src[0].src = nir_src_for_ssa(nir_vec2(b, addr, nir_imm_int(b, 0)));
    nir_ssa_dest_init(&txf->instr, &txf->dest, 4, NULL);
    nir_builder_instr_insert(b, &txf->instr);
    nir_ssa_def_rewrite_uses(&txf_ms->dest.ssa,
                             nir_src_for_ssa(&txf->dest.ssa));
    nir_instr_remove(&txf_ms->instr);
}
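For readers who want to check the addressing arithmetic, here is a hedged host-side mirror of the tile/subspan/pixel/sample computation above (plain C, not driver code), assuming VC4_MAX_SAMPLES == 4 and the 32x32-pixel tiles used in the shader version:

static uint32_t
msaa_texel_offset(uint32_t x, uint32_t y, uint32_t sample, uint32_t w_tiles)
{
   const uint32_t tile_w = 32, tile_h = 32;
   const uint32_t tile_size = tile_w * tile_h * 4 /* samples */ * 4 /* bytes */;

   uint32_t tile_addr = (x >> 5) * tile_size +
                        (y >> 5) * (w_tiles * tile_size);
   uint32_t subspan_addr = (x & ((tile_w - 1) & ~1u)) * (2 * 4 * 4) +
                           (y & ((tile_h - 1) & ~1u)) * (tile_w * 4 * 4);
   uint32_t pixel_addr = ((x << 2) & (1u << 2)) | ((y << 3) & (1u << 3));
   uint32_t sample_addr = sample << 4;

   return (sample_addr | pixel_addr) + subspan_addr + tile_addr;
}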
Example #25
static nir_shader *
build_nir_itob_compute_shader(struct radv_device *dev)
{
    nir_builder b;
    const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
                                           false,
                                           false,
                                           GLSL_TYPE_FLOAT);
    const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_BUF,
                                       false,
                                       false,
                                       GLSL_TYPE_FLOAT);
    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
    b.shader->info->name = ralloc_strdup(b.shader, "meta_itob_cs");
    b.shader->info->cs.local_size[0] = 16;
    b.shader->info->cs.local_size[1] = 16;
    b.shader->info->cs.local_size[2] = 1;
    nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
                              sampler_type, "s_tex");
    input_img->data.descriptor_set = 0;
    input_img->data.binding = 0;

    nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
                               img_type, "out_img");
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;

    nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
    nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
    nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                            b.shader->info->cs.local_size[0],
                                            b.shader->info->cs.local_size[1],
                                            b.shader->info->cs.local_size[2], 0);

    nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);



    nir_intrinsic_instr *offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
    offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    offset->num_components = 2;
    nir_ssa_dest_init(&offset->instr, &offset->dest, 2, 32, "offset");
    nir_builder_instr_insert(&b, &offset->instr);

    nir_intrinsic_instr *stride = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
    stride->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8));
    stride->num_components = 1;
    nir_ssa_dest_init(&stride->instr, &stride->dest, 1, 32, "stride");
    nir_builder_instr_insert(&b, &stride->instr);

    nir_ssa_def *img_coord = nir_iadd(&b, global_id, &offset->dest.ssa);

    nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
    tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
    tex->op = nir_texop_txf;
    tex->src[0].src_type = nir_tex_src_coord;
    tex->src[0].src = nir_src_for_ssa(img_coord);
    tex->src[1].src_type = nir_tex_src_lod;
    tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
    tex->dest_type = nir_type_float;
    tex->is_array = false;
    tex->coord_components = 2;
    tex->texture = nir_deref_var_create(tex, input_img);
    tex->sampler = NULL;

    nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
    nir_builder_instr_insert(&b, &tex->instr);

    nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
    nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);

    nir_ssa_def *tmp = nir_imul(&b, pos_y, &stride->dest.ssa);
    tmp = nir_iadd(&b, tmp, pos_x);

    nir_ssa_def *coord = nir_vec4(&b, tmp, tmp, tmp, tmp);

    nir_ssa_def *outval = &tex->dest.ssa;
    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
    store->src[0] = nir_src_for_ssa(coord);
    store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
    store->src[2] = nir_src_for_ssa(outval);
    store->variables[0] = nir_deref_var_create(store, output_img);

    nir_builder_instr_insert(&b, &store->instr);
    return b.shader;
}
Example #26
static void *vc4_get_yuv_vs(struct pipe_context *pctx)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;

   if (vc4->yuv_linear_blit_vs)
           return vc4->yuv_linear_blit_vs;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_VERTEX);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, options);
   b.shader->info.name = ralloc_strdup(b.shader, "linear_blit_vs");

   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");

   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;

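   /* Pass the input position straight through to gl_Position. */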
   nir_store_var(&b, pos_out, nir_load_var(&b, pos_in), 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   vc4->yuv_linear_blit_vs = pctx->create_vs_state(pctx, &shader_tmpl);

   return vc4->yuv_linear_blit_vs;
}

static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;
   struct pipe_shader_state **cached_shader;
   const char *name;

   if (cpp == 1) {
           cached_shader = &vc4->yuv_linear_blit_fs_8bit;
           name = "linear_blit_8bit_fs";
   } else {
           cached_shader = &vc4->yuv_linear_blit_fs_16bit;
           name = "linear_blit_16bit_fs";
   }

   if (*cached_shader)
           return *cached_shader;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_FRAGMENT);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
   b.shader->info.name = ralloc_strdup(b.shader, name);

   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *glsl_int = glsl_int_type();

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_COLOR;

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");
   pos_in->data.location = VARYING_SLOT_POS;
   nir_ssa_def *pos = nir_load_var(&b, pos_in);

   nir_ssa_def *one = nir_imm_int(&b, 1);
   nir_ssa_def *two = nir_imm_int(&b, 2);

   nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
   nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));

   nir_variable *stride_in = nir_variable_create(b.shader, nir_var_uniform,
                                                 glsl_int, "stride");
   nir_ssa_def *stride = nir_load_var(&b, stride_in);

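   /* Compute the byte offset of this destination pixel's four source
    * bytes in the raster-order source buffer.  For cpp == 2 that is
    * simply four consecutive bytes of the matching row; for cpp == 1 the
    * x/y bits are shuffled so the bytes land where the 8-bit (8x8-texel)
    * utile layout expects them, while the shader itself renders into a
    * 32-bit (4x4-texel) utile destination.
    */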
   nir_ssa_def *x_offset;
   nir_ssa_def *y_offset;
   if (cpp == 1) {
           nir_ssa_def *intra_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, one), two);
           nir_ssa_def *inter_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, nir_imm_int(&b, ~3)), one);

           x_offset = nir_iadd(&b,
                               intra_utile_x_offset,
                               inter_utile_x_offset);
           y_offset = nir_imul(&b,
                               nir_iadd(&b,
                                        nir_ishl(&b, y, one),
                                        nir_ushr(&b, nir_iand(&b, x, two), one)),
                               stride);
   } else {
           x_offset = nir_ishl(&b, x, two);
           y_offset = nir_imul(&b, y, stride);
   }

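   /* Load 32 bits (four source bytes) from constant buffer slot 1, which
    * vc4_yuv_blit() binds to the raw source buffer, at the byte offset
    * computed above.
    */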
   nir_intrinsic_instr *load =
           nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
   load->num_components = 1;
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32, NULL);
   load->src[0] = nir_src_for_ssa(one);
   load->src[1] = nir_src_for_ssa(nir_iadd(&b, x_offset, y_offset));
   nir_builder_instr_insert(&b, &load->instr);

   nir_store_var(&b, color_out,
                 nir_unpack_unorm_4x8(&b, &load->dest.ssa),
                 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   *cached_shader = pctx->create_fs_state(pctx, &shader_tmpl);

   return *cached_shader;
}
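
For reference, the cpp == 1 path above amounts to this CPU-side address
computation.  It is only an illustrative sketch that mirrors the NIR
arithmetic; the helper name is not part of the driver:

#include <stdint.h>

/* Illustrative only: byte offset of the four source bytes loaded for
 * destination pixel (x, y) when cpp == 1.  "stride" is the source row
 * stride in bytes, i.e. the value fed through the "stride" uniform above.
 */
static inline uint32_t
yuv_8bit_src_offset(uint32_t x, uint32_t y, uint32_t stride)
{
        uint32_t intra_utile_x = (x & 1) << 2;              /* 0 or 4 */
        uint32_t inter_utile_x = (x & ~3u) << 1;            /* 8 bytes per 4 pixels */
        uint32_t src_row       = (y << 1) + ((x & 2) >> 1); /* 2 source rows per dest row */

        return src_row * stride + inter_utile_x + intra_utile_x;
}

For x = 0..3 at y = 0 this yields offsets 0, 4, stride and stride + 4,
i.e. bytes 0-7 of source rows 0 and 1, matching the shuffled order the
shader emits.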

static bool
vc4_yuv_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *src = vc4_resource(info->src.resource);
        struct vc4_resource *dst = vc4_resource(info->dst.resource);
        bool ok;

        if (src->tiled)
                return false;
        if (src->base.format != PIPE_FORMAT_R8_UNORM &&
            src->base.format != PIPE_FORMAT_R8G8_UNORM)
                return false;

        /* YUV blits always turn raster-order to tiled */
        assert(dst->base.format == src->base.format);
        assert(dst->tiled);

        /* Always 1:1 and at the origin */
        assert(info->src.box.x == 0 && info->dst.box.x == 0);
        assert(info->src.box.y == 0 && info->dst.box.y == 0);
        assert(info->src.box.width == info->dst.box.width);
        assert(info->src.box.height == info->dst.box.height);

        if ((src->slices[info->src.level].offset & 3) ||
            (src->slices[info->src.level].stride & 3)) {
                perf_debug("YUV-blit src texture offset/stride misaligned: 0x%08x/%d\n",
                           src->slices[info->src.level].offset,
                           src->slices[info->src.level].stride);
                goto fallback;
        }

        vc4_blitter_save(vc4);

        /* Create a renderable surface mapping the T-tiled shadow buffer.
         */
        struct pipe_surface dst_tmpl;
        util_blitter_default_dst_texture(&dst_tmpl, info->dst.resource,
                                         info->dst.level, info->dst.box.z);
        dst_tmpl.format = PIPE_FORMAT_RGBA8888_UNORM;
        struct pipe_surface *dst_surf =
                pctx->create_surface(pctx, info->dst.resource, &dst_tmpl);
        if (!dst_surf) {
                fprintf(stderr, "Failed to create YUV dst surface\n");
                util_blitter_unset_running_flag(vc4->blitter);
                return false;
        }
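        /* Each RGBA8888 pixel of the shadow surface packs four bytes of
         * the 8- or 16-bit source, so halve the width (and, for the
         * 8-bit case, also the height) to cover the same storage.
         */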
        dst_surf->width /= 2;
        if (dst->cpp == 1)
                dst_surf->height /= 2;

        /* Set the constant buffer. */
        uint32_t stride = src->slices[info->src.level].stride;
        struct pipe_constant_buffer cb_uniforms = {
                .user_buffer = &stride,
                .buffer_size = sizeof(stride),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb_uniforms);
        struct pipe_constant_buffer cb_src = {
                .buffer = info->src.resource,
                .buffer_offset = src->slices[info->src.level].offset,
                .buffer_size = (src->bo->size -
                                src->slices[info->src.level].offset),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_src);

        /* Unbind the textures, to make sure we don't try to recurse into the
         * shadow blit.
         */
        pctx->set_sampler_views(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);
        pctx->bind_sampler_states(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);

        util_blitter_custom_shader(vc4->blitter, dst_surf,
                                   vc4_get_yuv_vs(pctx),
                                   vc4_get_yuv_fs(pctx, src->cpp));

        util_blitter_restore_textures(vc4->blitter);
        util_blitter_restore_constant_buffer_state(vc4->blitter);
        /* Restore cb1 (util_blitter doesn't handle this one). */
        struct pipe_constant_buffer cb_disabled = { 0 };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_disabled);

        pipe_surface_reference(&dst_surf, NULL);

        return true;

fallback:
        /* Do an immediate SW fallback, since the render blit path
         * would just recurse.
         */
        ok = util_try_blit_via_copy_region(pctx, info);
        assert(ok); (void)ok;

        return true;
}

static bool
vc4_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);

        if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
                fprintf(stderr, "blit unsupported %s -> %s\n",
                    util_format_short_name(info->src.resource->format),
                    util_format_short_name(info->dst.resource->format));
                return false;
        }

        /* Enable the scissor, so we get a minimal set of tiles rendered. */
        if (!info->scissor_enable) {
                info->scissor_enable = true;
                info->scissor.minx = info->dst.box.x;
                info->scissor.miny = info->dst.box.y;
                info->scissor.maxx = info->dst.box.x + info->dst.box.width;
                info->scissor.maxy = info->dst.box.y + info->dst.box.height;
        }

        vc4_blitter_save(vc4);
        util_blitter_blit(vc4->blitter, info);

        return true;
}

/* Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
void
vc4_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
        struct pipe_blit_info info = *blit_info;

        if (vc4_yuv_blit(pctx, blit_info))
                return;

        if (vc4_tile_blit(pctx, blit_info))
                return;

        if (info.mask & PIPE_MASK_S) {
                if (util_try_blit_via_copy_region(pctx, &info))
                        return;

                info.mask &= ~PIPE_MASK_S;
                fprintf(stderr, "cannot blit stencil, skipping\n");
        }

        if (vc4_render_blit(pctx, &info))
                return;

        fprintf(stderr, "Unsupported blit\n");
}