Example 1: r600_blit_decompress_depth_in_place
static void r600_blit_decompress_depth_in_place(struct r600_context *rctx,
                                                struct r600_texture *texture,
                                                bool is_stencil_sampler,
                                                unsigned first_level, unsigned last_level,
                                                unsigned first_layer, unsigned last_layer)
{
	struct pipe_surface *zsurf, surf_tmpl = {{0}};
	unsigned layer, max_layer, checked_last_layer, level;
	unsigned *dirty_level_mask;

	/* Enable decompression in DB_RENDER_CONTROL */
	if (is_stencil_sampler) {
		rctx->db_misc_state.flush_stencil_inplace = true;
		dirty_level_mask = &texture->stencil_dirty_level_mask;
	} else {
		rctx->db_misc_state.flush_depth_inplace = true;
		dirty_level_mask = &texture->dirty_level_mask;
	}
	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);

	surf_tmpl.format = texture->resource.b.b.format;

	for (level = first_level; level <= last_level; level++) {
		if (!(*dirty_level_mask & (1 << level)))
			continue;

		surf_tmpl.u.tex.level = level;

		/* The smaller the mipmap level, the fewer layers there are,
		 * as far as 3D textures are concerned. */
		max_layer = util_max_layer(&texture->resource.b.b, level);
		checked_last_layer = last_layer < max_layer ? last_layer : max_layer;

		for (layer = first_layer; layer <= checked_last_layer; layer++) {
			surf_tmpl.u.tex.first_layer = layer;
			surf_tmpl.u.tex.last_layer = layer;

			zsurf = rctx->b.b.create_surface(&rctx->b.b, &texture->resource.b.b, &surf_tmpl);

			r600_blitter_begin(&rctx->b.b, R600_DECOMPRESS);
			util_blitter_custom_depth_stencil(rctx->blitter, zsurf, NULL, ~0,
							  rctx->custom_dsa_flush, 1.0f);
			r600_blitter_end(&rctx->b.b);

			pipe_surface_reference(&zsurf, NULL);
		}

		/* The texture will stay dirty if some layers aren't flushed.
		 * This case doesn't seem to occur often, though. */
		if (first_layer == 0 && last_layer == max_layer) {
			*dirty_level_mask &= ~(1 << level);
		}
	}

	/* Disable decompression in DB_RENDER_CONTROL */
	rctx->db_misc_state.flush_depth_inplace = false;
	rctx->db_misc_state.flush_stencil_inplace = false;
	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
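
A minimal caller sketch for the function above, assuming the driver decompresses every dirty level and layer of a depth/stencil texture before it is sampled; the helper name and its placement are illustrative, not the driver's actual API:

/* Hypothetical helper (illustrative name): decompress everything that
 * could be sampled from this depth/stencil texture. */
static void decompress_for_sampling(struct r600_context *rctx,
				    struct r600_texture *tex,
				    bool is_stencil_sampler)
{
	struct pipe_resource *res = &tex->resource.b.b;

	r600_blit_decompress_depth_in_place(rctx, tex, is_stencil_sampler,
					    0, res->last_level,
					    0, util_max_layer(res, 0));
}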
Example 2: evergreen_cs_set_vertex_buffer
static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx,
					   unsigned vb_index,
					   unsigned offset,
					   struct pipe_resource *buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	r600_mark_atom_dirty(rctx, &state->atom);
}
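
A hedged usage sketch: on the compute path, a buffer can be bound as a fetch source before a dispatch roughly like this; the slot index and the kernel_args variable are assumptions for illustration:

/* Illustrative only: bind a kernel-argument buffer at slot 0 so the
 * compute shader's vertex-fetch instructions can read from it. */
evergreen_cs_set_vertex_buffer(rctx, 0 /* vb_index */, 0 /* offset */,
			       kernel_args /* struct pipe_resource * */);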
Example 3: r600_begin_new_cs
void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	ctx->b.flags = 0;
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->b.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
	r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	ctx->scissor.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->scissor.atom.num_dw = R600_MAX_VIEWPORTS * 4;
	r600_mark_atom_dirty(ctx, &ctx->scissor.atom);
	ctx->viewport.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->viewport.atom.num_dw = R600_MAX_VIEWPORTS * 8;
	r600_mark_atom_dirty(ctx, &ctx->viewport.atom);
	if (ctx->b.chip_class <= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
	r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
	if (ctx->gs_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
		r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
	}
	if (ctx->tes_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_HS].atom);
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_LS].atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
	r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
	r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);

	if (ctx->blend_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
	if (ctx->dsa_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
	if (ctx->rasterizer_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);

	if (ctx->b.chip_class <= R700) {
		r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	r600_postflush_resume_features(&ctx->b);

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;

	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->cdw;
}
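
The enabled_mask/dirty_mask pattern above deserves a closer look. Below is a sketch of how a *_dirty helper typically recomputes an atom's command-stream size from its mask; the 8-dwords-per-slot figure is an assumption, not the driver's exact cost:

/* Sketch: one bit per slot; the atom's dword count scales with the
 * number of slots that must be re-emitted. */
static void example_vertex_buffers_dirty(struct r600_context *rctx,
					 struct r600_vertexbuf_state *state)
{
	if (state->dirty_mask) {
		/* 8 dwords per vertex buffer is an assumed figure. */
		state->atom.num_dw = util_bitcount(state->dirty_mask) * 8;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}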
Example 4: r600_clear
static void r600_clear(struct pipe_context *ctx, unsigned buffers,
		       const union pipe_color_union *color,
		       double depth, unsigned stencil)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_framebuffer_state *fb = &rctx->framebuffer.state;

	if (buffers & PIPE_CLEAR_COLOR && rctx->b.chip_class >= EVERGREEN) {
		evergreen_do_fast_color_clear(&rctx->b, fb, &rctx->framebuffer.atom,
					      &buffers, NULL, color);
		if (!buffers)
			return; /* all buffers have been fast cleared */
	}

	if (buffers & PIPE_CLEAR_COLOR) {
		int i;

		/* These buffers cannot use fast clear; make sure to disable expansion. */
		for (i = 0; i < fb->nr_cbufs; i++) {
			struct r600_texture *tex;

			/* If not clearing this buffer, skip. */
			if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
				continue;

			if (!fb->cbufs[i])
				continue;

			tex = (struct r600_texture *)fb->cbufs[i]->texture;
			if (tex->fmask.size == 0)
				tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
		}
	}

	/* If HyperZ is enabled, just clear HyperZ. */
	if (fb->zsbuf && (buffers & PIPE_CLEAR_DEPTH)) {
		struct r600_texture *rtex;
		unsigned level = fb->zsbuf->u.tex.level;

		rtex = (struct r600_texture*)fb->zsbuf->texture;

		/* We can't use the HyperZ fast clear path if the slices of a
		 * texture array are cleared to different values. To keep the
		 * code simple, fast clear is just disabled for texture arrays.
		 */
		/* Only use HTILE for the first level. */
		if (rtex->htile_buffer && !level &&
		    fb->zsbuf->u.tex.first_layer == 0 &&
		    fb->zsbuf->u.tex.last_layer == util_max_layer(&rtex->resource.b.b, level)) {
			if (rtex->depth_clear_value != depth) {
				rtex->depth_clear_value = depth;
				r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
			}
			rctx->db_misc_state.htile_clear = true;
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_blitter_begin(ctx, R600_CLEAR);
	util_blitter_clear(rctx->blitter, fb->width, fb->height,
			   util_framebuffer_get_num_layers(fb),
			   buffers, color, depth, stencil);
	r600_blitter_end(ctx);

	/* disable fast clear */
	if (rctx->db_misc_state.htile_clear) {
		rctx->db_misc_state.htile_clear = false;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
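
For reference, r600_clear backs the pipe_context::clear hook, so a state tracker reaches it roughly like this; the clear values are illustrative:

/* Illustrative call through the pipe_context hook this function
 * implements: clear all color buffers to opaque black, depth to 1.0. */
union pipe_color_union color = { .f = { 0.0f, 0.0f, 0.0f, 1.0f } };
ctx->clear(ctx, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH, &color, 1.0, 0);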
Example 5: r600_blit_decompress_depth
static void r600_blit_decompress_depth(struct pipe_context *ctx,
				       struct r600_texture *texture,
				       struct r600_texture *staging,
				       unsigned first_level, unsigned last_level,
				       unsigned first_layer, unsigned last_layer,
				       unsigned first_sample, unsigned last_sample)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned layer, level, sample, checked_last_layer, max_layer, max_sample;
	struct r600_texture *flushed_depth_texture = staging ?
			staging : texture->flushed_depth_texture;
	const struct util_format_description *desc =
		util_format_description(texture->resource.b.b.format);
	float depth;

	if (!staging && !texture->dirty_level_mask)
		return;

	max_sample = u_max_sample(&texture->resource.b.b);

	/* XXX Decompressing MSAA depth textures is broken on R6xx.
	 * There is also a hardlock if CMASK and FMASK are not present.
	 * Just skip this until we find out how to fix it. */
	if (rctx->b.chip_class == R600 && max_sample > 0) {
		texture->dirty_level_mask = 0;
		return;
	}

	if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
		depth = 0.0f;
	else
		depth = 1.0f;

	/* Enable decompression in DB_RENDER_CONTROL */
	rctx->db_misc_state.flush_depthstencil_through_cb = true;
	rctx->db_misc_state.copy_depth = util_format_has_depth(desc);
	rctx->db_misc_state.copy_stencil = util_format_has_stencil(desc);
	rctx->db_misc_state.copy_sample = first_sample;
	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);

	for (level = first_level; level <= last_level; level++) {
		if (!staging && !(texture->dirty_level_mask & (1 << level)))
			continue;

		/* The smaller the mipmap level, the fewer layers there are,
		 * as far as 3D textures are concerned. */
		max_layer = util_max_layer(&texture->resource.b.b, level);
		checked_last_layer = last_layer < max_layer ? last_layer : max_layer;

		for (layer = first_layer; layer <= checked_last_layer; layer++) {
			for (sample = first_sample; sample <= last_sample; sample++) {
				struct pipe_surface *zsurf, *cbsurf, surf_tmpl = {{0}};

				if (sample != rctx->db_misc_state.copy_sample) {
					rctx->db_misc_state.copy_sample = sample;
					r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
				}

				surf_tmpl.format = texture->resource.b.b.format;
				surf_tmpl.u.tex.level = level;
				surf_tmpl.u.tex.first_layer = layer;
				surf_tmpl.u.tex.last_layer = layer;

				zsurf = ctx->create_surface(ctx, &texture->resource.b.b, &surf_tmpl);

				surf_tmpl.format = flushed_depth_texture->resource.b.b.format;
				cbsurf = ctx->create_surface(ctx,
						&flushed_depth_texture->resource.b.b, &surf_tmpl);

				r600_blitter_begin(ctx, R600_DECOMPRESS);
				util_blitter_custom_depth_stencil(rctx->blitter, zsurf, cbsurf, 1 << sample,
								  rctx->custom_dsa_flush, depth);
				r600_blitter_end(ctx);

				pipe_surface_reference(&zsurf, NULL);
				pipe_surface_reference(&cbsurf, NULL);
			}
		}

		/* The texture will stay dirty if some layers or samples aren't
		 * flushed. This case doesn't seem to occur often, though. */
		if (!staging &&
		    first_layer == 0 && last_layer == max_layer &&
		    first_sample == 0 && last_sample == max_sample) {
			texture->dirty_level_mask &= ~(1 << level);
		}
	}

	/* Re-enable compression in DB_RENDER_CONTROL. */
	rctx->db_misc_state.flush_depthstencil_through_cb = false;
	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
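
A minimal caller sketch, assuming the whole texture is flushed into its pre-allocated flushed_depth_texture copy (passing staging == NULL selects that path); the helper name is hypothetical:

/* Hypothetical helper: decompress every level/layer/sample of 'tex'
 * into tex->flushed_depth_texture (chosen by passing staging = NULL). */
static void flush_whole_depth_texture(struct pipe_context *ctx,
				      struct r600_texture *tex)
{
	struct pipe_resource *res = &tex->resource.b.b;

	r600_blit_decompress_depth(ctx, tex, NULL,
				   0, res->last_level,
				   0, util_max_layer(res, 0),
				   0, u_max_sample(res));
}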
Example 6: r600_begin_new_cs (variant with debug-trace support)
void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	if (ctx->is_debug) {
		uint32_t zero = 0;

		/* Create a buffer used for writing trace IDs and initialize it to 0. */
		assert(!ctx->trace_buf);
		ctx->trace_buf = (struct r600_resource*)
			pipe_buffer_create(ctx->b.b.screen, 0,
					   PIPE_USAGE_STAGING, 4);
		if (ctx->trace_buf)
			pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
						    0, sizeof(zero), &zero);
		ctx->trace_id = 0;
	}

	if (ctx->trace_buf)
		eg_trace_emit(ctx);

	ctx->b.flags = 0;
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->b.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
	if (ctx->b.chip_class >= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->fragment_images.atom);
		r600_mark_atom_dirty(ctx, &ctx->fragment_buffers.atom);
		r600_mark_atom_dirty(ctx, &ctx->compute_images.atom);
		r600_mark_atom_dirty(ctx, &ctx->compute_buffers.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
	r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	r600_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
	ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	r600_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
	if (ctx->b.chip_class <= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
	r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
	if (ctx->gs_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
		r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
	}
	if (ctx->tes_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_HS].atom);
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_LS].atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
	r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
	r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);

	if (ctx->blend_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
	if (ctx->dsa_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
	if (ctx->rasterizer_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);

	if (ctx->b.chip_class <= R700) {
		r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	for (shader = 0; shader < ARRAY_SIZE(ctx->scratch_buffers); shader++) {
		ctx->scratch_buffers[shader].dirty = true;
	}

	r600_postflush_resume_features(&ctx->b);

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
	ctx->last_rast_prim      = -1;
	ctx->current_rast_prim   = -1;

	assert(!ctx->b.gfx.cs->prev_dw);
	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
}
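
The trace buffer set up at the top of this variant exists for post-hang debugging. Here is a sketch, under the assumption that eg_trace_emit writes an incrementing trace ID into the buffer, of reading back the last ID that actually reached the GPU:

/* Sketch (assumption): after a hang, read the last trace ID the GPU
 * wrote so the failing packet range can be narrowed down. */
uint32_t last_id = 0;
if (ctx->trace_buf)
	pipe_buffer_read(&ctx->b.b, &ctx->trace_buf->b.b,
			 0, sizeof(last_id), &last_id);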