Example #1
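This example clears the DCC metadata of one mip level of a texture. It picks the buffer that actually holds the DCC data (a separate buffer or the texture's own buffer), computes the byte offset and size to clear (the whole DCC surface on GFX9, a per-level, per-layer range on older chips), and hands the fill off to si_clear_buffer.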
void vi_dcc_clear_level(struct si_context *sctx,
			struct si_texture *tex,
			unsigned level, unsigned clear_value)
{
	struct pipe_resource *dcc_buffer;
	uint64_t dcc_offset, clear_size;

	assert(vi_dcc_enabled(tex, level));

	if (tex->dcc_separate_buffer) {
		dcc_buffer = &tex->dcc_separate_buffer->b.b;
		dcc_offset = 0;
	} else {
		dcc_buffer = &tex->buffer.b.b;
		dcc_offset = tex->dcc_offset;
	}

	if (sctx->chip_class >= GFX9) {
		/* Mipmap level clears aren't implemented. */
		assert(tex->buffer.b.b.last_level == 0);
		/* 4x and 8x MSAA needs a sophisticated compute shader for
		 * the clear. See AMDVLK. */
		assert(tex->buffer.b.b.nr_storage_samples <= 2);
		clear_size = tex->surface.dcc_size;
	} else {
		unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

		/* If this is 0, fast clear isn't possible; this can occur with MSAA. */
		assert(tex->surface.u.legacy.level[level].dcc_fast_clear_size);
		/* Layered 4x and 8x MSAA DCC fast clears need to clear
		 * dcc_fast_clear_size bytes for each layer. A compute shader
		 * would be more efficient than separate per-layer clear operations.
		 */
		assert(tex->buffer.b.b.nr_storage_samples <= 2 || num_layers == 1);

		dcc_offset += tex->surface.u.legacy.level[level].dcc_offset;
		clear_size = tex->surface.u.legacy.level[level].dcc_fast_clear_size *
			     num_layers;
	}

	si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size,
			&clear_value, 4, SI_COHERENCY_CB_META, false);
}
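
All three examples funnel their fills through si_clear_buffer. For reference, this is roughly the declaration implied by the call sites on this page (a sketch only; the authoritative prototype lives in the driver's headers and can differ between Mesa branches):

/* Sketch inferred from the calls shown here: fill `size` bytes of `dst`
 * starting at `offset` with the repeating pattern in `clear_value`
 * (`clear_value_size` bytes per element), using the given coherency
 * domain; `force_cpdma` selects the CP DMA path over a compute clear. */
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
		     uint64_t offset, uint64_t size, uint32_t *clear_value,
		     uint32_t clear_value_size, enum si_coherency coher,
		     bool force_cpdma);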
Example #2
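This example creates a radeonsi pipe_context. Along the way it sets up suballocators and uploaders, the graphics and optional SDMA command streams, border colors, and per-chip workarounds; at the end it uses si_clear_buffer to zero the CIK dummy constant buffer so that loads from unbound constant buffers return zeros.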
static struct pipe_context *si_create_context(struct pipe_screen *screen,
                                              unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;
	bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

	if (!sctx)
		return NULL;

	sctx->has_graphics = sscreen->info.chip_class == SI ||
			     !(flags & PIPE_CONTEXT_COMPUTE_ONLY);

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.screen = screen; /* this must be set first */
	sctx->b.priv = NULL;
	sctx->b.destroy = si_destroy_context;
	sctx->screen = sscreen; /* Easy access to the screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

	sctx->ws = sscreen->ws;
	sctx->family = sscreen->info.family;
	sctx->chip_class = sscreen->info.chip_class;

	if (sscreen->info.has_gpu_reset_counter_query) {
		sctx->gpu_reset_counter =
			sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
	}

	if (sctx->chip_class == CIK ||
	    sctx->chip_class == VI ||
	    sctx->chip_class == GFX9) {
		sctx->eop_bug_scratch = si_resource(
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends));
		if (!sctx->eop_bug_scratch)
			goto fail;
	}

	/* Initialize context allocators. */
	sctx->allocator_zeroed_memory =
		u_suballocator_create(&sctx->b, 128 * 1024,
				      0, PIPE_USAGE_DEFAULT,
				      SI_RESOURCE_FLAG_UNMAPPABLE |
				      SI_RESOURCE_FLAG_CLEAR, false);
	if (!sctx->allocator_zeroed_memory)
		goto fail;

	sctx->b.stream_uploader = u_upload_create(&sctx->b, 1024 * 1024,
						    0, PIPE_USAGE_STREAM,
						    SI_RESOURCE_FLAG_READ_ONLY);
	if (!sctx->b.stream_uploader)
		goto fail;

	sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
						       0, PIPE_USAGE_STAGING, 0);
	if (!sctx->cached_gtt_allocator)
		goto fail;

	sctx->ctx = sctx->ws->ctx_create(sctx->ws);
	if (!sctx->ctx)
		goto fail;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
						   (void*)si_flush_dma_cs,
						   sctx, stop_exec_on_failure);
	}

	bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
	sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 SI_RESOURCE_FLAG_32BIT |
						 (use_sdma_upload ?
							  SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA :
							  (sscreen->cpdma_prefetch_writes_memory ?
								   0 : SI_RESOURCE_FLAG_READ_ONLY)));
	if (!sctx->b.const_uploader)
		goto fail;

	if (use_sdma_upload)
		u_upload_enable_flush_explicit(sctx->b.const_uploader);

	sctx->gfx_cs = ws->cs_create(sctx->ctx,
				     sctx->has_graphics ? RING_GFX : RING_COMPUTE,
				     (void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = si_resource(
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table)));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	/* Initialize context functions used by graphics and compute. */
	sctx->b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_debug_callback = si_set_debug_callback;
	sctx->b.set_log_context = si_set_log_context;
	sctx->b.set_context_param = si_set_context_param;
	sctx->b.get_device_reset_status = si_get_reset_status;
	sctx->b.set_device_reset_callback = si_set_device_reset_callback;

	si_init_all_descriptors(sctx);
	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_compute_blit_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_fence_functions(sctx);
	si_init_state_compute_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.resource_copy_region = sctx->dma_copy;

	/* Initialize graphics-only context functions. */
	if (sctx->has_graphics) {
		si_init_context_texture_functions(sctx);
		si_init_query_functions(sctx);
		si_init_msaa_functions(sctx);
		si_init_shader_functions(sctx);
		si_init_state_functions(sctx);
		si_init_streamout_functions(sctx);
		si_init_viewport_functions(sctx);

		sctx->blitter = util_blitter_create(&sctx->b);
		if (sctx->blitter == NULL)
			goto fail;
		sctx->blitter->skip_viewport_restore = true;

		si_init_draw_functions(sctx);
	}

	/* Initialize SDMA functions. */
	if (sctx->chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	sctx->sample_mask = 0xffff;

	/* Initialize multimedia functions. */
	if (sscreen->info.has_hw_decode) {
		sctx->b.create_video_codec = si_uvd_create_decoder;
		sctx->b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.create_video_codec = vl_create_decoder;
		sctx->b.create_video_buffer = vl_video_buffer_create;
	}

	if (sctx->chip_class >= GFX9) {
		sctx->wait_mem_scratch = si_resource(
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4,
				 V_370_MEM, V_370_ME, &sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->chip_class == CIK) {
		sctx->null_const_buf.buffer =
			pipe_aligned_buffer_create(screen,
						   SI_RESOURCE_FLAG_32BIT,
						   PIPE_USAGE_DEFAULT, 16,
						   sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
		for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.set_constant_buffer(&sctx->b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);

	si_init_compiler(sscreen, &sctx->compiler);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	sctx->sample_pos_buffer =
		pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT,
				   sizeof(sctx->sample_positions));
	pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0,
			  sizeof(sctx->sample_positions), &sctx->sample_positions);

	/* this must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->chip_class == CIK) {
		/* Clear the NULL constant buffer, because loads should return zeros.
		 * Note that this forces CP DMA to be used, because clover deadlocks
		 * for some reason when the compute codepath is used.
		 */
		uint32_t clear_value = 0;
		si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0,
				&clear_value, 4, SI_COHERENCY_SHADER, true);
	}
	return &sctx->b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b);
	return NULL;
}
Example #3
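This example attempts a fast color clear of every bound colorbuffer. For each eligible surface it tries a DCC fast clear first (via vi_dcc_clear_level, shown in Example #1) and otherwise falls back to a CMASK clear; both paths write the metadata with si_clear_buffer, and successfully cleared buffers are removed from the *buffers mask.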
static void si_do_fast_color_clear(struct si_context *sctx,
				   unsigned *buffers,
				   const union pipe_color_union *color)
{
	struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
	int i;

	/* This function is broken on big-endian, so just disable this path for now. */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (sctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct si_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		unsigned level = fb->cbufs[i]->u.tex.level;
		if (level > 0)
			continue;

		tex = (struct si_texture *)fb->cbufs[i]->texture;

		/* TODO: GFX9: Implement DCC fast clear for level 0 of
		 * mipmapped textures. Mipmapped DCC has to clear a rectangular
		 * area of DCC for level 0 (because the whole miptree is
		 * organized in a 2D plane).
		 */
		if (sctx->chip_class >= GFX9 &&
		    tex->buffer.b.b.last_level > 0)
			continue;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->buffer.b.is_shared &&
		    !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		if (sctx->chip_class <= GFX8 &&
		    tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
		    !sctx->screen->info.htile_cmask_support_1d_tiling)
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear. The closed driver does this, but the numbers may differ.
		 *
		 * This helps on both dGPUs and APUs, even small APUs like Mullins.
		 */
		bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
				 tex->buffer.b.b.width0 *
				 tex->buffer.b.b.height0 <= 512 * 512;
		bool eliminate_needed = false;
		bool fmask_decompress_needed = false;

		/* Fast clear is the most appropriate place to enable DCC for
		 * displayable surfaces.
		 */
		if (sctx->family == CHIP_STONEY && !too_small) {
			vi_separate_dcc_try_enable(sctx, tex);

			/* A CMASK-only clear isn't supported with RB+ on Stoney,
			 * so all clears are considered to be hypothetically slow
			 * clears, which is weighed when determining whether to
			 * enable separate DCC.
			 */
			if (tex->dcc_gather_statistics) /* only for Stoney */
				tex->num_slow_clears++;
		}

		/* Try to clear DCC first, otherwise try CMASK. */
		if (vi_dcc_enabled(tex, 0)) {
			uint32_t reset_value;

			if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
				continue;

			/* This can happen with mipmapping or MSAA. */
			if (sctx->chip_class == GFX8 &&
			    !tex->surface.u.legacy.level[level].dcc_fast_clear_size)
				continue;

			if (!vi_get_fast_clear_parameters(tex->buffer.b.b.format,
							  fb->cbufs[i]->format,
							  color, &reset_value,
							  &eliminate_needed))
				continue;

			if (eliminate_needed && too_small)
				continue;

			/* DCC fast clear with MSAA should clear CMASK to 0xC. */
			if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
				/* TODO: This doesn't work with MSAA. */
				if (eliminate_needed)
					continue;

				uint32_t clear_value = 0xCCCCCCCC;
				si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
						tex->cmask_offset, tex->surface.cmask_size,
						&clear_value, 4, SI_COHERENCY_CB_META, false);
				fmask_decompress_needed = true;
			}

			vi_dcc_clear_level(sctx, tex, 0, reset_value);
			tex->separate_dcc_dirty = true;
		} else {
			if (too_small)
				continue;

			/* 128-bit formats are unsupported */
			if (tex->surface.bpe > 8) {
				continue;
			}

			/* RB+ doesn't work with CMASK fast clear on Stoney. */
			if (sctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			si_alloc_separate_cmask(sctx->screen, tex);
			if (!tex->cmask_buffer)
				continue;

			/* Do the fast clear. */
			uint32_t clear_value = 0;
			si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
					tex->cmask_offset, tex->surface.cmask_size,
					&clear_value, 4, SI_COHERENCY_CB_META, false);
			eliminate_needed = true;
		}

		if ((eliminate_needed || fmask_decompress_needed) &&
		    !(tex->dirty_level_mask & (1 << level))) {
			tex->dirty_level_mask |= 1 << level;
			p_atomic_inc(&sctx->screen->compressed_colortex_counter);
		}

		/* We can change the micro tile mode before a full clear. */
		si_set_optimal_micro_tile_mode(sctx->screen, tex);

		*buffers &= ~clear_bit;

		/* Chips with DCC constant encoding don't need to set the clear
		 * color registers for DCC clear values 0 and 1.
		 */
		if (sctx->screen->has_dcc_constant_encode && !eliminate_needed)
			continue;

		if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
			sctx->framebuffer.dirty_cbufs |= 1 << i;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
		}
	}
}