Example #1
static void si_dump_shader(struct si_screen *sscreen,
			   struct si_shader_ctx_state *state, FILE *f)
{
	if (!state->cso || !state->current)
		return;

	si_dump_shader_key(state->cso->type, &state->current->key, f);
	si_shader_dump(sscreen, state->current, NULL,
		       state->cso->info.processor, f);
}
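si_dump_shader above is a small guard-and-dispatch helper: it bails out unless a shader is both bound (cso) and compiled (current), then prints the shader key followed by the full shader dump for that stage. In radeonsi it is typically called once per bound stage from the debug-state dump path; the sketch below shows that calling pattern (the wrapper function name and the per-stage si_shader_ctx_state fields are assumptions for illustration, not part of the example above).

/* Sketch only: assumed caller that dumps every bound graphics stage into
 * one log file, mirroring the usual radeonsi debug-dump pattern. */
static void si_dump_bound_shaders(struct si_screen *sscreen,
				  struct si_context *sctx, FILE *f)
{
	si_dump_shader(sscreen, &sctx->vs_shader, f);  /* vertex */
	si_dump_shader(sscreen, &sctx->tcs_shader, f); /* tess. control */
	si_dump_shader(sscreen, &sctx->tes_shader, f); /* tess. evaluation */
	si_dump_shader(sscreen, &sctx->gs_shader, f);  /* geometry */
	si_dump_shader(sscreen, &sctx->ps_shader, f);  /* pixel */
}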
Example #2
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	struct si_shader *shader = &program->shader;

	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		struct si_shader_selector sel;
		bool scratch_enabled;

		memset(&sel, 0, sizeof(sel));

		sel.tokens = tgsi_dup_tokens(cso->prog);
		if (!sel.tokens) {
			FREE(program);
			return NULL;
		}

		tgsi_scan_shader(cso->prog, &sel.info);
		sel.type = PIPE_SHADER_COMPUTE;
		sel.local_size = cso->req_local_mem;

		p_atomic_inc(&sscreen->b.num_shaders_created);

		program->shader.selector = &sel;

		if (si_shader_create(sscreen, sctx->tm, &program->shader,
		                     &sctx->b.debug)) {
			FREE(sel.tokens);
			FREE(program);
			return NULL;
		}

		scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

		shader->config.rsrc1 =
			   S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			   S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			   S_00B848_DX10_CLAMP(1) |
			   S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
			   S_00B84C_SCRATCH_EN(scratch_enabled) |
			   S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			   S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			   S_00B84C_LDS_SIZE(shader->config.lds_size);

		FREE(sel.tokens);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		radeon_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr);
		si_shader_binary_upload(sctx->screen, &program->shader);
	}

	return program;
}
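The rsrc1 encoding in this example packs register counts at the hardware allocation granularity: VGPRs are allocated in blocks of 4 and SGPRs in blocks of 8, so the fields store (count - 1) / 4 and (count - 1) / 8. For instance, a shader with num_vgprs = 24 and num_sgprs = 16 encodes S_00B848_VGPRS(5) and S_00B848_SGPRS(1), which the hardware reads back as 6 blocks of 4 VGPRs and 2 blocks of 8 SGPRs.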
Example #3
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);
		util_queue_fence_init(&program->ready);

		struct util_async_debug_callback async_debug;
		bool wait =
			(sctx->debug.debug_message && !sctx->debug.async) ||
			sctx->is_debug ||
			si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);

		if (wait) {
			u_async_debug_init(&async_debug);
			program->compiler_ctx_state.debug = async_debug.base;
		}

		util_queue_add_job(&sscreen->shader_compiler_queue,
				   program, &program->ready,
				   si_create_compute_state_async, NULL);

		if (wait) {
			util_queue_fence_wait(&program->ready);
			u_async_debug_drain(&async_debug, &sctx->debug);
			u_async_debug_cleanup(&async_debug);
		}
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
Example #4
/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
	struct si_compute *program = (struct si_compute *)job;
	struct si_shader *shader = &program->shader;
	struct si_shader_selector sel;
	struct ac_llvm_compiler *compiler;
	struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
	struct si_screen *sscreen = program->screen;

	assert(!debug->debug_message || debug->async);
	assert(thread_index >= 0);
	assert(thread_index < ARRAY_SIZE(sscreen->compiler));
	compiler = &sscreen->compiler[thread_index];

	memset(&sel, 0, sizeof(sel));

	sel.screen = sscreen;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		tgsi_scan_shader(program->ir.tgsi, &sel.info);
		sel.tokens = program->ir.tgsi;
	} else {
		assert(program->ir_type == PIPE_SHADER_IR_NIR);
		sel.nir = program->ir.nir;

		si_nir_scan_shader(sel.nir, &sel.info);
		si_lower_nir(&sel);
	}

	/* Store the declared LDS size into tgsi_shader_info for the shader
	 * cache to include it.
	 */
	sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;

	sel.type = PIPE_SHADER_COMPUTE;
	si_get_active_slot_masks(&sel.info,
				 &program->active_const_and_shader_buffers,
				 &program->active_samplers_and_images);

	program->shader.selector = &sel;
	program->shader.is_monolithic = true;
	program->uses_grid_size = sel.info.uses_grid_size;
	program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
	program->uses_bindless_images = sel.info.uses_bindless_images;
	program->reads_variable_block_size =
		sel.info.uses_block_size &&
		sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
	program->num_cs_user_data_dwords =
		sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];

	void *ir_binary = si_get_ir_binary(&sel);

	/* Try to load the shader from the shader cache. */
	mtx_lock(&sscreen->shader_cache_mutex);

	if (ir_binary &&
	    si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
		mtx_unlock(&sscreen->shader_cache_mutex);

		si_shader_dump_stats_for_shader_db(shader, debug);
		si_shader_dump(sscreen, shader, debug, PIPE_SHADER_COMPUTE,
			       stderr, true);

		if (si_shader_binary_upload(sscreen, shader))
			program->shader.compilation_failed = true;
	} else {
		mtx_unlock(&sscreen->shader_cache_mutex);

		if (si_shader_create(sscreen, compiler, &program->shader, debug)) {
			program->shader.compilation_failed = true;

			if (program->ir_type == PIPE_SHADER_IR_TGSI)
				FREE(program->ir.tgsi);
			program->shader.selector = NULL;
			return;
		}

		bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
		unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
				      (sel.info.uses_grid_size ? 3 : 0) +
				      (program->reads_variable_block_size ? 3 : 0) +
				      program->num_cs_user_data_dwords;

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 =
			S_00B84C_USER_SGPR(user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
						sel.info.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		if (ir_binary) {
			mtx_lock(&sscreen->shader_cache_mutex);
			if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
				FREE(ir_binary);
			mtx_unlock(&sscreen->shader_cache_mutex);
		}
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		FREE(program->ir.tgsi);

	program->shader.selector = NULL;
}
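si_create_compute_state_async is the job body that the TGSI/NIR paths in Examples #3, #5 and #6 hand to util_queue_add_job, so any consumer of program->shader must synchronize on program->ready before reading the compile result. A minimal sketch of that consumer-side check follows; the helper name is illustrative, and only util_queue_fence_wait plus the fields used in the examples above are assumed.

/* Sketch only: wait for the async compile job to finish, then report
 * whether the shader is usable. */
static bool si_compute_shader_is_ready(struct si_compute *program)
{
	util_queue_fence_wait(&program->ready);
	return !program->shader.compilation_failed;
}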
Example #5
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);

		si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
					    &program->ready,
					    &program->compiler_ctx_state,
					    program, si_create_compute_state_async);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
			if (program->shader.binary.reloc_count != 0) {
				fprintf(stderr, "Error: %d unsupported relocations\n",
					program->shader.binary.reloc_count);
				FREE(program);
				return NULL;
			}
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
Example #6
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		program->tokens = tgsi_dup_tokens(cso->prog);
		if (!program->tokens) {
			FREE(program);
			return NULL;
		}

		program->compiler_ctx_state.tm = sctx->tm;
		program->compiler_ctx_state.debug = sctx->b.debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->b.num_shaders_created);
		util_queue_fence_init(&program->ready);

		if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
		    sctx->is_debug ||
		    r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
			si_create_compute_state_async(program, -1);
		else
			util_queue_add_job(&sscreen->shader_compiler_queue,
					   program, &program->ready,
					   si_create_compute_state_async, NULL);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
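All of the si_create_compute_state variants shown above are wired into gallium as the context's create_compute_state hook. A minimal sketch of that registration follows; the init function name and the sibling hooks mirror the usual radeonsi layout and struct pipe_context members, but their exact spelling (for example sctx->b.b versus sctx->b, which changed between driver revisions) is an assumption here.

/* Sketch only: assumed CSO-hook registration on the gallium context. */
void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
	sctx->b.b.launch_grid = si_launch_grid;
}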