Пример #1
0
/* Create a compute (CS) pipeline state object.
 *
 * Accepts either TGSI IR (compiled synchronously here) or a native,
 * pre-compiled ELF blob (PIPE_SHADER_IR_NATIVE).  Returns a heap-allocated
 * struct si_compute owned by the caller, or NULL on failure.
 */
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	struct si_shader *shader;

	/* Fix: CALLOC_STRUCT can fail under memory pressure; the original
	 * dereferenced the result unconditionally, which is UB on OOM. */
	if (!program)
		return NULL;

	shader = &program->shader;

	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	/* Code-object v2 metadata needs LLVM >= 4.0 and only exists in
	 * natively compiled (ELF) shaders. */
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		/* Synchronous compile path: build a throwaway selector on the
		 * stack just long enough to drive si_shader_create(). */
		struct si_shader_selector sel;
		bool scratch_enabled;

		memset(&sel, 0, sizeof(sel));

		sel.tokens = tgsi_dup_tokens(cso->prog);
		if (!sel.tokens) {
			FREE(program);
			return NULL;
		}

		tgsi_scan_shader(cso->prog, &sel.info);
		sel.type = PIPE_SHADER_COMPUTE;
		sel.local_size = cso->req_local_mem;

		p_atomic_inc(&sscreen->b.num_shaders_created);

		/* NOTE(review): sel is a stack local, so this pointer dangles
		 * once we return; presumably the selector is only consumed
		 * inside si_shader_create() — confirm nothing reads it later. */
		program->shader.selector = &sel;

		if (si_shader_create(sscreen, sctx->tm, &program->shader,
		                     &sctx->b.debug)) {
			FREE(sel.tokens);
			FREE(program);
			return NULL;
		}

		scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

		/* Pack the COMPUTE_PGM_RSRC1/RSRC2 register images.  VGPRs are
		 * allocated in units of 4, SGPRs in units of 8 (hence the -1/4
		 * and -1/8 encodings). */
		shader->config.rsrc1 =
			   S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			   S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			   S_00B848_DX10_CLAMP(1) |
			   S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
			   S_00B84C_SCRATCH_EN(scratch_enabled) |
			   S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			   S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			   S_00B84C_LDS_SIZE(shader->config.lds_size);

		FREE(sel.tokens);
	} else {
		/* Native path: cso->prog is a pipe_llvm_program_header
		 * immediately followed by the ELF image. */
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		radeon_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr);
		/* NOTE(review): the upload result is not checked here; later
		 * versions of this function treat a negative return as fatal —
		 * verify whether that is needed at this revision. */
		si_shader_binary_upload(sctx->screen, &program->shader);
	}

	return program;
}
Пример #2
0
/* Create a compute (CS) pipeline state object.
 *
 * TGSI and NIR shaders are handed off to the shader-compiler queue for
 * asynchronous compilation; PIPE_SHADER_IR_NATIVE blobs are parsed and
 * uploaded immediately.  Returns a reference-counted si_compute owned by
 * the caller, or NULL on failure.
 */
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	/* Fix: CALLOC_STRUCT can fail under memory pressure; the original
	 * passed a possibly-NULL pointer straight to pipe_reference_init(). */
	if (!program)
		return NULL;

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	/* Only natively compiled ELF shaders carry code-object v2 metadata. */
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			/* Own a private copy of the tokens; the caller may free
			 * cso->prog after this call returns. */
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			/* NOTE(review): the NIR shader is taken by pointer, not
			 * cloned — ownership presumably transfers here; confirm
			 * against the state-tracker contract. */
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);

		/* Kick off the compile on the shader queue; program->ready is
		 * signaled when si_create_compute_state_async finishes. */
		si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
					    &program->ready,
					    &program->compiler_ctx_state,
					    program, si_create_compute_state_async);
	} else {
		/* Native path: cso->prog is a pipe_llvm_program_header
		 * immediately followed by the ELF image. */
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
			/* Relocations are not supported for pre-compiled blobs. */
			if (program->shader.binary.reloc_count != 0) {
				fprintf(stderr, "Error: %d unsupported relocations\n",
					program->shader.binary.reloc_count);
				FREE(program);
				return NULL;
			}
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
Пример #3
0
/* Create a compute (CS) pipeline state object.
 *
 * TGSI and NIR shaders are compiled asynchronously on the screen's
 * shader-compiler queue (optionally waited on when debugging/dumping is
 * active); PIPE_SHADER_IR_NATIVE blobs are parsed and uploaded inline.
 * Returns a reference-counted si_compute owned by the caller, or NULL on
 * failure.
 */
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	/* Fix: CALLOC_STRUCT can fail under memory pressure; the original
	 * passed a possibly-NULL pointer straight to pipe_reference_init(). */
	if (!program)
		return NULL;

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	/* Only natively compiled ELF shaders carry code-object v2 metadata. */
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			/* Own a private copy of the tokens; the caller may free
			 * cso->prog after this call returns. */
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			/* NOTE(review): the NIR shader is taken by pointer, not
			 * cloned — ownership presumably transfers here; confirm
			 * against the state-tracker contract. */
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);
		util_queue_fence_init(&program->ready);

		struct util_async_debug_callback async_debug;
		/* Compile synchronously (wait on the fence) when a synchronous
		 * debug callback is installed, in a debug context, or when the
		 * shader is going to be dumped. */
		bool wait =
			(sctx->debug.debug_message && !sctx->debug.async) ||
			sctx->is_debug ||
			si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);

		if (wait) {
			/* Route compile-time messages through an async buffer so
			 * they can be drained into the context's callback below. */
			u_async_debug_init(&async_debug);
			program->compiler_ctx_state.debug = async_debug.base;
		}

		util_queue_add_job(&sscreen->shader_compiler_queue,
				   program, &program->ready,
				   si_create_compute_state_async, NULL);

		if (wait) {
			util_queue_fence_wait(&program->ready);
			u_async_debug_drain(&async_debug, &sctx->debug);
			u_async_debug_cleanup(&async_debug);
		}
	} else {
		/* Native path: cso->prog is a pipe_llvm_program_header
		 * immediately followed by the ELF image. */
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
Пример #4
0
/* Create a compute (CS) pipeline state object.
 *
 * TGSI shaders are compiled on the shader-compiler queue (or inline when
 * debugging/dumping requires synchronous results); PIPE_SHADER_IR_NATIVE
 * blobs are parsed and uploaded immediately.  Returns a heap-allocated
 * si_compute owned by the caller, or NULL on failure.
 */
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	/* Fix: CALLOC_STRUCT can fail under memory pressure; the original
	 * dereferenced the result unconditionally, which is UB on OOM. */
	if (!program)
		return NULL;

	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	/* Code-object v2 metadata needs LLVM >= 4.0 and only exists in
	 * natively compiled (ELF) shaders. */
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		/* Own a private copy of the tokens; the caller may free
		 * cso->prog after this call returns. */
		program->tokens = tgsi_dup_tokens(cso->prog);
		if (!program->tokens) {
			FREE(program);
			return NULL;
		}

		program->compiler_ctx_state.tm = sctx->tm;
		program->compiler_ctx_state.debug = sctx->b.debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->b.num_shaders_created);
		util_queue_fence_init(&program->ready);

		/* Compile inline (thread index -1) when a synchronous debug
		 * callback is installed, in a debug context, or when the shader
		 * will be dumped; otherwise queue an async compile job. */
		if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
		    sctx->is_debug ||
		    r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
			si_create_compute_state_async(program, -1);
		else
			util_queue_add_job(&sscreen->shader_compiler_queue,
					   program, &program->ready,
					   si_create_compute_state_async, NULL);
	} else {
		/* Native path: cso->prog is a pipe_llvm_program_header
		 * immediately followed by the ELF image. */
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}