Example 1
static void evergreen_set_global_binding(struct pipe_context *ctx,
					 unsigned first, unsigned n,
					 struct pipe_resource **resources,
					 uint32_t **handles)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;
	unsigned i;

	COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	/* We mark these items for promotion to the pool if they
	 * aren't already there */
	for (i = first; i < first + n; i++) {
		struct compute_memory_item *item = buffers[i]->chunk;

		if (!is_item_in_pool(item))
			buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
	}

	if (compute_memory_finalize_pending(pool, ctx) == -1) {
		/* XXX: Unset */
		return;
	}

	for (i = first; i < first + n; i++) {
		uint32_t buffer_offset;
		uint32_t handle;
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		buffer_offset = util_le32_to_cpu(*(handles[i]));
		handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;

		*(handles[i]) = util_cpu_to_le32(handle);
	}

	/* globals for writing */
	evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	/* globals for reading */
	evergreen_cs_set_vertex_buffer(rctx, 1, 0,
				(struct pipe_resource*)pool->bo);

	/* constants for reading, LLVM puts them in text segment */
	evergreen_cs_set_vertex_buffer(rctx, 2, 0,
				(struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
}
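
Example 1 patches each user handle in place: the state tracker's little-endian dword is read, the chunk's position in the global pool is converted from dwords to bytes and added, and the result is written back little-endian. The stand-alone sketch below reproduces just that arithmetic; le32_to_cpu()/cpu_to_le32() are hypothetical stand-ins for Mesa's util_le32_to_cpu()/util_cpu_to_le32(), and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for util_le32_to_cpu()/util_cpu_to_le32(). */
static uint32_t le32_to_cpu(uint32_t v)
{
	const uint8_t *b = (const uint8_t *)&v;
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t cpu_to_le32(uint32_t v)
{
	uint32_t out;
	uint8_t *b = (uint8_t *)&out;
	b[0] = v & 0xff;
	b[1] = (v >> 8) & 0xff;
	b[2] = (v >> 16) & 0xff;
	b[3] = (v >> 24) & 0xff;
	return out;
}

int main(void)
{
	uint32_t handle_le = cpu_to_le32(0x40); /* offset written by the state tracker (made up) */
	unsigned start_in_dw = 256;             /* chunk position in the pool, in dwords (made up) */

	/* Byte offset inside the buffer plus byte offset of the chunk in the pool. */
	uint32_t handle = le32_to_cpu(handle_le) + start_in_dw * 4;

	handle_le = cpu_to_le32(handle);        /* stored back little-endian */
	printf("patched handle = 0x%x\n", le32_to_cpu(handle_le));
	return 0;
}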
Example 2
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++) {
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}
Example 3
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
					(struct r600_resource *)resources[i]->base.texture,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}
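
A minimal sketch of the fixed index mapping Example 3 applies: RAT 0 holds the global pool (see Example 1) and vertex buffers 0-1 are reserved for parameters and global buffers, so compute resource i lands in RAT i+1 and vertex buffer i+2. The helper names and the MAX_RATS constant are invented for illustration; 12 is assumed only because of the hard-coded assert(i+1 < 12) above.

#include <assert.h>
#include <stdio.h>

/* Assumption: 12 matches the hard-coded assert(i+1 < 12) in Example 3. */
#define MAX_RATS 12

/* RAT 0 holds the global memory pool, so writable resource i goes to RAT i + 1. */
static unsigned rat_index_for_resource(unsigned i)
{
	assert(i + 1 < MAX_RATS);
	return i + 1;
}

/* Vertex buffers 0 and 1 are reserved for the kernel parameters and the
 * global buffers, so resource i is read through vertex buffer i + 2. */
static unsigned vtx_index_for_resource(unsigned i)
{
	return 2 + i;
}

int main(void)
{
	for (unsigned i = 0; i < 3; i++)
		printf("resource %u -> RAT %u, vertex buffer %u\n", i,
		       rat_index_for_resource(i), vtx_index_for_resource(i));
	return 0;
}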
Example 4
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
 * kernel parameters, there are implicit parameters that need to be stored
 * in the vertex buffer as well.  Here is how these parameters are organized in
 * the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add 36 bytes of space for the implicit grid parameters */
		buffer_size += kernel_parameters_offset_bytes;
		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the work group size */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
					(shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	/* ID=0 is reserved for the parameters */
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	/* ID=0 is reserved for parameters */
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}
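
To make the dword layout documented above concrete, here is a stand-alone sketch that fills the same 36-byte implicit-parameter block on the host side, followed by the kernel's own arguments; the grid, block, and kernel_args values are made up for illustration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	const uint32_t grid[3]  = {4, 2, 1};        /* work groups per dimension (made up) */
	const uint32_t block[3] = {64, 1, 1};       /* work items per group (made up) */
	const uint32_t kernel_args[2] = {1024, 16}; /* hypothetical kernel parameters */

	/* 9 dwords (36 bytes) of implicit parameters followed by the kernel's own. */
	uint32_t buf[9 + 2];

	memcpy(&buf[0], grid, sizeof(grid));               /* dwords 0-2: work groups */
	for (int i = 0; i < 3; i++)
		buf[3 + i] = grid[i] * block[i];           /* dwords 3-5: global work items */
	memcpy(&buf[6], block, sizeof(block));             /* dwords 6-8: local work items */
	memcpy(&buf[9], kernel_args, sizeof(kernel_args)); /* dwords 9+: kernel parameters */

	for (int i = 0; i < 11; i++)
		printf("dword %2d = %u\n", i, buf[i]);
	return 0;
}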