Example #1
/**
 * Global buffers are not really resources, they are actually offsets
 * into a single global resource (r600_screen::global_pool).  This means
 * they don't have their own cs_buf handle, so they cannot be passed
 * to r600_copy_buffer() and must be handled separately.
 */
static void r600_copy_global_buffer(struct pipe_context *ctx,
				    struct pipe_resource *dst, unsigned dstx,
				    struct pipe_resource *src,
				    const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_box new_src_box = *src_box;

	if (src->bind & PIPE_BIND_GLOBAL) {
		struct r600_resource_global *rsrc =
			(struct r600_resource_global *)src;
		struct compute_memory_item *item = rsrc->chunk;

		if (is_item_in_pool(item)) {
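			/* The chunk lives inside the pool: convert its dword
			 * offset to bytes and copy from the pool buffer itself. */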
			new_src_box.x += 4 * item->start_in_dw;
			src = (struct pipe_resource *)pool->bo;
		} else {
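			/* Not in the pool: lazily allocate a dedicated VRAM
			 * buffer for the item and copy from that instead. */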
			if (item->real_buffer == NULL) {
				item->real_buffer = (struct r600_resource*)
					r600_compute_buffer_alloc_vram(pool->screen,
								       item->size_in_dw * 4);
			}
			src = (struct pipe_resource*)item->real_buffer;
		}
	}
	if (dst->bind & PIPE_BIND_GLOBAL) {
		struct r600_resource_global *rdst =
			(struct r600_resource_global *)dst;
		struct compute_memory_item *item = rdst->chunk;

		if (is_item_in_pool(item)) {
			dstx += 4 * item->start_in_dw;
			dst = (struct pipe_resource *)pool->bo;
		} else {
			if (item->real_buffer == NULL) {
				item->real_buffer = (struct r600_resource*)
					r600_compute_buffer_alloc_vram(pool->screen,
								       item->size_in_dw * 4);
			}
			dst = (struct pipe_resource*)item->real_buffer;
		}
	}

	r600_copy_buffer(ctx, dst, dstx, src, &new_src_box);
}
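
Both copy paths above hinge on is_item_in_pool(). In this code it is a small inline helper from compute_memory_pool.h; a minimal sketch, assuming an item that has not yet been placed in the pool keeps start_in_dw at -1 (verify the exact definition in your tree):

/* Sketch only: an item sitting in the global pool has been assigned a
 * dword offset; anything else still lives in (or will get) its own
 * real_buffer. */
static inline int is_item_in_pool(struct compute_memory_item *item)
{
	return item->start_in_dw != -1;
}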
Example #2
static void evergreen_set_global_binding(struct pipe_context *ctx,
					 unsigned first, unsigned n,
					 struct pipe_resource **resources,
					 uint32_t **handles)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;
	unsigned i;

	COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	/* We mark these items for promotion to the pool if they
	 * aren't already there */
	for (i = first; i < first + n; i++) {
		struct compute_memory_item *item = buffers[i]->chunk;

		if (!is_item_in_pool(item))
			buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
	}

	if (compute_memory_finalize_pending(pool, ctx) == -1) {
		/* XXX: Unset */
		return;
	}

	for (i = first; i < first + n; i++) {
		uint32_t buffer_offset;
		uint32_t handle;
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		buffer_offset = util_le32_to_cpu(*(handles[i]));
		handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;

		*(handles[i]) = util_cpu_to_le32(handle);
	}

	/* globals for writing */
	evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	/* globals for reading */
	evergreen_cs_set_vertex_buffer(rctx, 1, 0,
				(struct pipe_resource*)pool->bo);

	/* constants for reading; LLVM puts them in the text segment */
	evergreen_cs_set_vertex_buffer(rctx, 2, 0,
				(struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
}
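
The per-resource loop above rewrites each handle in place: the value handed in is a little-endian byte offset within that buffer, and the value written back is the absolute byte offset of the same location inside the global pool (start_in_dw is in dwords, hence the * 4). A hypothetical worked example, with numbers chosen purely for illustration:

/* Hypothetical values, for illustration only. */
uint32_t buffer_offset = 16;   /* byte offset inside the buffer, as passed in */
uint32_t start_in_dw   = 256;  /* chunk placed 256 dwords (1024 bytes) into the pool */
uint32_t handle        = buffer_offset + start_in_dw * 4;  /* 1040 bytes into pool->bo */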
Example #3
void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;

	struct compute_memory_item *item = buffer->chunk;
	struct pipe_resource *dst = NULL;
	unsigned offset = box->x;

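	/* The pool cannot reasonably be mapped as a whole, so a chunk that
	 * currently lives in the pool is demoted to its own buffer first;
	 * otherwise make sure the item has a real buffer to map. */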
	if (is_item_in_pool(item)) {
		compute_memory_demote_item(pool, item, ctx_);
	} else {
		if (item->real_buffer == NULL) {
			item->real_buffer =
					r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
		}
	}

	dst = (struct pipe_resource*)item->real_buffer;

	if (usage & PIPE_TRANSFER_READ)
		buffer->chunk->status |= ITEM_MAPPED_FOR_READING;

	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);
	COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
		"%u (box.x)\n", item->id, box->x);


	assert(resource->target == PIPE_BUFFER);
	assert(resource->bind & PIPE_BIND_GLOBAL);
	assert(box->x >= 0);
	assert(box->y == 0);
	assert(box->z == 0);

	/* TODO: do it better; mapping is not possible if the pool is too big. */
	return pipe_buffer_map_range(ctx_, dst,
			offset, box->width, usage, ptransfer);
}
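
A usage sketch from the caller's side (assuming the generic Gallium helpers from util/u_inlines.h; "ctx" and "res" are placeholder names for an existing pipe_context and a PIPE_BIND_GLOBAL buffer), which for global resources should end up in the function above:

/* Usage sketch only: map the first 256 bytes of a global buffer for reading. */
struct pipe_transfer *transfer;
uint32_t *data = pipe_buffer_map_range(ctx, res, 0, 256,
				       PIPE_TRANSFER_READ, &transfer);
if (data) {
	/* ... read back the kernel's output ... */
	pipe_buffer_unmap(ctx, transfer);
}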