void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!LIST_IS_EMPTY(&ctx->active_queries))
		r600_resume_queries(ctx);
}
Example #2
void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (ctx->queries_suspended_for_flush) {
		r600_resume_nontimer_queries(ctx);
		r600_resume_timer_queries(ctx);
	}
}
void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* Re-enable render condition. */
	if (ctx->saved_render_cond) {
		ctx->b.render_condition(&ctx->b, ctx->saved_render_cond,
					  ctx->saved_render_cond_cond,
					  ctx->saved_render_cond_mode);
	}
}
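The variants above all run immediately after a command-stream flush, paired with a preflush "suspend" step. Below is a minimal, hedged sketch of that pairing: r600_preflush_suspend_features and r600_postflush_resume_features are the real helpers, while the wrapper itself, the ctx->gfx.cs field and the cs_flush() signature are assumptions that vary between Mesa versions.

/* Illustrative sketch only: how the suspend/resume helpers bracket a flush.
 * ctx->gfx.cs and the winsys cs_flush() signature are assumptions here. */
static void r600_flush_gfx_sketch(struct r600_common_context *ctx,
				  unsigned flags)
{
	/* Pause streamout and active queries so they survive the submit. */
	r600_preflush_suspend_features(ctx);

	/* Submit the accumulated command stream (hypothetical call). */
	ctx->ws->cs_flush(ctx->gfx.cs, flags, NULL);

	/* Restart streamout, re-emit query begin packets and, in the older
	 * variant above, restore the saved render condition. */
	r600_postflush_resume_features(ctx);
}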
Example #4
void r600_set_streamout_targets(struct pipe_context *ctx,
				unsigned num_targets,
				struct pipe_stream_output_target **targets,
				const unsigned *offsets)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	unsigned i;
	unsigned enabled_mask = 0, append_bitmask = 0;

	/* Stop streamout. */
	if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
		r600_emit_streamout_end(rctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
		if (!targets[i])
			continue;

		r600_context_add_resource_size(ctx, targets[i]->buffer);
		enabled_mask |= 1 << i;
		if (offsets[i] == ((unsigned)-1))
			append_bitmask |= 1 << i;
	}
	for (; i < rctx->streamout.num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
	}

	rctx->streamout.enabled_mask = enabled_mask;

	rctx->streamout.num_targets = num_targets;
	rctx->streamout.append_bitmask = append_bitmask;

	if (num_targets) {
		r600_streamout_buffers_dirty(rctx);
	} else {
		rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false);
		r600_set_streamout_enable(rctx, false);
	}
}
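For reference, a hedged sketch of how a state tracker reaches this hook through the pipe_context vtable, using the per-buffer offsets form shown above; the context and buffer are assumed to exist, and target cleanup is omitted.

/* Sketch of a caller. An offset of (unsigned)-1 requests "append at the
 * current write position", which the driver turns into append_bitmask. */
static void bind_streamout_sketch(struct pipe_context *pipe,
				  struct pipe_resource *buf)
{
	struct pipe_stream_output_target *target;
	unsigned offset;

	/* First bind: start writing at the beginning of the buffer. */
	target = pipe->create_stream_output_target(pipe, buf, 0, buf->width0);
	offset = 0;
	pipe->set_stream_output_targets(pipe, 1, &target, &offset);

	/* ... draw ... */

	/* Rebind in append mode, keeping the buffer's current fill level. */
	offset = (unsigned)-1;
	pipe->set_stream_output_targets(pipe, 1, &target, &offset);
}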
Example #5
void r600_set_streamout_targets(struct pipe_context *ctx,
				unsigned num_targets,
				struct pipe_stream_output_target **targets,
				unsigned append_bitmask)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	unsigned i;

	/* Stop streamout. */
	if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
		r600_emit_streamout_end(rctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
		if (targets[i])
			r600_context_add_resource_size(ctx, targets[i]->buffer);
	}
	for (; i < rctx->streamout.num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
	}

	rctx->streamout.enabled_mask = (num_targets >= 1 && targets[0] ? 1 : 0) |
				       (num_targets >= 2 && targets[1] ? 2 : 0) |
				       (num_targets >= 3 && targets[2] ? 4 : 0) |
				       (num_targets >= 4 && targets[3] ? 8 : 0);

	rctx->streamout.num_targets = num_targets;
	rctx->streamout.append_bitmask = append_bitmask;

	if (num_targets) {
		r600_streamout_buffers_dirty(rctx);
	} else {
		rctx->streamout.begin_atom.dirty = false;
	}
}
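This older variant predates the per-buffer offsets parameter: the caller passes an append_bitmask directly through the hook. A hedged sketch of such a call, under the same assumptions as the previous caller sketch:

/* Sketch against the older signature; target setup as in the previous
 * sketch. Bit i of the last argument means "append to buffer i". */
static void bind_streamout_append_sketch(struct pipe_context *pipe,
					 struct pipe_stream_output_target *target)
{
	pipe->set_stream_output_targets(pipe, 1, &target, 1 << 0);
}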
Example #6
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
					struct pipe_resource *resource,
					unsigned level,
					unsigned usage,
					const struct pipe_box *box,
					struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0, 4096,
					   TRUE, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->b.streamout.num_targets; i++) {
				if (rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
					if (rctx->b.streamout.begin_emitted) {
						r600_emit_streamout_end(&rctx->b);
					}
					rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
					r600_streamout_buffers_dirty(&rctx->b);
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, rbuffer);
		}
	}
	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 !(rctx->screen->b.debug_flags & DBG_NO_DISCARD_RANGE) &&
		 (rctx->screen->has_cp_dma ||
		  (rctx->screen->has_streamout &&
		   /* The buffer range must be aligned to 4 with streamout. */
		   box->x % 4 == 0 && box->width % 4 == 0))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		}
	}

	/* mmap and synchronize with rings */
	data = r600_buffer_map_sync_with_rings(&rctx->b, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}
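As a closing usage note, here is a hedged sketch of the write pattern the DISCARD_RANGE path above is optimizing for. u_box_1d() is the standard Gallium helper from util/u_box.h; the surrounding context and buffer are assumed to exist.

/* Sketch: overwrite a sub-range without reading it back. With a busy
 * buffer, PIPE_TRANSFER_DISCARD_RANGE lets the driver take the wait-free
 * staging path instead of stalling; error handling is omitted. */
static void write_buffer_sketch(struct pipe_context *pipe,
				struct pipe_resource *buf,
				const void *data, unsigned size)
{
	struct pipe_transfer *transfer;
	struct pipe_box box;
	void *map;

	u_box_1d(0, size, &box);
	map = pipe->transfer_map(pipe, buf, 0,
				 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
				 &box, &transfer);
	if (map) {
		memcpy(map, data, size);
		pipe->transfer_unmap(pipe, transfer);
	}
}

In practice, state trackers usually reach this path through helpers such as pipe_buffer_write() in util/u_inlines.h, which wrap the same map/copy/unmap sequence.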