Example no. 1
static void flush(PixmapPtr dest, uint32_t *timestamp)
{
	/* submit the current ring's cmds to the kernel: */
	ring_post(ring);
	fd_ringbuffer_flush(ring);

	/* switch to the next ring, then wait for that ring's previous
	 * submit to complete before reusing it:
	 */
	next_ring();
	fd_pipe_wait(pipe, fd_ringbuffer_timestamp(ring));

	ring_pre(ring);
}
/* there are two cases where we currently need to wait for render complete:
 * 1) pctx->flush() .. since at the moment we have no way for DDX to sync
 *    the presentation blit with the 3d core
 * 2) wrap-around for ringbuffer.. possibly we can do something more
 *    intelligent here.  Right now we need to ensure there is enough room
 *    at the end of the drawcmds in the cmdstream buffer for all the per-
 *    tile cmds.  We do this the lamest way possible, by making the ringbuffer
 *    big, and flushing and resetting back to the beginning if we get too
 *    close to the end.
 */
static void
fd_context_wait(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	uint32_t ts = fd_ringbuffer_timestamp(ctx->ring);

	DBG("wait: %u", ts);

	fd_pipe_wait(ctx->screen->pipe, ts);
	fd_ringbuffer_reset(ctx->ring);
	fd_ringmarker_mark(ctx->draw_start);
}
static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
		struct pipe_transfer *ptrans,
		const struct pipe_box *box)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);

	/* flush any pending rendering that targets this resource: */
	if (rsc->dirty)
		fd_context_render(pctx);

	/* then block until the GPU is done with it, so the CPU sees
	 * up-to-date contents:
	 */
	if (rsc->timestamp) {
		fd_pipe_wait(ctx->screen->pipe, rsc->timestamp);
		rsc->timestamp = 0;
	}
}
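The ringbuffer wrap-around described in the comment in Example no. 1 (flush and rewind when too close to the end) is not itself shown in the excerpt. Below is a minimal sketch of what such an end-of-buffer check might look like; the helper name fd_context_check_wrap(), the reserve constant, and the direct reads of the public struct fd_ringbuffer fields (cur, start, size) are assumptions for illustration, not the actual freedreno code:

/* Hypothetical end-of-ring check (sketch only): flush and rewind
 * when the write pointer gets too close to the end of the buffer.
 */
static void
fd_context_check_wrap(struct fd_context *ctx)
{
	struct fd_ringbuffer *ring = ctx->ring;
	/* illustrative reserve for the per-tile cmds: */
	const uint32_t reserve_dwords = 0x1000;
	uint32_t used  = ring->cur - ring->start;  /* in dwords */
	uint32_t total = ring->size / 4;           /* bytes -> dwords */

	if (used > (total - reserve_dwords)) {
		/* flush queued cmds, wait for the GPU, and reset the
		 * ring back to its beginning (see fd_context_wait()):
		 */
		fd_context_render(&ctx->base);
		fd_context_wait(&ctx->base);
	}
}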
Example no. 4
static struct fd_ringbuffer *next_rb(struct fd_context *ctx)
{
	struct fd_ringbuffer *ring;
	uint32_t ts;

	/* grab next ringbuffer: */
	ring = ctx->rings[(ctx->rings_idx++) % ARRAY_SIZE(ctx->rings)];

	/* wait for new rb to be idle: */
	ts = fd_ringbuffer_timestamp(ring);
	if (ts) {
		DBG("wait: %u", ts);
		fd_pipe_wait(ctx->screen->pipe, ts);
	}

	fd_ringbuffer_reset(ring);

	return ring;
}
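next_rb() assumes ctx->rings was populated up front. A sketch of that setup using libdrm_freedreno's fd_ringbuffer_new() follows; the helper name setup_rings() and the 0x100000 ring size are illustrative assumptions:

/* Sketch: allocate the ring pool that next_rb() rotates through. */
static void setup_rings(struct fd_context *ctx)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ctx->rings); i++)
		ctx->rings[i] = fd_ringbuffer_new(ctx->screen->pipe, 0x100000);

	ctx->rings_idx = 0;
	ctx->ring = next_rb(ctx);
}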
Example no. 5
static int bo_alloc(struct kgsl_bo *kgsl_bo)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (!kgsl_bo->offset) {
		struct drm_kgsl_gem_alloc req = {
				.handle = bo->handle,
		};
		int ret;

		/* if the buffer is already backed by pages then this
		 * doesn't actually do anything (other than giving us
		 * the offset)
		 */
		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_ALLOC,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("alloc failed: %s", strerror(errno));
			return ret;
		}

		kgsl_bo->offset = req.offset;
	}

	return 0;
}

static int kgsl_bo_offset(struct fd_bo *bo, uint64_t *offset)
{
	struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
	int ret = bo_alloc(kgsl_bo);
	if (ret)
		return ret;
	*offset = kgsl_bo->offset;
	return 0;
}

static int kgsl_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	uint32_t timestamp = kgsl_bo_get_timestamp(to_kgsl_bo(bo));

	if (op & DRM_FREEDRENO_PREP_NOSYNC) {
		uint32_t current;
		int ret;

		/* special case for is_idle().. we can't really handle that
		 * properly in kgsl (perhaps we need a way to just disable
		 * the bo-cache for kgsl?)
		 */
		if (!pipe)
			return -EBUSY;

		ret = kgsl_pipe_timestamp(to_kgsl_pipe(pipe), &current);
		if (ret)
			return ret;

		/* the bo's last timestamp hasn't retired yet: */
		if (timestamp > current)
			return -EBUSY;

		return 0;
	}

	/* blocking path: wait until the GPU passes the bo's timestamp: */
	if (timestamp)
		fd_pipe_wait(pipe, timestamp);

	return 0;
}
Example no. 6
/* block until the GPU has processed cmds up to 'timestamp': */
static void wait(uint32_t timestamp)
{
	fd_pipe_wait(pipe, timestamp);
}
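The wrapper in Example no. 6 relies on a file-scope pipe. A minimal sketch of the setup it assumes, using the real fd_device_new()/fd_pipe_new() API; the globals, setup() name, and choice of FD_PIPE_3D are assumptions:

/* Sketch of the assumed file-scope state behind wait(): */
static struct fd_device *dev;
static struct fd_pipe *pipe;

static void setup(int drm_fd)
{
	dev = fd_device_new(drm_fd);
	pipe = fd_pipe_new(dev, FD_PIPE_3D);
}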