Example 1
void
fd_gmem_render_tiles(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	bool sysmem = false;

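	/* Decide whether to bypass GMEM: direct (sysmem) rendering is only an
	 * option if the generation implements emit_sysmem_prep, and is used
	 * when there are no clears, nothing forcing GMEM, only a few draws,
	 * and bypass has not been disabled via FD_DBG_NOBYPASS.
	 */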
	if (ctx->emit_sysmem_prep) {
		if (batch->cleared || batch->gmem_reason || (batch->num_draws > 5)) {
			DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
				batch->cleared, batch->gmem_reason, batch->num_draws);
		} else if (!(fd_mesa_debug & FD_DBG_NOBYPASS)) {
			sysmem = true;
		}
	}

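	/* reset wait-for-idle tracking before building the new cmdstream: */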
	fd_reset_wfi(batch);

	ctx->stats.batch_total++;

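	/* Either draw straight to the framebuffer in system memory, or bin the
	 * scene into tiles and render each tile through GMEM:
	 */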
	if (sysmem) {
		DBG("%p: rendering sysmem %ux%u (%s/%s)",
			batch, pfb->width, pfb->height,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)));
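		/* sysmem rendering is effectively a single tile/pass: */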
		fd_hw_query_prepare(batch, 1);
		render_sysmem(batch);
		ctx->stats.batch_sysmem++;
	} else {
		struct fd_gmem_stateobj *gmem = &ctx->gmem;
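		/* work out the bin/tile layout (nbins_x/nbins_y) for the fb size: */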
		calculate_tiles(batch);
		DBG("%p: rendering %dx%d tiles %ux%u (%s/%s)",
			batch, pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)));
		fd_hw_query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
		render_tiles(batch);
		ctx->stats.batch_gmem++;
	}

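	/* flush the cmdstream for this batch to the kernel: */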
	flush_ring(batch);
}
Example 2
void
fd_gmem_render_tiles(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	uint32_t timestamp = 0;
	bool sysmem = false;

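	/* Use direct (sysmem) rendering only for small batches with no clears
	 * and nothing else forcing GMEM:
	 */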
	if (ctx->emit_sysmem_prep) {
		if (ctx->cleared || ctx->gmem_reason || (ctx->num_draws > 5)) {
			DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
				ctx->cleared, ctx->gmem_reason, ctx->num_draws);
		} else {
			sysmem = true;
		}
	}

	/* mark the end of the clear/draw cmds before emitting per-tile cmds: */
	fd_ringmarker_mark(ctx->draw_end);

	if (sysmem) {
		DBG("rendering sysmem (%s/%s)",
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		render_sysmem(ctx);
	} else {
		struct fd_gmem_stateobj *gmem = &ctx->gmem;
		DBG("rendering %dx%d tiles (%s/%s)", gmem->nbins_x, gmem->nbins_y,
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		calculate_tiles(ctx);
		render_tiles(ctx);
	}

	/* GPU executes starting from tile cmds, which IB back to draw cmds: */
	fd_ringmarker_flush(ctx->draw_end);

	/* mark start for next draw cmds: */
	fd_ringmarker_mark(ctx->draw_start);

	/* update timestamps on render targets: */
	timestamp = fd_ringbuffer_timestamp(ctx->ring);
	fd_resource(pfb->cbufs[0]->texture)->timestamp = timestamp;
	if (pfb->zsbuf)
		fd_resource(pfb->zsbuf->texture)->timestamp = timestamp;

	/* reset maximal bounds: */
	ctx->max_scissor.minx = ctx->max_scissor.miny = ~0;
	ctx->max_scissor.maxx = ctx->max_scissor.maxy = 0;

	/* Note that because the per-tile setup and mem2gmem/gmem2mem are emitted
	 * after the draw/clear calls, but executed before, we need to preemptively
	 * flag some state as dirty before the first draw/clear call.
	 *
	 * TODO maybe we need to mark all state as dirty to not worry about state
	 * being clobbered by other contexts?
	 */
	ctx->dirty |= FD_DIRTY_ZSA |
			FD_DIRTY_RASTERIZER |
			FD_DIRTY_FRAMEBUFFER |
			FD_DIRTY_SAMPLE_MASK |
			FD_DIRTY_VIEWPORT |
			FD_DIRTY_CONSTBUF |
			FD_DIRTY_PROG |
			FD_DIRTY_SCISSOR |
			/* probably only needed if we need to mem2gmem on the next
			 * draw..  but not sure if there is a good way to know?
			 */
			FD_DIRTY_VERTTEX |
			FD_DIRTY_FRAGTEX |
			FD_DIRTY_BLEND;

	if (fd_mesa_debug & FD_DBG_DGMEM)
		ctx->dirty = 0xffffffff;
}