Example #1
/* there are two cases where we currently need to wait for render complete:
 * 1) pctx->flush() .. since at the moment we have no way for DDX to sync
 *    the presentation blit with the 3d core
 * 2) wrap-around for ringbuffer.. possibly we can do something more
 *    intelligent here.  Right now we need to ensure there is enough room
 *    at the end of the drawcmds in the cmdstream buffer for all the per-
 *    tile cmds.  We do this the lamest way possible, by making the ringbuffer
 *    big, and flushing and resetting back to the beginning if we get too
 *    close to the end.
 */
static void
fd_context_wait(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	uint32_t ts = fd_ringbuffer_timestamp(ctx->ring);

	DBG("wait: %u", ts);

	fd_pipe_wait(ctx->screen->pipe, ts);
	fd_ringbuffer_reset(ctx->ring);
	fd_ringmarker_mark(ctx->draw_start);
}
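
The wrap-around handling from case 2 is not shown here, but it could look
roughly like the sketch below, assuming the ring exposes its cur/end write
pointers the way libdrm_freedreno's struct fd_ringbuffer does; the helper
name and the reserve margin are hypothetical, not driver API:

/* hypothetical sketch: before emitting a draw, make sure the draw plus a
 * reserved margin for the per-tile cmds still fits; if not, flush what we
 * have and stall so the whole buffer can be reused from the beginning:
 */
static void
fd_context_check_wrap(struct fd_context *ctx, uint32_t ndwords)
{
	struct fd_ringbuffer *ring = ctx->ring;
	const uint32_t reserve = 0x1000;   /* assumed margin for per-tile cmds */

	if ((ring->cur + ndwords + reserve) > ring->end) {
		fd_ringbuffer_flush(ring);
		fd_context_wait(&ctx->base);   /* wait, reset, re-mark draw_start */
	}
}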
Example #2
struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
{
	struct fd_ringmarker *marker = NULL;

	marker = calloc(1, sizeof(*marker));
	if (!marker) {
		ERROR_MSG("allocation failed");
		return NULL;
	}

	marker->ring = ring;

	fd_ringmarker_mark(marker);

	return marker;
}
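
For context, the marker's other entry points are tiny; a sketch of what they
most likely look like, assuming struct fd_ringmarker carries only the ring
pointer and a saved write position:

/* sketch: a marker is just a snapshot of the ring's current write pointer */
void fd_ringmarker_mark(struct fd_ringmarker *marker)
{
	marker->cur = marker->ring->cur;
}

void fd_ringmarker_del(struct fd_ringmarker *marker)
{
	free(marker);
}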
Example #3
/* emit per-context initialization:
 */
void
fd_state_emit_setup(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_ringbuffer *ring = ctx->ring;

	OUT_PKT0(ring, REG_A2XX_TP0_CHICKEN, 1);
	OUT_RING(ring, 0x00000002);

	OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
	OUT_RING(ring, 0x00007fff);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_SQ_VS_CONST));
	OUT_RING(ring, A2XX_SQ_VS_CONST_BASE(VS_CONST_BASE) |
			A2XX_SQ_VS_CONST_SIZE(0x100));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_SQ_PS_CONST));
	OUT_RING(ring, A2XX_SQ_PS_CONST_BASE(PS_CONST_BASE) |
			A2XX_SQ_PS_CONST_SIZE(0xe0));

	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_MAX_VTX_INDX));
	OUT_RING(ring, 0xffffffff);        /* VGT_MAX_VTX_INDX */
	OUT_RING(ring, 0x00000000);        /* VGT_MIN_VTX_INDX */

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
	OUT_RING(ring, 0x00000000);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL));
	OUT_RING(ring, 0x0000003b);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_SQ_CONTEXT_MISC));
	OUT_RING(ring, A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(CENTERS_ONLY));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_SQ_INTERPOLATOR_CNTL));
	OUT_RING(ring, 0xffffffff);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_AA_CONFIG));
	OUT_RING(ring, 0x00000000);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_LINE_CNTL));
	OUT_RING(ring, 0x00000000);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_OFFSET));
	OUT_RING(ring, 0x00000000);

	/* XXX we change this dynamically for draw/clear.. vs gmem<->mem.. */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_MODECONTROL));
	OUT_RING(ring, A2XX_RB_MODECONTROL_EDRAM_MODE(COLOR_DEPTH));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_SAMPLE_POS));
	OUT_RING(ring, 0x88888888);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_DEST_MASK));
	OUT_RING(ring, 0xffffffff);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COPY_DEST_INFO));
	OUT_RING(ring, A2XX_RB_COPY_DEST_INFO_FORMAT(COLORX_4_4_4_4) |
			A2XX_RB_COPY_DEST_INFO_WRITE_RED |
			A2XX_RB_COPY_DEST_INFO_WRITE_GREEN |
			A2XX_RB_COPY_DEST_INFO_WRITE_BLUE |
			A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA);

	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_SQ_WRAPPING_0));
	OUT_RING(ring, 0x00000000);        /* SQ_WRAPPING_0 */
	OUT_RING(ring, 0x00000000);        /* SQ_WRAPPING_1 */

	OUT_PKT3(ring, CP_SET_DRAW_INIT_FLAGS, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT3(ring, CP_WAIT_REG_EQ, 4);
	OUT_RING(ring, 0x000005d0);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x5f601000);
	OUT_RING(ring, 0x00000001);

	OUT_PKT0(ring, REG_A2XX_SQ_INST_STORE_MANAGMENT, 1);
	OUT_RING(ring, 0x00000180);

	OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
	OUT_RING(ring, 0x00000300);

	OUT_PKT3(ring, CP_SET_SHADER_BASES, 1);
	OUT_RING(ring, 0x80000180);

	/* not sure what this form of CP_SET_CONSTANT is.. */
	OUT_PKT3(ring, CP_SET_CONSTANT, 13);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x469c4000);
	OUT_RING(ring, 0x3f800000);
	OUT_RING(ring, 0x3f000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x40000000);
	OUT_RING(ring, 0x3f400000);
	OUT_RING(ring, 0x3ec00000);
	OUT_RING(ring, 0x3e800000);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_MASK));
	OUT_RING(ring, A2XX_RB_COLOR_MASK_WRITE_RED |
			A2XX_RB_COLOR_MASK_WRITE_GREEN |
			A2XX_RB_COLOR_MASK_WRITE_BLUE |
			A2XX_RB_COLOR_MASK_WRITE_ALPHA);

	OUT_PKT3(ring, CP_SET_CONSTANT, 5);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_BLEND_RED));
	OUT_RING(ring, 0x00000000);        /* RB_BLEND_RED */
	OUT_RING(ring, 0x00000000);        /* RB_BLEND_GREEN */
	OUT_RING(ring, 0x00000000);        /* RB_BLEND_BLUE */
	OUT_RING(ring, 0x000000ff);        /* RB_BLEND_ALPHA */

	fd_ringbuffer_flush(ring);
	fd_ringmarker_mark(ctx->draw_start);
}
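
The OUT_PKT0/OUT_PKT3/OUT_RING helpers used throughout encode standard adreno
CP packet headers. A minimal sketch of what they plausibly expand to (the real
definitions live in the driver's util header; treat these as illustrative):

static inline void
OUT_RING(struct fd_ringbuffer *ring, uint32_t data)
{
	*(ring->cur++) = data;
}

/* type-0 packet: write `cnt' consecutive registers starting at `regindx' */
static inline void
OUT_PKT0(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF));
}

/* type-3 packet: `opcode' followed by `cnt' payload dwords */
static inline void
OUT_PKT3(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8));
}

So each CP_SET_CONSTANT group above is a type-3 header, a register offset
(wrapped by CP_REG()), and then the value(s) to write.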
Example #4
void
fd_gmem_render_tiles(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	uint32_t timestamp = 0;
	bool sysmem = false;

	if (ctx->emit_sysmem_prep) {
		if (ctx->cleared || ctx->gmem_reason || (ctx->num_draws > 5)) {
			DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
				ctx->cleared, ctx->gmem_reason, ctx->num_draws);
		} else {
			sysmem = true;
		}
	}

	/* mark the end of the clear/draw cmds before emitting per-tile cmds: */
	fd_ringmarker_mark(ctx->draw_end);

	if (sysmem) {
		DBG("rendering sysmem (%s/%s)",
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		render_sysmem(ctx);
	} else {
		struct fd_gmem_stateobj *gmem = &ctx->gmem;
		DBG("rendering %dx%d tiles (%s/%s)", gmem->nbins_x, gmem->nbins_y,
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		calculate_tiles(ctx);
		render_tiles(ctx);
	}

	/* GPU executes starting from tile cmds, which IB back to draw cmds: */
	fd_ringmarker_flush(ctx->draw_end);

	/* mark start for next draw cmds: */
	fd_ringmarker_mark(ctx->draw_start);

	/* update timestamps on render targets: */
	timestamp = fd_ringbuffer_timestamp(ctx->ring);
	fd_resource(pfb->cbufs[0]->texture)->timestamp = timestamp;
	if (pfb->zsbuf)
		fd_resource(pfb->zsbuf->texture)->timestamp = timestamp;

	/* reset maximal bounds: */
	ctx->max_scissor.minx = ctx->max_scissor.miny = ~0;
	ctx->max_scissor.maxx = ctx->max_scissor.maxy = 0;

	/* Note that because the per-tile setup and mem2gmem/gmem2mem are emitted
	 * after the draw/clear calls, but executed before, we need to preemptively
	 * flag some state as dirty before the first draw/clear call.
	 *
	 * TODO maybe we need to mark all state as dirty to not worry about state
	 * being clobbered by other contexts?
	 */
	ctx->dirty |= FD_DIRTY_ZSA |
			FD_DIRTY_RASTERIZER |
			FD_DIRTY_FRAMEBUFFER |
			FD_DIRTY_SAMPLE_MASK |
			FD_DIRTY_VIEWPORT |
			FD_DIRTY_CONSTBUF |
			FD_DIRTY_PROG |
			FD_DIRTY_SCISSOR |
			/* probably only needed if we need to mem2gmem on the next
			 * draw..  but not sure if there is a good way to know?
			 */
			FD_DIRTY_VERTTEX |
			FD_DIRTY_FRAGTEX |
			FD_DIRTY_BLEND;

	if (fd_mesa_debug & FD_DBG_DGMEM)
		ctx->dirty = 0xffffffff;
}
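
render_tiles() itself is not shown; going by the comments above ("tile cmds,
which IB back to draw cmds"), a plausible sketch is below. The emit_tile_*
hooks, the restore flag, and the emit_ib() helper are illustrative stand-ins
for the driver's actual per-generation callbacks and the libdrm reloc/IB
emit, not real API:

static void
render_tiles(struct fd_context *ctx)
{
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	uint32_t i, j;

	for (i = 0; i < gmem->nbins_y; i++) {
		for (j = 0; j < gmem->nbins_x; j++) {
			/* per-bin scissor/window-offset setup: */
			ctx->emit_tile_prep(ctx, i, j);

			/* mem2gmem: restore prior render target contents if needed: */
			if (ctx->restore)
				ctx->emit_tile_mem2gmem(ctx, i, j);

			/* branch (IB) back into the clear/draw cmds recorded
			 * between draw_start and draw_end (hypothetical helper):
			 */
			emit_ib(ctx->ring, ctx->draw_start, ctx->draw_end);

			/* gmem2mem: resolve this bin out to the render target: */
			ctx->emit_tile_gmem2mem(ctx, i, j);
		}
	}
}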