/* Flush the current ringbuffer and rotate to the next one, stalling until
 * the new ring is idle so it is safe to reuse.
 *
 * NOTE(review): 'dest' and 'timestamp' are unused here — presumably kept to
 * match a callback/vfunc signature; confirm against the caller.
 * NOTE(review): 'ring' and 'pipe' appear to be file-scope globals, and
 * next_ring() presumably advances 'ring' to the next buffer — so the
 * fd_pipe_wait() below waits on the *new* ring's last timestamp, not the
 * one just flushed.  Verify against the definitions of next_ring()/ring.
 */
static void flush(PixmapPtr dest, uint32_t *timestamp)
{
	/* finalize and submit the current ring: */
	ring_post(ring);
	fd_ringbuffer_flush(ring);
	/* switch to the next ring in the rotation: */
	next_ring();
	/* wait for the gpu to be done with it before we start writing: */
	fd_pipe_wait(pipe, fd_ringbuffer_timestamp(ring));
	ring_pre(ring);
}
/* there are two cases where we currently need to wait for render complete: * 1) pctx->flush() .. since at the moment we have no way for DDX to sync * the presentation blit with the 3d core * 2) wrap-around for ringbuffer.. possibly we can do something more * Intelligent here. Right now we need to ensure there is enough room * at the end of the drawcmds in the cmdstream buffer for all the per- * tile cmds. We do this the lamest way possible, by making the ringbuffer * big, and flushing and resetting back to the beginning if we get too * close to the end. */ static void fd_context_wait(struct pipe_context *pctx) { struct fd_context *ctx = fd_context(pctx); uint32_t ts = fd_ringbuffer_timestamp(ctx->ring); DBG("wait: %u", ts); fd_pipe_wait(ctx->screen->pipe, ts); fd_ringbuffer_reset(ctx->ring); fd_ringmarker_mark(ctx->draw_start); }
/* Submit the batch's cmdstream and install a fence for it on the context,
 * replacing (and releasing) the previous last_fence.
 */
static void flush_ring(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	int fence_fd = -1;
	int *out_fence = NULL;

	/* only ask the kernel for an out-fence fd if someone wants it: */
	if (batch->needs_out_fence_fd)
		out_fence = &fence_fd;

	fd_ringbuffer_flush2(batch->gmem, batch->in_fence_fd, out_fence);

	/* drop the reference to the previous fence before installing
	 * the one for this submit:
	 */
	fd_fence_ref(&ctx->screen->base, &ctx->last_fence, NULL);

	ctx->last_fence = fd_fence_create(ctx,
			fd_ringbuffer_timestamp(batch->gmem), fence_fd);
}
/* Rotate to the next ringbuffer in the context's pool, stalling until the
 * gpu is finished with it, and return it reset and ready for new cmds.
 */
static struct fd_ringbuffer *next_rb(struct fd_context *ctx)
{
	/* round-robin through the pool: */
	struct fd_ringbuffer *rb =
		ctx->rings[(ctx->rings_idx++) % ARRAY_SIZE(ctx->rings)];
	uint32_t fence = fd_ringbuffer_timestamp(rb);

	/* a non-zero timestamp means the gpu may still be using this rb,
	 * so wait for it to go idle before we recycle it:
	 */
	if (fence) {
		DBG("wait: %u", fence);
		fd_pipe_wait(ctx->screen->pipe, fence);
	}

	fd_ringbuffer_reset(rb);

	return rb;
}
/* Flush the accumulated clear/draw cmds for the current framebuffer, either
 * by rendering directly to system memory (bypassing the tile buffer) or by
 * emitting per-tile setup/restore/resolve cmds around the draw cmds.
 * Afterwards, stamp the render targets with the ring's timestamp and reset
 * per-batch tracking state.
 */
void fd_gmem_render_tiles(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	uint32_t timestamp = 0;
	bool sysmem = false;

	/* decide whether we can bypass GMEM: only when the backend supports
	 * sysmem rendering, nothing was cleared, nothing forced gmem
	 * (gmem_reason), and the draw count is small.
	 * NOTE(review): the >5 draw threshold looks like a heuristic —
	 * confirm the intended cutoff.
	 */
	if (ctx->emit_sysmem_prep) {
		if (ctx->cleared || ctx->gmem_reason || (ctx->num_draws > 5)) {
			DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
				ctx->cleared, ctx->gmem_reason, ctx->num_draws);
		} else {
			sysmem = true;
		}
	}

	/* mark the end of the clear/draw cmds before emitting per-tile cmds: */
	fd_ringmarker_mark(ctx->draw_end);

	if (sysmem) {
		DBG("rendering sysmem (%s/%s)",
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		render_sysmem(ctx);
	} else {
		struct fd_gmem_stateobj *gmem = &ctx->gmem;
		DBG("rendering %dx%d tiles (%s/%s)", gmem->nbins_x, gmem->nbins_y,
			util_format_name(pfb->cbufs[0]->format),
			pfb->zsbuf ? util_format_name(pfb->zsbuf->format) : "none");
		calculate_tiles(ctx);
		render_tiles(ctx);
	}

	/* GPU executes starting from tile cmds, which IB back to draw cmds: */
	fd_ringmarker_flush(ctx->draw_end);

	/* mark start for next draw cmds: */
	fd_ringmarker_mark(ctx->draw_start);

	/* update timestamps on render targets, so later users of these
	 * resources know what to wait on:
	 */
	timestamp = fd_ringbuffer_timestamp(ctx->ring);
	fd_resource(pfb->cbufs[0]->texture)->timestamp = timestamp;
	if (pfb->zsbuf)
		fd_resource(pfb->zsbuf->texture)->timestamp = timestamp;

	/* reset maximal bounds (min fields to all-ones, max to zero) so the
	 * next batch's scissor accumulation starts from an empty box:
	 */
	ctx->max_scissor.minx = ctx->max_scissor.miny = ~0;
	ctx->max_scissor.maxx = ctx->max_scissor.maxy = 0;

	/* Note that because the per-tile setup and mem2gmem/gmem2mem are emitted
	 * after the draw/clear calls, but executed before, we need to preemptively
	 * flag some state as dirty before the first draw/clear call.
	 *
	 * TODO maybe we need to mark all state as dirty to not worry about state
	 * being clobbered by other contexts?
	 */
	ctx->dirty |= FD_DIRTY_ZSA |
			FD_DIRTY_RASTERIZER |
			FD_DIRTY_FRAMEBUFFER |
			FD_DIRTY_SAMPLE_MASK |
			FD_DIRTY_VIEWPORT |
			FD_DIRTY_CONSTBUF |
			FD_DIRTY_PROG |
			FD_DIRTY_SCISSOR |
			/* probably only needed if we need to mem2gmem on the next
			 * draw.. but not sure if there is a good way to know?
			 */
			FD_DIRTY_VERTTEX |
			FD_DIRTY_FRAGTEX |
			FD_DIRTY_BLEND;

	/* debug override: force everything dirty each batch: */
	if (fd_mesa_debug & FD_DBG_DGMEM)
		ctx->dirty = 0xffffffff;
}