static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
      unsigned flags)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_fence_handle *fence = NULL;

   DBG("%p: flush: flags=%x\n", ctx->batch, flags);

   /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
   fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);

   if (flags & PIPE_FLUSH_FENCE_FD)
      ctx->batch->needs_out_fence_fd = true;

   if (!ctx->screen->reorder) {
      fd_batch_flush(ctx->batch, true, false);
   } else if (flags & PIPE_FLUSH_DEFERRED) {
      fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
   } else {
      fd_bc_flush(&ctx->screen->batch_cache, ctx);
   }

   if (fencep)
      fd_fence_ref(pctx->screen, fencep, fence);

   fd_fence_ref(pctx->screen, &fence, NULL);
}
static void
fd_screen_fence_ref(struct pipe_screen *pscreen,
      struct pipe_fence_handle **ptr,
      struct pipe_fence_handle *pfence)
{
   fd_fence_ref(fd_fence(pfence), (struct fd_fence **)ptr);
}
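/*
 * A minimal, self-contained sketch (not the driver's actual code) of the
 * ref-swap idiom that the fd_fence_ref() call above relies on: take a
 * reference on the incoming object, drop the reference previously held
 * through *ptr, and update the pointer.  The demo_fence struct and helpers
 * are hypothetical stand-ins for the real fd_fence.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct demo_fence {
   atomic_int refcount;   /* starts at 1 when the fence is created */
   /* ... driver-specific state ... */
};

static void
demo_fence_destroy(struct demo_fence *fence)
{
   free(fence);
}

static void
demo_fence_ref(struct demo_fence *fence, struct demo_fence **ptr)
{
   struct demo_fence *old = *ptr;

   /* Grab a reference on the new fence first, so the swap is safe even
    * when fence == old. */
   if (fence)
      atomic_fetch_add(&fence->refcount, 1);

   *ptr = fence;

   /* Drop the reference that *ptr used to hold; destroy on last unref. */
   if (old && atomic_fetch_sub(&old->refcount, 1) == 1)
      demo_fence_destroy(old);
}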
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
      unsigned flags)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_fence_handle *fence = NULL;
   // TODO we want to lookup batch if it exists, but not create one if not.
   struct fd_batch *batch = fd_context_batch(ctx);

   DBG("%p: flush: flags=%x\n", ctx->batch, flags);

   /* if no rendering since last flush, ie. app just decided it needed
    * a fence, re-use the last one:
    */
   if (ctx->last_fence) {
      fd_fence_ref(pctx->screen, &fence, ctx->last_fence);
      goto out;
   }

   if (!batch)
      return;

   /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
   fd_fence_ref(pctx->screen, &fence, batch->fence);

   /* TODO is it worth trying to figure out if app is using fence-fd's, to
    * avoid requesting one every batch?
    */
   batch->needs_out_fence_fd = true;

   if (!ctx->screen->reorder) {
      fd_batch_flush(batch, true, false);
   } else if (flags & PIPE_FLUSH_DEFERRED) {
      fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
   } else {
      fd_bc_flush(&ctx->screen->batch_cache, ctx);
   }

out:
   if (fencep)
      fd_fence_ref(pctx->screen, fencep, fence);

   fd_fence_ref(pctx->screen, &ctx->last_fence, fence);

   fd_fence_ref(pctx->screen, &fence, NULL);
}
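/*
 * Rough sketch of how a caller might drive the flush path above through the
 * generic Gallium hooks: flush to obtain a fence handle, wait on it, then
 * drop the reference.  The wait_for_rendering() wrapper is hypothetical;
 * pipe_context::flush, pipe_screen::fence_finish and
 * pipe_screen::fence_reference are the standard Gallium entry points.
 */
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_defines.h"

static void
wait_for_rendering(struct pipe_context *pctx)
{
   struct pipe_screen *pscreen = pctx->screen;
   struct pipe_fence_handle *fence = NULL;

   /* fd_context_flush() runs behind this hook: it either re-uses
    * ctx->last_fence or flushes the current batch and returns its fence. */
   pctx->flush(pctx, &fence, 0);

   if (fence) {
      pscreen->fence_finish(pscreen, NULL, fence, PIPE_TIMEOUT_INFINITE);
      pscreen->fence_reference(pscreen, &fence, NULL);
   }
}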
static void
flush_ring(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   int out_fence_fd = -1;

   fd_ringbuffer_flush2(batch->gmem, batch->in_fence_fd,
         batch->needs_out_fence_fd ? &out_fence_fd : NULL);

   fd_fence_ref(&ctx->screen->base, &ctx->last_fence, NULL);
   ctx->last_fence = fd_fence_create(ctx,
         fd_ringbuffer_timestamp(batch->gmem), out_fence_fd);
}
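/*
 * Sketch (again, not driver code) of how the out-fence fd that flush_ring()
 * requests from the kernel would typically be consumed on the Gallium side:
 * flush with PIPE_FLUSH_FENCE_FD to request an out-fence, then export the fd
 * through pipe_screen::fence_get_fd.  The helper name is hypothetical; the
 * caller owns the returned fd and must close() it when done.
 */
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_defines.h"

static int
export_native_fence_fd(struct pipe_context *pctx)
{
   struct pipe_screen *pscreen = pctx->screen;
   struct pipe_fence_handle *fence = NULL;
   int fd = -1;

   pctx->flush(pctx, &fence, PIPE_FLUSH_FENCE_FD);

   if (fence) {
      fd = pscreen->fence_get_fd(pscreen, fence);
      pscreen->fence_reference(pscreen, &fence, NULL);
   }

   return fd;   /* -1 if no fence/fd was produced */
}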
void
fd_context_destroy(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   unsigned i;

   DBG("");

   fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

   if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
      util_queue_destroy(&ctx->flush_queue);

   util_copy_framebuffer_state(&ctx->framebuffer, NULL);
   fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
   fd_bc_invalidate_context(ctx);

   fd_prog_fini(pctx);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   if (ctx->clear_rs_state)
      pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state);

   if (ctx->primconvert)
      util_primconvert_destroy(ctx->primconvert);

   slab_destroy_child(&ctx->transfer_pool);

   for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe); i++) {
      struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
      if (!pipe->bo)
         break;
      fd_bo_del(pipe->bo);
   }

   fd_device_del(ctx->dev);
   fd_pipe_del(ctx->pipe);

   if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) {
      printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n",
            (uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
            (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
            (uint32_t)ctx->stats.batch_restore);
   }
}
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
      unsigned flags)
{
   DBG("fence=%p", fence);

#if 0
   if (fence) {
      fd_fence_ref(ctx->screen->fence.current,
            (struct fd_fence **)fence);
   }
#endif

   fd_context_render(pctx);
}