/* Example #1 */
/* Flush the graphics command stream (CS) to the winsys and optionally
 * return a fence for the submission.
 *
 * context: actually a struct si_context* (generic pipe flush callback).
 * flags:   RADEON_FLUSH_* bits forwarded to the winsys.
 * fence:   if non-NULL, receives a reference to the fence of this
 *          (or the previous, when the CS is empty) submission.
 */
void si_context_gfx_flush(void *context, unsigned flags,
			  struct pipe_fence_handle **fence)
{
	struct si_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	/* Nothing was emitted since the last flush: reuse the previous
	 * fence (if the caller wants one) instead of submitting an
	 * empty IB. */
	if (cs->cdw == ctx->b.initial_gfx_cs_size &&
	    (!fence || ctx->last_gfx_fence)) {
		if (fence)
			ws->fence_reference(fence, ctx->last_gfx_fence);
		if (!(flags & RADEON_FLUSH_ASYNC))
			ws->cs_sync_flush(cs);
		return;
	}

	ctx->b.rings.gfx.flushing = true;

	r600_preflush_suspend_features(&ctx->b);

	/* Make sure all caches are flushed/invalidated before the IB ends. */
	ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
			SI_CONTEXT_INV_TC_L1 |
			SI_CONTEXT_INV_TC_L2 |
			/* this is probably not needed anymore */
			SI_CONTEXT_PS_PARTIAL_FLUSH;
	si_emit_cache_flush(ctx, NULL);

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	if (ctx->trace_buf)
		si_trace_emit(ctx);

	/* Save the IB for debug contexts. */
	if (ctx->is_debug) {
		free(ctx->last_ib);
		ctx->last_ib = malloc(cs->cdw * 4);
		if (ctx->last_ib) {
			ctx->last_ib_dw_size = cs->cdw;
			memcpy(ctx->last_ib, cs->buf, cs->cdw * 4);
		} else {
			/* OOM: skip the snapshot instead of memcpy'ing
			 * into a NULL pointer (undefined behavior). */
			ctx->last_ib_dw_size = 0;
		}
		r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
		r600_resource_reference(&ctx->trace_buf, NULL);
	}

	/* Flush the CS. */
	ws->cs_flush(cs, flags, &ctx->last_gfx_fence,
		     ctx->screen->b.cs_count++);
	ctx->b.rings.gfx.flushing = false;

	if (fence)
		ws->fence_reference(fence, ctx->last_gfx_fence);

	si_begin_new_cs(ctx);
}
/* Example #2 */
/* Flush the graphics command stream (CS) to the winsys and optionally
 * return a fence for the submission.
 *
 * context: actually a struct si_context* (generic pipe flush callback).
 * flags:   RADEON_FLUSH_* bits forwarded to the winsys.
 * fence:   if non-NULL, receives the fence of this submission.
 */
void si_context_gfx_flush(void *context, unsigned flags,
                          struct pipe_fence_handle **fence)
{
    struct si_context *ctx = context;
    struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

    /* Nothing was emitted since the last flush and the caller does not
     * want a fence -> there is nothing to do. */
    if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
        return;

    ctx->b.rings.gfx.flushing = true;

    r600_preflush_suspend_features(&ctx->b);

    /* Request full cache flush/invalidation before the IB ends. */
    ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
                    R600_CONTEXT_FLUSH_AND_INV_CB_META |
                    R600_CONTEXT_FLUSH_AND_INV_DB |
                    R600_CONTEXT_FLUSH_AND_INV_DB_META |
                    R600_CONTEXT_INV_TEX_CACHE |
                    /* this is probably not needed anymore */
                    R600_CONTEXT_PS_PARTIAL_FLUSH;
    si_emit_cache_flush(&ctx->b, NULL);

    /* force to keep tiling flags */
    flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

    /* Flush the CS. */
    ctx->b.ws->cs_flush(cs, flags, fence, ctx->screen->b.cs_count++);
    ctx->b.rings.gfx.flushing = false;

#if SI_TRACE_CS
    /* Debug build: poll the trace buffer to see whether the CS completed,
     * and report how long execution took (or a likely lockup). */
    if (ctx->screen->b.trace_bo) {
        struct si_screen *sscreen = ctx->screen;
        unsigned i;

        /* Poll up to 10 times, 5 us apart, while the trace BO is busy. */
        for (i = 0; i < 10; i++) {
            usleep(5);
            if (!ctx->b.ws->buffer_is_busy(sscreen->b.trace_bo->buf, RADEON_USAGE_READWRITE)) {
                break;
            }
        }
        /* NOTE(review): the "%dms" label looks off — each iteration sleeps
         * 5 microseconds, not milliseconds; confirm intended units. */
        if (i == 10) {
            fprintf(stderr, "timeout on cs lockup likely happen at cs %d dw %d\n",
                    sscreen->b.trace_ptr[1], sscreen->b.trace_ptr[0]);
        } else {
            fprintf(stderr, "cs %d executed in %dms\n", sscreen->b.trace_ptr[1], i * 5);
        }
    }
#endif

    si_begin_new_cs(ctx);
}
/* Example #3 */
/* Flush the graphics command stream (CS) to the winsys and optionally
 * return a fence for the submission.
 *
 * context: actually a struct si_context* (generic pipe flush callback).
 * flags:   RADEON_FLUSH_* bits forwarded to the winsys.
 * fence:   if non-NULL, receives a reference to the fence of this
 *          (or the previous, when the CS is empty) submission.
 */
void si_context_gfx_flush(void *context, unsigned flags,
			  struct pipe_fence_handle **fence)
{
	struct si_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	/* Guard against reentrant flushes. */
	if (ctx->gfx_flush_in_progress)
		return;

	ctx->gfx_flush_in_progress = true;

	/* Nothing was emitted since the last flush: reuse the previous
	 * fence (if the caller wants one) instead of submitting an
	 * empty IB. */
	if (cs->cdw == ctx->b.initial_gfx_cs_size &&
	    (!fence || ctx->last_gfx_fence)) {
		if (fence)
			ws->fence_reference(fence, ctx->last_gfx_fence);
		if (!(flags & RADEON_FLUSH_ASYNC))
			ws->cs_sync_flush(cs);
		ctx->gfx_flush_in_progress = false;
		return;
	}

	r600_preflush_suspend_features(&ctx->b);

	/* Make sure all caches are flushed/invalidated before the IB ends. */
	ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
			SI_CONTEXT_INV_VMEM_L1 |
			SI_CONTEXT_INV_GLOBAL_L2 |
			/* this is probably not needed anymore */
			SI_CONTEXT_PS_PARTIAL_FLUSH;
	si_emit_cache_flush(ctx, NULL);

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	if (ctx->trace_buf)
		si_trace_emit(ctx);

	if (ctx->is_debug) {
		unsigned i;

		/* Save the IB for debug contexts. */
		free(ctx->last_ib);
		ctx->last_ib = malloc(cs->cdw * 4);
		if (ctx->last_ib) {
			ctx->last_ib_dw_size = cs->cdw;
			memcpy(ctx->last_ib, cs->buf, cs->cdw * 4);
		} else {
			/* OOM: skip the snapshot instead of memcpy'ing
			 * into a NULL pointer (undefined behavior). */
			ctx->last_ib_dw_size = 0;
		}
		r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
		r600_resource_reference(&ctx->trace_buf, NULL);

		/* Save the buffer list, dropping references held from the
		 * previous snapshot first. */
		if (ctx->last_bo_list) {
			for (i = 0; i < ctx->last_bo_count; i++)
				pb_reference(&ctx->last_bo_list[i].buf, NULL);
			free(ctx->last_bo_list);
		}
		ctx->last_bo_count = ws->cs_get_buffer_list(cs, NULL);
		ctx->last_bo_list = calloc(ctx->last_bo_count,
					   sizeof(ctx->last_bo_list[0]));
		if (ctx->last_bo_list) {
			ws->cs_get_buffer_list(cs, ctx->last_bo_list);
		} else {
			/* OOM: keep count consistent with the (NULL) list so
			 * the free loop above is a no-op on the next flush. */
			ctx->last_bo_count = 0;
		}
	}

	/* Flush the CS. */
	ws->cs_flush(cs, flags, &ctx->last_gfx_fence,
		     ctx->screen->b.cs_count++);

	if (fence)
		ws->fence_reference(fence, ctx->last_gfx_fence);

	/* Check VM faults if needed. */
	if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
		si_check_vm_faults(ctx);

	si_begin_new_cs(ctx);
	ctx->gfx_flush_in_progress = false;
}