Example 1 — ilo (Intel Gallium driver): emit context state and 3DPRIMITIVE, with a snapshot/validate/rewind retry loop for the aperture check and stream-output bookkeeping.
/**
 * Emit context states and 3DPRIMITIVE.
 */
bool
ilo_3d_pipeline_emit_draw(struct ilo_3d_pipeline *p,
                          const struct ilo_context *ilo,
                          int *prim_generated, int *prim_emitted)
{
   bool success;

   if ((ilo->dirty & ILO_DIRTY_SO) &&
       ilo->so.enabled && !ilo->so.append_bitmask) {
      /*
       * We keep track of the SVBI (streamed vertex buffer index) in the
       * driver, so that we can restore it when the HW context is
       * invalidated (by another process).  The value needs to be reset
       * when stream output is enabled and the targets are changed.
       */
      p->state.so_num_vertices = 0;

      /* on GEN7+, we need SOL_RESET to reset the SO write offsets */
      if (p->dev->gen >= ILO_GEN(7))
         ilo_cp_set_one_off_flags(p->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   while (true) {
      struct ilo_builder_snapshot snapshot;

      /* we will rewind if aperture check below fails */
      ilo_builder_batch_snapshot(&p->cp->builder, &snapshot);

      handle_invalid_batch_bo(p, false);

      /* draw! */
      p->emit_draw(p, ilo);

      if (ilo_builder_validate(&p->cp->builder, 0, NULL)) {
         success = true;
         break;
      }

      /* rewind */
      ilo_builder_batch_restore(&p->cp->builder, &snapshot);

      if (ilo_cp_empty(p->cp)) {
         success = false;
         break;
      }
      else {
         /* flush and try again */
         ilo_cp_flush(p->cp, "out of aperture");
      }
   }

   if (success) {
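      /*
       * Stream-output bookkeeping: clamp the primitives counted as emitted
       * to the room remaining in the SO buffers.
       */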
      const int num_verts =
         u_vertices_per_prim(u_reduced_prim(ilo->draw->mode));
      const int max_emit =
         (p->state.so_max_vertices - p->state.so_num_vertices) / num_verts;
      const int generated =
         u_reduced_prims_for_vertices(ilo->draw->mode, ilo->draw->count);
      const int emitted = MIN2(generated, max_emit);

      p->state.so_num_vertices += emitted * num_verts;

      if (prim_generated)
         *prim_generated = generated;

      if (prim_emitted)
         *prim_emitted = emitted;
   }

   p->invalidate_flags = 0x0;

   return success;
}
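
The SO clamping above is plain integer arithmetic. A self-contained sketch with hypothetical numbers (MIN2 defined as in Mesa's macros) walks through one case:

#include <stdio.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
   /* hypothetical: 300 triangles (3 vertices each) drawn while the SO
    * buffers have room for 600 vertices, 150 of which are already used */
   const int num_verts = 3;
   const int so_max_vertices = 600;
   int so_num_vertices = 150;

   const int max_emit = (so_max_vertices - so_num_vertices) / num_verts;
   const int generated = 300;
   const int emitted = MIN2(generated, max_emit);

   so_num_vertices += emitted * num_verts;

   /* prints: generated=300 emitted=150 so_num_vertices=600 */
   printf("generated=%d emitted=%d so_num_vertices=%d\n",
          generated, emitted, so_num_vertices);
   return 0;
}

With 150 of 600 vertices already consumed, only 150 of the 300 generated triangles fit, so the emitted count is clamped while the generated count is reported in full.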
Example 2 — freedreno (Gallium): fd_draw_vbo with stream-out support, resource read/write tracking, and a ringbuffer headroom check.
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0;

	/* an empty scissor means nothing will be rasterized.. TODO this
	 * early-out should be skipped when stream-out is active, since
	 * transform feedback still consumes vertices even when nothing
	 * is drawn:
	 */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

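	/* from here on, the batch contains rendering that must eventually be
	 * flushed:
	 */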
	ctx->needs_flush = true;

	/*
	 * Figure out the buffers/features we need:
	 */

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		resource_written(ctx, surf);
		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (surf->nr_samples > 1)
			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Skip over buffer 0, that is sent along with the command stream */
	for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
		assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
		resource_read(ctx, ctx->vtx.vertexbuf.vb[i].buffer);
	}

	/* Mark index buffer as being read */
	resource_read(ctx, ctx->indexbuf.buffer);

	/* Mark textures as being read */
	for (i = 0; i < ctx->verttex.num_textures; i++)
		if (ctx->verttex.textures[i])
			resource_read(ctx, ctx->verttex.textures[i]->texture);
	for (i = 0; i < ctx->fragtex.num_textures; i++)
		if (ctx->fragtex.textures[i])
			resource_read(ctx, ctx->fragtex.textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(ctx, ctx->streamout.targets[i]->buffer);

	ctx->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
	/* and any buffers used, need to be resolved: */
	ctx->resolve |= buffers;

	DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
	ctx->draw_vbo(ctx, info);

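	/* advance the tracked stream-out write offsets by this draw's
	 * vertex count:
	 */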
	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		ctx->dirty = 0xffffffff;

	/* if an app (or, well, piglit test) does many thousands of draws
	 * without flush (or anything which implicitly flushes, like
	 * changing render targets), we can exceed the ringbuffer size.
	 * Since we don't currently have a sane way to wrap around, and
	 * we use the same buffer for both draw and tiling commands, for
	 * now we need to do this hack and trigger flush if we are running
	 * low on remaining space for cmds:
	 */
	if (((ctx->ring->cur - ctx->ring->start) >
				(ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_context_render(pctx);
}
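
The headroom test above mixes units: ring->cur and ring->start are dword (uint32_t) pointers, so their difference counts dwords, while ring->size is in bytes, hence the division by 4. A minimal standalone sketch of the same check, with a made-up reserve value standing in for FD_TILING_COMMANDS_DWORDS:

#include <stdint.h>
#include <stdbool.h>

#define TILING_RESERVE_DWORDS 0x1000	/* hypothetical reserve value */

struct ring {
	uint32_t *start, *cur;	/* dword pointers into the ring */
	uint32_t size;		/* total size in bytes */
};

/* true when the dwords already written leave less than the reserved
 * tiling-command space free: */
static bool
ring_low_on_space(const struct ring *ring)
{
	uint32_t used_dw = ring->cur - ring->start;	/* pointer diff: dwords */
	uint32_t total_dw = ring->size / 4;		/* bytes -> dwords */
	return used_dw > total_dw - TILING_RESERVE_DWORDS;
}

Flushing early like this trades some batch efficiency for never having to wrap the ring mid-frame, which matches the hack described in the comment.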
Example 3 — freedreno, earlier revision: fd_draw_vbo before stream-out, per-MRT color tracking, and hw-query stage tracking were added.
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, buffers = 0;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	ctx->needs_flush = true;

	/*
	 * Figure out the buffers/features we need:
	 */

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		fd_resource(surf)->dirty = true;
		buffers |= FD_BUFFER_COLOR;

		if (surf->nr_samples > 1)
			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	ctx->num_draws++;

	ctx->stats.draw_calls++;
	ctx->stats.prims_emitted +=
		u_reduced_prims_for_vertices(info->mode, info->count);

	/* any buffers that haven't been cleared, we need to restore: */
	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
	/* and any buffers used, need to be resolved: */
	ctx->resolve |= buffers;

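	/* hand off to the generation-specific backend to emit the draw: */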
	ctx->draw(ctx, info);
}
Example 4 — freedreno, intermediate revision: fd_draw_vbo with hw-query stage tracking and the ringbuffer headroom check, but still without stream-out.
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
    struct fd_context *ctx = fd_context(pctx);
    struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
    struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
    unsigned i, buffers = 0;

    /* if we supported transform feedback, we'd have to disable this: */
    if (((scissor->maxx - scissor->minx) *
            (scissor->maxy - scissor->miny)) == 0) {
        return;
    }

    /* emulate unsupported primitives: */
    if (!fd_supported_prim(ctx, info->mode)) {
        util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
        util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
        util_primconvert_draw_vbo(ctx->primconvert, info);
        return;
    }

    ctx->needs_flush = true;

    /*
     * Figure out the buffers/features we need:
     */

    if (fd_depth_enabled(ctx)) {
        buffers |= FD_BUFFER_DEPTH;
        fd_resource(pfb->zsbuf->texture)->dirty = true;
        ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
    }

    if (fd_stencil_enabled(ctx)) {
        buffers |= FD_BUFFER_STENCIL;
        fd_resource(pfb->zsbuf->texture)->dirty = true;
        ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
    }

    if (fd_logicop_enabled(ctx))
        ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

    for (i = 0; i < pfb->nr_cbufs; i++) {
        struct pipe_resource *surf;

        if (!pfb->cbufs[i])
            continue;

        surf = pfb->cbufs[i]->texture;

        fd_resource(surf)->dirty = true;
        buffers |= FD_BUFFER_COLOR;

        if (surf->nr_samples > 1)
            ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

        if (fd_blend_enabled(ctx, i))
            ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
    }

    ctx->num_draws++;

    ctx->stats.draw_calls++;
    ctx->stats.prims_emitted +=
        u_reduced_prims_for_vertices(info->mode, info->count);

    /* any buffers that haven't been cleared yet, we need to restore: */
    ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
    /* and any buffers used, need to be resolved: */
    ctx->resolve |= buffers;

    DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
        util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
        util_format_short_name(pipe_surface_format(pfb->zsbuf)));

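    /* record that subsequent commands belong to the draw stage, for hw
     * query bookkeeping:
     */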
    fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
    ctx->draw_vbo(ctx, info);

    /* if an app (or, well, piglit test) does many thousands of draws
     * without flush (or anything which implicitly flushes, like
     * changing render targets), we can exceed the ringbuffer size.
     * Since we don't currently have a sane way to wrap around, and
     * we use the same buffer for both draw and tiling commands, for
     * now we need to do this hack and trigger flush if we are running
     * low on remaining space for cmds:
     */
    if (((ctx->ring->cur - ctx->ring->start) >
            (ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
            (fd_mesa_debug & FD_DBG_FLUSH))
        fd_context_render(pctx);
}