/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        if (job->needs_flush)
                return;

        vc4_get_draw_cl_space(job, 0);

        struct vc4_cl_out *bcl = cl_start(&job->bcl);

        /* Tile state data is 48 bytes per tile, I think it can be thrown
         * away as soon as binning is finished.
         */
        cl_u8(&bcl, VC4_PACKET_TILE_BINNING_MODE_CONFIG);
        cl_u32(&bcl, 0); /* tile alloc addr, filled by kernel */
        cl_u32(&bcl, 0); /* tile alloc size, filled by kernel */
        cl_u32(&bcl, 0); /* tile state addr, filled by kernel */
        cl_u8(&bcl, job->draw_tiles_x);
        cl_u8(&bcl, job->draw_tiles_y);
        /* Other flags are filled by kernel. */
        cl_u8(&bcl, job->msaa ? VC4_BIN_CONFIG_MS_MODE_4X : 0);

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_u8(&bcl, VC4_PACKET_START_TILE_BINNING);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_u8(&bcl, VC4_PACKET_PRIMITIVE_LIST_FORMAT);
        cl_u8(&bcl, (VC4_PRIMITIVE_LIST_FORMAT_16_INDEX |
                     VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES));

        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;

        cl_end(&job->bcl, bcl);
}
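/* Illustrative sketch, not part of the driver: one way the draw_tiles_x/y
 * values consumed by vc4_start_draw() above could be derived from the
 * framebuffer size.  This assumes the VC4 binning tile size of 64x64 pixels
 * (32x32 when 4x MSAA is enabled); the real driver fills in
 * job->draw_tiles_x/y when the job's FBO state is set up, so the helper name
 * and signature here are hypothetical.
 */
static inline void
example_compute_draw_tiles(uint32_t fb_width, uint32_t fb_height, bool msaa,
                           uint8_t *tiles_x, uint8_t *tiles_y)
{
        /* Round the framebuffer dimensions up to whole binning tiles. */
        uint32_t tile_size = msaa ? 32 : 64;

        *tiles_x = (fb_width + tile_size - 1) / tile_size;
        *tiles_y = (fb_height + tile_size - 1) / tile_size;
}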
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info, 0);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                if (vc4->indexbuf.index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                cl_start_reloc(&job->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(job, &job->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);
                job->draw_calls_queued++;

                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step = count;
                        static const uint32_t max_verts = 65535;

                        /* GFXH-515 / SW-5891: The binner emits 16 bit indices
                         * for drawarrays, which means that if start + count >
                         * 64k it would truncate the top bits.  Work around
                         * this by emitting a limited number of primitives at
                         * a time and reemitting the shader state pointing
                         * farther down the vertex attribute arrays.
                         *
                         * To do this properly for line loops or trifans, we'd
                         * need to make a new VB containing the first vertex
                         * plus whatever remainder.
                         */
                        if (extra_index_bias) {
                                cl_end(&job->bcl, bcl);
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                                bcl = cl_start(&job->bcl);
                        }

                        if (start + count > max_verts) {
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step = max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step = max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                        cl_u8(&bcl, info->mode);
                        cl_u32(&bcl, this_count);
                        cl_u32(&bcl, start);
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                }
        }
        cl_end(&job->bcl, bcl);

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
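/* Illustrative sketch, not part of the driver: the chunking arithmetic of the
 * GFXH-515 workaround in vc4_draw_vbo() above, specialized to
 * PIPE_PRIM_TRIANGLES.  Each chunk is re-based to vertex 0 by growing the
 * index bias passed to vc4_emit_gl_shader_state(); the hypothetical helper
 * below only prints the chunks it would emit (assumes <stdio.h>).  For
 * example, start=0, count=200000 splits into (bias 0, count 65535),
 * (bias 65535, count 65535), (bias 131070, count 65535),
 * (bias 196605, count 3395).
 */
static void
example_gfxh515_split_triangles(uint32_t start, uint32_t count)
{
        static const uint32_t max_verts = 65535;
        uint32_t extra_index_bias = 0;

        while (count) {
                uint32_t this_count = count;
                uint32_t step = count;

                if (start + count > max_verts) {
                        /* Largest multiple of 3 that still fits in the
                         * binner's 16-bit vertex index range.
                         */
                        this_count = step = max_verts - (max_verts % 3);
                }

                /* The real code emits VC4_PACKET_GL_ARRAY_PRIMITIVE here. */
                printf("chunk: bias %u, start %u, count %u\n",
                       extra_index_bias, start, this_count);

                count -= step;
                extra_index_bias += start + step;
                start = 0;
        }
}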