static void vc4_clear(struct pipe_context *pctx, unsigned buffers, const union pipe_color_union *color, double depth, unsigned stencil) { struct vc4_context *vc4 = vc4_context(pctx); /* We can't flag new buffers for clearing once we've queued draws. We * could avoid this by using the 3d engine to clear. */ if (vc4->draw_call_queued) vc4_flush(pctx); if (buffers & PIPE_CLEAR_COLOR0) { vc4->clear_color[0] = vc4->clear_color[1] = pack_rgba(vc4->framebuffer.cbufs[0]->format, color->f); } if (buffers & PIPE_CLEAR_DEPTH) { /* Though the depth buffer is stored with Z in the high 24, * for this field we just need to store it in the low 24. */ vc4->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM, depth); } if (buffers & PIPE_CLEAR_STENCIL) vc4->clear_stencil = stencil; vc4->cleared |= buffers; vc4->resolve |= buffers; vc4_start_draw(vc4); }
/** * Flushes the current command lists if they reference the given BO. * * This helps avoid flushing the command buffers when unnecessary. */ void vc4_flush_for_bo(struct pipe_context *pctx, struct vc4_bo *bo) { struct vc4_context *vc4 = vc4_context(pctx); if (!vc4->needs_flush) return; /* Walk all the referenced BOs in the drawing command list to see if * they match. */ struct vc4_bo **referenced_bos = vc4->bo_pointers.base; for (int i = 0; i < (vc4->bo_handles.next - vc4->bo_handles.base) / 4; i++) { if (referenced_bos[i] == bo) { vc4_flush(pctx); return; } } /* Also check for the Z/color buffers, since the references to those * are only added immediately before submit. */ struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]); if (csurf) { struct vc4_resource *ctex = vc4_resource(csurf->base.texture); if (ctex->bo == bo) { vc4_flush(pctx); return; } } struct vc4_surface *zsurf = vc4_surface(vc4->framebuffer.zsbuf); if (zsurf) { struct vc4_resource *ztex = vc4_resource(zsurf->base.texture); if (ztex->bo == bo) { vc4_flush(pctx); return; } } }
/**
 * HW-2116 workaround: Flush the batch before triggering the hardware state
 * counter wraparound behavior.
 *
 * State updates are tracked by a global counter which increments at the first
 * state update after a draw or a START_BINNING.  Tiles can then have their
 * state updated at draw time with a set of cheap checks for whether the
 * state's copy of the global counter matches the global counter the last time
 * that state was written to the tile.
 *
 * The state counters are relatively small and wrap around quickly, so you
 * could get false negatives for needing to update a particular state in the
 * tile.  To avoid this, the hardware attempts to write all of the state in
 * the tile at wraparound time.  This apparently is broken, so we just flush
 * everything before that behavior is triggered.  A batch flush is sufficient
 * to get our current contents drawn and reset the counters to 0.
 *
 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
 * tiles with VC4_PACKET_RETURN_FROM_LIST.
 */
static void
vc4_hw_2116_workaround(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* 0x1ef0 is the draw-call count at which the counter wraparound
         * would otherwise be triggered.
         */
        if (vc4->draw_calls_queued == 0x1ef0) {
                /* Fixed: the message previously lacked the closing ')'. */
                perf_debug("Flushing batch due to HW-2116 workaround "
                           "(too many draw calls per scene)\n");
                vc4_flush(pctx);
        }
}
static void vc4_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence, unsigned flags) { struct vc4_context *vc4 = vc4_context(pctx); vc4_flush(pctx); if (fence) { struct vc4_fence *f = vc4_fence_create(vc4->screen, vc4->last_emit_seqno); *fence = (struct pipe_fence_handle *)f; } }
/* pipe_context::set_framebuffer_state() entry point: latches the new
 * color/Z-S surfaces into the context and flushes pending rendering to the
 * previous framebuffer.
 */
static void
vc4_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_framebuffer_state *cso = &vc4->framebuffer;
        unsigned i;

        /* Any queued rendering targets the old framebuffer; submit it now. */
        vc4_flush(pctx);

        /* Take references on the new cbufs, then drop references on any
         * surplus slots from the previous binding.  Note this relies on
         * cso->nr_cbufs still holding the OLD count until updated below.
         */
        for (i = 0; i < framebuffer->nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
        for (; i < vc4->framebuffer.nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], NULL);

        cso->nr_cbufs = framebuffer->nr_cbufs;

        pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

        cso->width = framebuffer->width;
        cso->height = framebuffer->height;

        /* Nonzero texture mipmap levels are laid out as if they were in
         * power-of-two-sized spaces.  The renderbuffer config infers its
         * stride from the width parameter, so we need to configure our
         * framebuffer.  Note that if the z/color buffers were mismatched
         * sizes, we wouldn't be able to do this.
         */
        if (cso->cbufs[0] && cso->cbufs[0]->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                cso->width =
                        (rsc->slices[cso->cbufs[0]->u.tex.level].stride /
                         rsc->cpp);
        } else if (cso->zsbuf && cso->zsbuf->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                cso->width =
                        (rsc->slices[cso->zsbuf->u.tex.level].stride /
                         rsc->cpp);
        }

        vc4->dirty |= VC4_DIRTY_FRAMEBUFFER;
}
/* pipe_context::draw_vbo() entry point: emits one GL-mode draw into the
 * binner command list, falling back to util_primconvert for QUADS and above.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* The hardware only understands prim types below QUADS; convert
         * anything else to triangles/lines via util_primconvert.
         */
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_update_shadow_textures(pctx, &vc4->verttex);
        vc4_update_shadow_textures(pctx, &vc4->fragtex);

        /* May flush the whole batch; must happen before we start emitting. */
        vc4_hw_2116_workaround(pctx);

        vc4_get_draw_cl_space(vc4);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        vc4_update_compiled_shaders(vc4, info->mode);

        vc4_emit_state(pctx);

        /* Re-emit the GL shader state record if any state feeding it
         * changed, or if the index bias moved (it is baked into the record).
         */
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                if (vc4->indexbuf.index_size == 4) {
                        /* Hardware has no 32-bit index support; use a
                         * 16-bit shadow copy of the index buffer.
                         */
                        prsc = vc4_get_shadow_index_buffer(pctx,
                                                           &vc4->indexbuf,
                                                           info->count,
                                                           &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                /* Upload user-pointer indices to a BO. */
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                /* Packet layout is fixed; do not reorder these emits. */
                cl_start_reloc(&vc4->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(vc4, &vc4->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);

                /* Drop the temporary reference from shadow/upload paths. */
                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                cl_u8(&bcl, info->mode);
                cl_u32(&bcl, info->count);
                cl_u32(&bcl, info->start);
        }
        cl_end(&vc4->bcl, bcl);

        /* Record which Z/S/color buffers this scene will write, so the
         * tile stores can be set up at submit.
         */
        if (vc4->zsa && vc4->zsa->base.depth.enabled) {
                vc4->resolve |= PIPE_CLEAR_DEPTH;
        }
        if (vc4->zsa && vc4->zsa->base.stencil[0].enabled)
                vc4->resolve |= PIPE_CLEAR_STENCIL;
        vc4->resolve |= PIPE_CLEAR_COLOR0;

        vc4->shader_rec_count++;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
/* pipe_context::set_framebuffer_state() entry point (later variant):
 * latches the new surfaces, tracks which buffers can skip their tile load,
 * and derives MSAA/tile dimensions for the new scene.
 */
static void
vc4_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_framebuffer_state *cso = &vc4->framebuffer;
        unsigned i;

        /* Queued rendering targets the old framebuffer; submit it first. */
        vc4_flush(pctx);

        /* Reference the new cbufs, then release any surplus slots.  The
         * second loop depends on cso->nr_cbufs still holding the OLD count.
         */
        for (i = 0; i < framebuffer->nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
        for (; i < vc4->framebuffer.nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], NULL);

        cso->nr_cbufs = framebuffer->nr_cbufs;

        pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

        cso->width = framebuffer->width;
        cso->height = framebuffer->height;

        /* If we're binding to uninitialized buffers, no need to load their
         * contents before drawing.
         */
        if (cso->cbufs[0]) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                if (!rsc->writes)
                        vc4->cleared |= PIPE_CLEAR_COLOR0;
        }

        if (cso->zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                if (!rsc->writes)
                        vc4->cleared |= PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL;
        }

        /* Nonzero texture mipmap levels are laid out as if they were in
         * power-of-two-sized spaces.  The renderbuffer config infers its
         * stride from the width parameter, so we need to configure our
         * framebuffer.  Note that if the z/color buffers were mismatched
         * sizes, we wouldn't be able to do this.
         */
        if (cso->cbufs[0] && cso->cbufs[0]->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                cso->width =
                        (rsc->slices[cso->cbufs[0]->u.tex.level].stride /
                         rsc->cpp);
        } else if (cso->zsbuf && cso->zsbuf->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                cso->width =
                        (rsc->slices[cso->zsbuf->u.tex.level].stride /
                         rsc->cpp);
        }

        /* MSAA is a property of the bound surfaces; prefer the color
         * buffer's sample count, falling back to the Z/S buffer's.
         */
        vc4->msaa = false;
        if (cso->cbufs[0])
                vc4->msaa = cso->cbufs[0]->texture->nr_samples > 1;
        else if (cso->zsbuf)
                vc4->msaa = cso->zsbuf->texture->nr_samples > 1;

        /* MSAA halves the tile dimensions (each pixel stores 4 samples). */
        if (vc4->msaa) {
                vc4->tile_width = 32;
                vc4->tile_height = 32;
        } else {
                vc4->tile_width = 64;
                vc4->tile_height = 64;
        }

        vc4->draw_tiles_x = DIV_ROUND_UP(cso->width, vc4->tile_width);
        vc4->draw_tiles_y = DIV_ROUND_UP(cso->height, vc4->tile_height);

        vc4->dirty |= VC4_DIRTY_FRAMEBUFFER;
}
/**
 * pipe_context::flush() entry point (no-fence variant): submits queued
 * rendering; fences are not produced in this path.
 */
static void
vc4_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
               unsigned flags)
{
        (void)fence;
        (void)flags;

        vc4_flush(pctx);
}
static void * vc4_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc, unsigned level, unsigned usage, const struct pipe_box *box, struct pipe_transfer **pptrans) { struct vc4_context *vc4 = vc4_context(pctx); struct vc4_resource *rsc = vc4_resource(prsc); struct vc4_resource_slice *slice = &rsc->slices[level]; struct vc4_transfer *trans; struct pipe_transfer *ptrans; enum pipe_format format = prsc->format; char *buf; if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) { vc4_resource_bo_alloc(rsc); } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) { if (vc4_cl_references_bo(pctx, rsc->bo)) { if ((usage & PIPE_TRANSFER_DISCARD_RANGE) && prsc->last_level == 0 && prsc->width0 == box->width && prsc->height0 == box->height && prsc->depth0 == box->depth) { vc4_resource_bo_alloc(rsc); } else { vc4_flush(pctx); } } } if (usage & PIPE_TRANSFER_WRITE) rsc->writes++; trans = util_slab_alloc(&vc4->transfer_pool); if (!trans) return NULL; /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */ /* util_slab_alloc() doesn't zero: */ memset(trans, 0, sizeof(*trans)); ptrans = &trans->base; pipe_resource_reference(&ptrans->resource, prsc); ptrans->level = level; ptrans->usage = usage; ptrans->box = *box; /* Note that the current kernel implementation is synchronous, so no * need to do syncing stuff here yet. */ buf = vc4_bo_map(rsc->bo); if (!buf) { fprintf(stderr, "Failed to map bo\n"); goto fail; } *pptrans = ptrans; if (rsc->tiled) { uint32_t utile_w = vc4_utile_width(rsc->cpp); uint32_t utile_h = vc4_utile_height(rsc->cpp); /* No direct mappings of tiled, since we need to manually * tile/untile. */ if (usage & PIPE_TRANSFER_MAP_DIRECTLY) return NULL; /* We need to align the box to utile boundaries, since that's * what load/store operate on. 
*/ uint32_t box_start_x = ptrans->box.x & (utile_w - 1); uint32_t box_start_y = ptrans->box.y & (utile_h - 1); ptrans->box.width += box_start_x; ptrans->box.x -= box_start_x; ptrans->box.height += box_start_y; ptrans->box.y -= box_start_y; ptrans->box.width = align(ptrans->box.width, utile_w); ptrans->box.height = align(ptrans->box.height, utile_h); ptrans->stride = ptrans->box.width * rsc->cpp; ptrans->layer_stride = ptrans->stride; trans->map = malloc(ptrans->stride * ptrans->box.height); if (usage & PIPE_TRANSFER_READ) { vc4_load_tiled_image(trans->map, ptrans->stride, buf + slice->offset + box->z * rsc->cube_map_stride, slice->stride, slice->tiling, rsc->cpp, &ptrans->box); } return (trans->map + box_start_x * rsc->cpp + box_start_y * ptrans->stride); } else { ptrans->stride = slice->stride; ptrans->layer_stride = ptrans->stride; return buf + slice->offset + box->y / util_format_get_blockheight(format) * ptrans->stride + box->x / util_format_get_blockwidth(format) * rsc->cpp + box->z * rsc->cube_map_stride; } fail: vc4_resource_transfer_unmap(pctx, ptrans); return NULL; }
/* pipe_context::draw_vbo() entry point (early variant): emits uniforms, the
 * GL shader record, and the draw packet inline.  The emit order below is the
 * hardware's shader-record byte layout — do not reorder.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* Hardware only supports prim types below QUADS; convert the rest. */
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                return;
        }

        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        vc4_update_compiled_shaders(vc4, info->mode);

        vc4_emit_state(pctx);
        vc4->dirty = 0;

        /* Uniform streams for all three stages; note CS (coordinate shader)
         * shares the vertex-stage constant buffer and samplers.
         */
        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
        /* Emit the shader record. */
        cl_start_shader_reloc(&vc4->shader_rec, 3 + num_elements_emit);
        cl_u16(&vc4->shader_rec,
               VC4_SHADER_FLAG_ENABLE_CLIPPING |
               ((info->mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex) ?
                VC4_SHADER_FLAG_VS_POINT_SIZE : 0));
        cl_u8(&vc4->shader_rec, 0); /* fs num uniforms (unused) */
        cl_u8(&vc4->shader_rec, vc4->prog.fs->num_inputs);
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.fs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        cl_u16(&vc4->shader_rec, 0); /* vs num uniforms */
        cl_u8(&vc4->shader_rec, (1 << num_elements_emit) - 1); /* vs attribute array bitfield */
        cl_u8(&vc4->shader_rec, 16 * num_elements_emit); /* vs total attribute size */
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.vs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        cl_u16(&vc4->shader_rec, 0); /* cs num uniforms */
        cl_u8(&vc4->shader_rec, (1 << num_elements_emit) - 1); /* cs attribute array bitfield */
        cl_u8(&vc4->shader_rec, 16 * num_elements_emit); /* cs total attribute size */
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.cs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        /* Clamp max_index so no attribute fetch runs past its buffer. */
        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer);
                uint32_t offset = vb->buffer_offset + elem->src_offset;
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_reloc(vc4, &vc4->shader_rec, rsc->bo, offset);
                cl_u8(&vc4->shader_rec, elem_size - 1);
                cl_u8(&vc4->shader_rec, vb->stride);
                cl_u8(&vc4->shader_rec, i * 16); /* VS VPM offset */
                cl_u8(&vc4->shader_rec, i * 16); /* CS VPM offset */

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        /* With no real attributes, emit the dummy read from a scratch BO. */
        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen,
                                                 4096, "scratch VBO");
                cl_reloc(vc4, &vc4->shader_rec, bo, 0);
                cl_u8(&vc4->shader_rec, 16 - 1); /* element size */
                cl_u8(&vc4->shader_rec, 0); /* stride */
                cl_u8(&vc4->shader_rec, 0); /* VS VPM offset */
                cl_u8(&vc4->shader_rec, 0); /* CS VPM offset */
                vc4_bo_unreference(&bo);
        }

        /* the actual draw call. */
        cl_u8(&vc4->bcl, VC4_PACKET_GL_SHADER_STATE);
        assert(vtx->num_elements <= 8);
        /* Note that number of attributes == 0 in the packet means 8
         * attributes.  This field also contains the offset into shader_rec.
         */
        cl_u32(&vc4->bcl, num_elements_emit & 0x7);

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->indexed) {
                struct vc4_resource *rsc = vc4_resource(vc4->indexbuf.buffer);
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                /* 32-bit indices go through a 16-bit shadow buffer. */
                if (rsc->shadow_parent) {
                        vc4_update_shadow_index_buffer(pctx, &vc4->indexbuf);
                        offset = 0;
                        index_size = 2;
                }

                cl_start_reloc(&vc4->bcl, 1);
                cl_u8(&vc4->bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&vc4->bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&vc4->bcl, info->count);
                cl_reloc(vc4, &vc4->bcl, rsc->bo, offset);
                cl_u32(&vc4->bcl, max_index);
        } else {
                cl_u8(&vc4->bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                cl_u8(&vc4->bcl, info->mode);
                cl_u32(&vc4->bcl, info->count);
                cl_u32(&vc4->bcl, info->start);
        }

        /* Track which buffers this scene writes for the tile stores. */
        if (vc4->zsa && vc4->zsa->base.depth.enabled) {
                vc4->resolve |= PIPE_CLEAR_DEPTH;
        }
        if (vc4->zsa && vc4->zsa->base.stencil[0].enabled)
                vc4->resolve |= PIPE_CLEAR_STENCIL;
        vc4->resolve |= PIPE_CLEAR_COLOR0;

        vc4->shader_rec_count++;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
/* pipe_context::draw_vbo() entry point (job-based variant): emits one draw
 * into the current job's binner list, splitting large array draws to work
 * around GFXH-515's 16-bit internal index limit.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* Hardware only supports prim types below QUADS; convert the rest. */
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        /* May flush the batch; must precede any command-list emission. */
        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        /* Re-emit the GL shader state record if anything feeding it changed,
         * or if the index bias moved (it is baked into the record).
         */
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info, 0);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                if (vc4->indexbuf.index_size == 4) {
                        /* No 32-bit index support; use a 16-bit shadow. */
                        prsc = vc4_get_shadow_index_buffer(pctx,
                                                           &vc4->indexbuf,
                                                           info->count,
                                                           &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                /* Upload user-pointer indices to a BO. */
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                /* Packet layout is fixed; do not reorder these emits. */
                cl_start_reloc(&job->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(job, &job->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);
                job->draw_calls_queued++;

                /* Drop the temporary reference from shadow/upload paths. */
                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step = count;
                        static const uint32_t max_verts = 65535;

                        /* GFXH-515 / SW-5891: The binner emits 16 bit indices
                         * for drawarrays, which means that if start + count >
                         * 64k it would truncate the top bits.  Work around
                         * this by emitting a limited number of primitives at
                         * a time and reemitting the shader state pointing
                         * farther down the vertex attribute arrays.
                         *
                         * To do this properly for line loops or trifans, we'd
                         * need to make a new VB containing the first vertex
                         * plus whatever remainder.
                         */
                        if (extra_index_bias) {
                                /* Close the CL so the shader-state re-emit
                                 * can allocate, then reopen it.
                                 */
                                cl_end(&job->bcl, bcl);
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                                bcl = cl_start(&job->bcl);
                        }

                        if (start + count > max_verts) {
                                /* Per-prim-type chunking: this_count is what
                                 * we draw now; step is how far we advance
                                 * (smaller for strips, which need the
                                 * trailing verts re-drawn for continuity).
                                 */
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step =
                                                max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step =
                                                max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                        cl_u8(&bcl, info->mode);
                        cl_u32(&bcl, this_count);
                        cl_u32(&bcl, start);
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                }
        }
        cl_end(&job->bcl, bcl);

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        /* NOTE(review): '=' (not '|=') drops any previously
                         * set STENCIL bit here — presumably intentional if a
                         * depth-only tile store rewrites the combined Z/S
                         * buffer's stencil bytes too; confirm against the
                         * tile-store behavior.
                         */
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}