/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        if (job->needs_flush)
                return;

        vc4_get_draw_cl_space(job, 0);

        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        // Tile state data is 48 bytes per tile, I think it can be thrown away
        // as soon as binning is finished.
        cl_u8(&bcl, VC4_PACKET_TILE_BINNING_MODE_CONFIG);
        cl_u32(&bcl, 0); /* tile alloc addr, filled by kernel */
        cl_u32(&bcl, 0); /* tile alloc size, filled by kernel */
        cl_u32(&bcl, 0); /* tile state addr, filled by kernel */
        cl_u8(&bcl, job->draw_tiles_x);
        cl_u8(&bcl, job->draw_tiles_y);
        /* Other flags are filled by kernel. */
        cl_u8(&bcl, job->msaa ? VC4_BIN_CONFIG_MS_MODE_4X : 0);

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_u8(&bcl, VC4_PACKET_START_TILE_BINNING);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_u8(&bcl, VC4_PACKET_PRIMITIVE_LIST_FORMAT);
        cl_u8(&bcl, (VC4_PRIMITIVE_LIST_FORMAT_16_INDEX |
                     VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES));

        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;

        cl_end(&job->bcl, bcl);
}
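The cl_start()/cl_end() pairing above hands packet emission a local cursor and commits it back when done. Below is a minimal sketch of that out-pointer pattern, assuming a simplified command list with a preallocated buffer; the sketch_* names are hypothetical stand-ins, and the real helpers in vc4_cl.h also handle relocations and space accounting.

/* Hedged sketch: a simplified model of the cl_start()/cl_end() out-pointer
 * pattern, illustrative only. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct sketch_cl {
        void *base;  /* start of the command list buffer */
        void *next;  /* write cursor */
        size_t size; /* total bytes allocated */
};

/* cl_start() hands out the raw cursor so packet emission can run on a
 * local pointer instead of dereferencing the struct per byte. */
static inline void *sketch_cl_start(struct sketch_cl *cl) { return cl->next; }

/* cl_u8()/cl_u32() write a value and advance the local cursor. */
static inline void sketch_cl_u8(void **out, uint8_t v) {
        memcpy(*out, &v, 1); *out = (char *)*out + 1;
}
static inline void sketch_cl_u32(void **out, uint32_t v) {
        memcpy(*out, &v, 4); *out = (char *)*out + 4;
}

/* cl_end() stores the advanced cursor back, committing the bytes. */
static inline void sketch_cl_end(struct sketch_cl *cl, void *out) {
        assert((char *)out - (char *)cl->base <= (ptrdiff_t)cl->size);
        cl->next = out;
}

int main(void) {
        struct sketch_cl cl = { .size = 64 };
        cl.base = cl.next = malloc(cl.size);
        void *out = sketch_cl_start(&cl);
        sketch_cl_u8(&out, 0x70); /* e.g. a packet opcode */
        sketch_cl_u32(&out, 0);   /* e.g. a kernel-patched address */
        sketch_cl_end(&cl, out);
        free(cl.base);
        return 0;
}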
/**
 * Emits a no-op STORE_TILE_BUFFER_GENERAL.
 *
 * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
 * some sort before another load is triggered.
 */
static void
vc4_store_before_load(struct vc4_context *vc4, bool *coords_emitted)
{
        if (!*coords_emitted)
                return;

        cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
        cl_u8(&vc4->rcl, VC4_LOADSTORE_TILE_BUFFER_NONE);
        cl_u8(&vc4->rcl, (VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
                          VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
                          VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR));
        cl_u32(&vc4->rcl, 0); /* no address, since we're in None mode */

        *coords_emitted = false;
}
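A minimal sketch of the protocol vc4_store_before_load() exists to uphold: once a TILE_COORDINATES packet is pending, a store of some kind must consume it before the next load is queued. The helpers below are hypothetical stand-ins that model only the coords_emitted bookkeeping, not real packet emission.

/* Hedged sketch of the coords_emitted protocol; illustrative only. */
#include <assert.h>
#include <stdbool.h>

static void store_before_load(bool *coords_emitted) {
        if (!*coords_emitted)
                return;
        /* The real code emits the no-op STORE_TILE_BUFFER_GENERAL here. */
        *coords_emitted = false;
}
static void emit_load(bool *coords_emitted) {
        assert(!*coords_emitted); /* pending coords must be consumed first */
}
static void emit_tile_coordinates(bool *coords_emitted) {
        *coords_emitted = true;
}

int main(void) {
        bool coords_emitted = false;
        store_before_load(&coords_emitted);     /* no-op: nothing pending */
        emit_load(&coords_emitted);             /* e.g. the color load */
        emit_tile_coordinates(&coords_emitted); /* triggers the load */
        store_before_load(&coords_emitted);     /* required before a Z/S load */
        emit_load(&coords_emitted);
        return 0;
}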
uint32_t
vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo)
{
        uint32_t hindex;
        uint32_t *current_handles = vc4->bo_handles.base;

        for (hindex = 0;
             hindex < (vc4->bo_handles.next - vc4->bo_handles.base) / 4;
             hindex++) {
                if (current_handles[hindex] == bo->handle)
                        return hindex;
        }

        cl_u32(&vc4->bo_handles, bo->handle);
        cl_ptr(&vc4->bo_pointers, vc4_bo_reference(bo));

        return hindex;
}
uint32_t
vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo)
{
        uint32_t hindex;
        uint32_t *current_handles = vc4->bo_handles.base;

        for (hindex = 0; hindex < cl_offset(&vc4->bo_handles) / 4; hindex++) {
                if (current_handles[hindex] == bo->handle)
                        return hindex;
        }

        struct vc4_cl_out *out;

        out = cl_start(&vc4->bo_handles);
        cl_u32(&out, bo->handle);
        cl_end(&vc4->bo_handles, out);

        out = cl_start(&vc4->bo_pointers);
        cl_ptr(&out, vc4_bo_reference(bo));
        cl_end(&vc4->bo_pointers, out);

        return hindex;
}
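The lookup above is a linear search over the handles recorded so far, appending on a miss; cl_offset() is just (next - base) in bytes, so dividing by sizeof(uint32_t) gives the number of handles already recorded. A self-contained sketch of that dedup logic, with a hypothetical hindex_lookup() over a plain array:

/* Hedged sketch of the handle-deduplication logic in vc4_gem_hindex(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t
hindex_lookup(uint32_t *handles, uint32_t *count, uint32_t handle)
{
        for (uint32_t i = 0; i < *count; i++) {
                if (handles[i] == handle)
                        return i; /* BO already in the exec list */
        }
        handles[*count] = handle; /* new BO: append and return its slot */
        return (*count)++;
}

int main(void)
{
        uint32_t handles[8], count = 0;
        printf("%u\n", hindex_lookup(handles, &count, 42)); /* 0 (appended) */
        printf("%u\n", hindex_lookup(handles, &count, 17)); /* 1 (appended) */
        printf("%u\n", hindex_lookup(handles, &count, 42)); /* 0 (hit) */
        return 0;
}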
static void
vc4_setup_rcl(struct vc4_context *vc4)
{
        struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
        struct vc4_resource *ctex = csurf ? vc4_resource(csurf->base.texture) : NULL;
        struct vc4_surface *zsurf = vc4_surface(vc4->framebuffer.zsbuf);
        struct vc4_resource *ztex = zsurf ? vc4_resource(zsurf->base.texture) : NULL;

        if (!csurf)
                vc4->resolve &= ~PIPE_CLEAR_COLOR0;
        if (!zsurf)
                vc4->resolve &= ~(PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL);
        uint32_t resolve_uncleared = vc4->resolve & ~vc4->cleared;
        uint32_t width = vc4->framebuffer.width;
        uint32_t height = vc4->framebuffer.height;
        uint32_t stride_in_tiles = align(width, 64) / 64;

        assert(vc4->draw_min_x != ~0 && vc4->draw_min_y != ~0);
        uint32_t min_x_tile = vc4->draw_min_x / 64;
        uint32_t min_y_tile = vc4->draw_min_y / 64;
        uint32_t max_x_tile = (vc4->draw_max_x - 1) / 64;
        uint32_t max_y_tile = (vc4->draw_max_y - 1) / 64;
        uint32_t xtiles = max_x_tile - min_x_tile + 1;
        uint32_t ytiles = max_y_tile - min_y_tile + 1;

#if 0
        fprintf(stderr, "RCL: resolve 0x%x clear 0x%x resolve uncleared 0x%x\n",
                vc4->resolve,
                vc4->cleared,
                resolve_uncleared);
#endif

        uint32_t reloc_size = 9;
        uint32_t clear_size = 14;
        uint32_t config_size = 11 + reloc_size;
        uint32_t loadstore_size = 7 + reloc_size;
        uint32_t tilecoords_size = 3;
        uint32_t branch_size = 5 + reloc_size;
        uint32_t color_store_size = 1;
        uint32_t semaphore_size = 1;
        cl_ensure_space(&vc4->rcl,
                        clear_size +
                        config_size +
                        loadstore_size +
                        semaphore_size +
                        xtiles * ytiles * (loadstore_size * 4 +
                                           tilecoords_size * 3 +
                                           branch_size +
                                           color_store_size));

        if (vc4->cleared) {
                cl_u8(&vc4->rcl, VC4_PACKET_CLEAR_COLORS);
                cl_u32(&vc4->rcl, vc4->clear_color[0]);
                cl_u32(&vc4->rcl, vc4->clear_color[1]);
                cl_u32(&vc4->rcl, vc4->clear_depth);
                cl_u8(&vc4->rcl, vc4->clear_stencil);
        }

        /* The rendering mode config determines the pointer that's used for
         * VC4_PACKET_STORE_MS_TILE_BUFFER address computations.  The kernel
         * could handle a no-relocation rendering mode config and deny those
         * packets, but instead we just tell the kernel we're doing our color
         * rendering to the Z buffer, and just don't emit any of those
         * packets.
         */
        struct vc4_surface *render_surf = csurf ? csurf : zsurf;
        struct vc4_resource *render_tex =
                vc4_resource(render_surf->base.texture);
        cl_start_reloc(&vc4->rcl, 1);
        cl_u8(&vc4->rcl, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
        cl_reloc(vc4, &vc4->rcl, render_tex->bo, render_surf->offset);
        cl_u16(&vc4->rcl, width);
        cl_u16(&vc4->rcl, height);
        cl_u16(&vc4->rcl, ((render_surf->tiling <<
                            VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT) |
                           (vc4_rt_format_is_565(render_surf->base.format) ?
                            VC4_RENDER_CONFIG_FORMAT_BGR565 :
                            VC4_RENDER_CONFIG_FORMAT_RGBA8888)));

        /* The tile buffer normally gets cleared when the previous tile is
         * stored.  If the clear values changed between frames, then the tile
         * buffer has stale clear values in it, so we have to do a store in
         * None mode (no writes) so that we trigger the tile buffer clear.
         *
         * Excess clearing is only a performance cost, since per-tile contents
         * will be loaded/stored in the loop below.
         */
        if (vc4->cleared & (PIPE_CLEAR_COLOR0 |
                            PIPE_CLEAR_DEPTH |
                            PIPE_CLEAR_STENCIL)) {
                cl_u8(&vc4->rcl, VC4_PACKET_TILE_COORDINATES);
                cl_u8(&vc4->rcl, 0);
                cl_u8(&vc4->rcl, 0);

                cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
                cl_u16(&vc4->rcl, VC4_LOADSTORE_TILE_BUFFER_NONE);
                cl_u32(&vc4->rcl, 0); /* no address, since we're in None mode */
        }

        uint32_t color_hindex = ctex ? vc4_gem_hindex(vc4, ctex->bo) : 0;
        uint32_t depth_hindex = ztex ? vc4_gem_hindex(vc4, ztex->bo) : 0;
        uint32_t tile_alloc_hindex = vc4_gem_hindex(vc4, vc4->tile_alloc);

        for (int y = min_y_tile; y <= max_y_tile; y++) {
                for (int x = min_x_tile; x <= max_x_tile; x++) {
                        bool end_of_frame = (x == max_x_tile &&
                                             y == max_y_tile);
                        bool coords_emitted = false;

                        /* Note that the load doesn't actually occur until the
                         * tile coords packet is processed, and only one load
                         * may be outstanding at a time.
                         */
                        if (resolve_uncleared & PIPE_CLEAR_COLOR) {
                                vc4_store_before_load(vc4, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_COLOR |
                                      (csurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl,
                                      vc4_rt_format_is_565(csurf->base.format) ?
                                      VC4_LOADSTORE_TILE_BUFFER_BGR565 :
                                      VC4_LOADSTORE_TILE_BUFFER_RGBA8888);
                                cl_reloc_hindex(&vc4->rcl, color_hindex,
                                                csurf->offset);

                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                        }

                        if (resolve_uncleared & (PIPE_CLEAR_DEPTH |
                                                 PIPE_CLEAR_STENCIL)) {
                                vc4_store_before_load(vc4, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_ZS |
                                      (zsurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl, 0);
                                cl_reloc_hindex(&vc4->rcl, depth_hindex,
                                                zsurf->offset);

                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                        }

                        /* Clipping depends on tile coordinates having been
                         * emitted, so make sure it's happened even if
                         * everything was cleared to start.
                         */
                        vc4_tile_coordinates(vc4, x, y, &coords_emitted);

                        /* Wait for the binner before jumping to the first
                         * tile's lists.
                         */
                        if (x == min_x_tile && y == min_y_tile)
                                cl_u8(&vc4->rcl, VC4_PACKET_WAIT_ON_SEMAPHORE);

                        cl_start_reloc(&vc4->rcl, 1);
                        cl_u8(&vc4->rcl, VC4_PACKET_BRANCH_TO_SUB_LIST);
                        cl_reloc_hindex(&vc4->rcl, tile_alloc_hindex,
                                        (y * stride_in_tiles + x) * 32);

                        if (vc4->resolve & (PIPE_CLEAR_DEPTH |
                                            PIPE_CLEAR_STENCIL)) {
                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_ZS |
                                      (zsurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl,
                                      VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR);
                                cl_reloc_hindex(&vc4->rcl, depth_hindex,
                                                zsurf->offset |
                                                ((end_of_frame &&
                                                  !(vc4->resolve &
                                                    PIPE_CLEAR_COLOR0)) ?
                                                 VC4_LOADSTORE_TILE_BUFFER_EOF :
                                                 0));

                                coords_emitted = false;
                        }

                        if (vc4->resolve & PIPE_CLEAR_COLOR0) {
                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                                if (end_of_frame) {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
                                } else {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER);
                                }
                                coords_emitted = false;
                        }

                        /* One of the bits needs to have been set that would
                         * have triggered an EOF.
                         */
                        assert(vc4->resolve & (PIPE_CLEAR_COLOR0 |
                                               PIPE_CLEAR_DEPTH |
                                               PIPE_CLEAR_STENCIL));
                        /* Any coords emitted must also have been consumed by
                         * a store.
                         */
                        assert(!coords_emitted);
                }
        }

        if (vc4->resolve & PIPE_CLEAR_COLOR0)
                ctex->writes++;

        if (vc4->resolve & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))
                ztex->writes++;
}
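The BRANCH_TO_SUB_LIST address in the loop above relies on the binner laying out each tile's initial 32-byte allocation block at a fixed slot in tile_alloc. A small sketch of that address math; the helper name and values are illustrative.

/* Hedged sketch of the per-tile sub-list address computation. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
tile_list_offset(uint32_t x, uint32_t y, uint32_t stride_in_tiles)
{
        /* 32 bytes of initial bin list per tile, row-major. */
        return (y * stride_in_tiles + x) * 32;
}

int main(void)
{
        uint32_t width = 1920;                        /* pixels */
        uint32_t stride_in_tiles = (width + 63) / 64; /* align(width, 64) / 64 */
        printf("tile (3,2) sub-list at %u bytes\n",
               tile_list_offset(3, 2, stride_in_tiles));
        return 0;
}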
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_update_shadow_textures(pctx, &vc4->verttex);
        vc4_update_shadow_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx);

        vc4_get_draw_cl_space(vc4);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        vc4_update_compiled_shaders(vc4, info->mode);

        vc4_emit_state(pctx);

        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                if (vc4->indexbuf.index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                cl_start_reloc(&vc4->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(vc4, &vc4->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);

                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                cl_u8(&bcl, info->mode);
                cl_u32(&bcl, info->count);
                cl_u32(&bcl, info->start);
        }
        cl_end(&vc4->bcl, bcl);

        if (vc4->zsa && vc4->zsa->base.depth.enabled) {
                vc4->resolve |= PIPE_CLEAR_DEPTH;
        }
        if (vc4->zsa && vc4->zsa->base.stencil[0].enabled)
                vc4->resolve |= PIPE_CLEAR_STENCIL;
        vc4->resolve |= PIPE_CLEAR_COLOR0;

        vc4->shader_rec_count++;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
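When index_size is 4 above, the hardware cannot consume the buffer directly (the packet only encodes 8- and 16-bit index types), so vc4_get_shadow_index_buffer() substitutes a narrowed copy. A hedged sketch of the narrowing step only; the hypothetical narrow_indices_u32_to_u16() ignores the shadow-resource caching the real helper does.

/* Hedged sketch: narrowing 32-bit indices to the 16-bit indices the
 * hardware supports, assuming all index values fit. */
#include <assert.h>
#include <stdint.h>

static void
narrow_indices_u32_to_u16(const uint32_t *src, uint16_t *dst, unsigned count)
{
        for (unsigned i = 0; i < count; i++) {
                assert(src[i] <= 0xffff); /* must fit after narrowing */
                dst[i] = (uint16_t)src[i];
        }
}

int main(void)
{
        uint32_t src[3] = { 0, 7, 65535 };
        uint16_t dst[3];
        narrow_indices_u32_to_u16(src, dst, 3);
        return dst[2] == 65535 ? 0 : 1;
}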
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info)
{
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
        /* Emit the shader record. */
        struct vc4_cl_out *shader_rec =
                cl_start_shader_reloc(&vc4->shader_rec, 3 + num_elements_emit);
        /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
        cl_u16(&shader_rec,
               VC4_SHADER_FLAG_ENABLE_CLIPPING |
               VC4_SHADER_FLAG_FS_SINGLE_THREAD |
               ((info->mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex) ?
                VC4_SHADER_FLAG_VS_POINT_SIZE : 0));

        /* VC4_DIRTY_COMPILED_FS */
        cl_u8(&shader_rec, 0); /* fs num uniforms (unused) */
        cl_u8(&shader_rec, vc4->prog.fs->num_inputs);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.fs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_VS */
        cl_u16(&shader_rec, 0); /* vs num uniforms */
        cl_u8(&shader_rec, vc4->prog.vs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[8]);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.vs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_CS */
        cl_u16(&shader_rec, 0); /* cs num uniforms */
        cl_u8(&shader_rec, vc4->prog.cs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[8]);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.cs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * info->index_bias);
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_reloc(vc4, &vc4->shader_rec, &shader_rec, rsc->bo, offset);
                cl_u8(&shader_rec, elem_size - 1);
                cl_u8(&shader_rec, vb->stride);
                cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[i]);
                cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[i]);

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");
                cl_reloc(vc4, &vc4->shader_rec, &shader_rec, bo, 0);
                cl_u8(&shader_rec, 16 - 1); /* element size */
                cl_u8(&shader_rec, 0); /* stride */
                cl_u8(&shader_rec, 0); /* VS VPM offset */
                cl_u8(&shader_rec, 0); /* CS VPM offset */
                vc4_bo_unreference(&bo);
        }
        cl_end(&vc4->shader_rec, shader_rec);

        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        /* the actual draw call. */
        cl_u8(&bcl, VC4_PACKET_GL_SHADER_STATE);
        assert(vtx->num_elements <= 8);
        /* Note that number of attributes == 0 in the packet means 8
         * attributes.  This field also contains the offset into shader_rec.
         */
        cl_u32(&bcl, num_elements_emit & 0x7);
        cl_end(&vc4->bcl, bcl);

        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = info->index_bias;
        vc4->max_index = max_index;
}
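The max_index clamp in the attribute loop above bounds the highest index the hardware may fetch without reading past the end of a vertex buffer: for each array with a nonzero stride, the last fully fetchable index is (vb_size - elem_size) / stride, and the draw's maximum is the minimum across arrays. A self-contained sketch of that computation (clamp_max_index() is a hypothetical name):

/* Hedged sketch of the per-attribute max_index clamp. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
clamp_max_index(uint32_t max_index, uint32_t vb_size,
                uint32_t elem_size, uint32_t stride)
{
        if (stride == 0)
                return max_index; /* constant attribute: no limit */
        uint32_t last = (vb_size - elem_size) / stride;
        return last < max_index ? last : max_index;
}

int main(void)
{
        uint32_t max_index = 0xffff;
        /* e.g. a 1000-byte VB of 12-byte vec3 positions, stride 12 */
        max_index = clamp_max_index(max_index, 1000, 12, 12);
        printf("max_index = %u\n", max_index); /* 82 */
        return 0;
}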
void
vc4_emit_state(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        if (vc4->dirty & (VC4_DIRTY_SCISSOR | VC4_DIRTY_VIEWPORT |
                          VC4_DIRTY_RASTERIZER)) {
                float *vpscale = vc4->viewport.scale;
                float *vptranslate = vc4->viewport.translate;
                float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
                float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
                float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
                float vp_maxy = fabsf(vpscale[1]) + vptranslate[1];

                /* Clip to the scissor if it's enabled, but still clip to the
                 * drawable regardless since that controls where the binner
                 * tries to put things.
                 *
                 * Additionally, always clip the rendering to the viewport,
                 * since the hardware does guardband clipping, meaning
                 * primitives would rasterize outside of the view volume.
                 */
                uint32_t minx, miny, maxx, maxy;
                if (!vc4->rasterizer->base.scissor) {
                        minx = MAX2(vp_minx, 0);
                        miny = MAX2(vp_miny, 0);
                        maxx = MIN2(vp_maxx, vc4->draw_width);
                        maxy = MIN2(vp_maxy, vc4->draw_height);
                } else {
                        minx = MAX2(vp_minx, vc4->scissor.minx);
                        miny = MAX2(vp_miny, vc4->scissor.miny);
                        maxx = MIN2(vp_maxx, vc4->scissor.maxx);
                        maxy = MIN2(vp_maxy, vc4->scissor.maxy);
                }

                cl_u8(&bcl, VC4_PACKET_CLIP_WINDOW);
                cl_u16(&bcl, minx);
                cl_u16(&bcl, miny);
                cl_u16(&bcl, maxx - minx);
                cl_u16(&bcl, maxy - miny);

                vc4->draw_min_x = MIN2(vc4->draw_min_x, minx);
                vc4->draw_min_y = MIN2(vc4->draw_min_y, miny);
                vc4->draw_max_x = MAX2(vc4->draw_max_x, maxx);
                vc4->draw_max_y = MAX2(vc4->draw_max_y, maxy);
        }

        if (vc4->dirty & (VC4_DIRTY_RASTERIZER | VC4_DIRTY_ZSA)) {
                uint8_t ez_enable_mask_out = ~0;

                /* HW-2905: If the RCL ends up doing a full-res load when
                 * multisampling, then early Z tracking may end up with values
                 * from the previous tile due to a HW bug.  Disable it to
                 * avoid that.
                 *
                 * We should be able to skip this when the Z is cleared, but I
                 * was seeing bad rendering on glxgears -samples 4 even in
                 * that case.
                 */
                if (vc4->msaa)
                        ez_enable_mask_out &= ~VC4_CONFIG_BITS_EARLY_Z;

                cl_u8(&bcl, VC4_PACKET_CONFIGURATION_BITS);
                cl_u8(&bcl,
                      vc4->rasterizer->config_bits[0] |
                      vc4->zsa->config_bits[0]);
                cl_u8(&bcl,
                      vc4->rasterizer->config_bits[1] |
                      vc4->zsa->config_bits[1]);
                cl_u8(&bcl,
                      (vc4->rasterizer->config_bits[2] |
                       vc4->zsa->config_bits[2]) & ez_enable_mask_out);
        }

        if (vc4->dirty & VC4_DIRTY_RASTERIZER) {
                cl_u8(&bcl, VC4_PACKET_DEPTH_OFFSET);
                cl_u16(&bcl, vc4->rasterizer->offset_factor);
                cl_u16(&bcl, vc4->rasterizer->offset_units);

                cl_u8(&bcl, VC4_PACKET_POINT_SIZE);
                cl_f(&bcl, vc4->rasterizer->point_size);

                cl_u8(&bcl, VC4_PACKET_LINE_WIDTH);
                cl_f(&bcl, vc4->rasterizer->base.line_width);
        }

        if (vc4->dirty & VC4_DIRTY_VIEWPORT) {
                cl_u8(&bcl, VC4_PACKET_CLIPPER_XY_SCALING);
                cl_f(&bcl, vc4->viewport.scale[0] * 16.0f);
                cl_f(&bcl, vc4->viewport.scale[1] * 16.0f);

                cl_u8(&bcl, VC4_PACKET_CLIPPER_Z_SCALING);
                cl_f(&bcl, vc4->viewport.translate[2]);
                cl_f(&bcl, vc4->viewport.scale[2]);

                cl_u8(&bcl, VC4_PACKET_VIEWPORT_OFFSET);
                cl_u16(&bcl, 16 * vc4->viewport.translate[0]);
                cl_u16(&bcl, 16 * vc4->viewport.translate[1]);
        }

        if (vc4->dirty & VC4_DIRTY_FLAT_SHADE_FLAGS) {
                cl_u8(&bcl, VC4_PACKET_FLAT_SHADE_FLAGS);
                cl_u32(&bcl, vc4->rasterizer->base.flatshade ?
                       vc4->prog.fs->color_inputs : 0);
        }

        cl_end(&vc4->bcl, bcl);
}
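The vp_minx/vp_maxx math above recovers the viewport's screen-space extent from the scale/translate pair: the viewport transform maps clip-space [-1, 1] to scale * x + translate, so the bounds are translate plus or minus |scale|. A sketch with an illustrative full-window viewport (viewport_bounds() is a hypothetical helper):

/* Hedged sketch of the viewport-extent math used for the clip window. */
#include <math.h>
#include <stdio.h>

struct box { float minx, miny, maxx, maxy; };

static struct box
viewport_bounds(const float scale[2], const float translate[2])
{
        return (struct box){
                .minx = -fabsf(scale[0]) + translate[0],
                .maxx =  fabsf(scale[0]) + translate[0],
                .miny = -fabsf(scale[1]) + translate[1],
                .maxy =  fabsf(scale[1]) + translate[1],
        };
}

int main(void)
{
        /* e.g. a full 640x480 viewport: scale = dims / 2, translate = center */
        float scale[2] = { 320.0f, 240.0f }, translate[2] = { 320.0f, 240.0f };
        struct box b = viewport_bounds(scale, translate);
        printf("%g,%g .. %g,%g\n", b.minx, b.miny, b.maxx, b.maxy); /* 0,0 .. 640,480 */
        return 0;
}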
/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        if (vc4->needs_flush)
                return;

        uint32_t width = vc4->framebuffer.width;
        uint32_t height = vc4->framebuffer.height;
        uint32_t tilew = align(width, 64) / 64;
        uint32_t tileh = align(height, 64) / 64;

        /* Tile alloc memory setup: We use an initial alloc size of 32b.  The
         * hardware then aligns that to 256b (we use 4096, because all of our
         * BO allocations align to that anyway), then for some reason the
         * simulator wants an extra page available, even if you have overflow
         * memory set up.
         */
        uint32_t tile_alloc_size = 32 * tilew * tileh;
        tile_alloc_size = align(tile_alloc_size, 4096);
        tile_alloc_size += 4096;
        uint32_t tile_state_size = 48 * tilew * tileh;
        if (!vc4->tile_alloc || vc4->tile_alloc->size < tile_alloc_size) {
                vc4_bo_unreference(&vc4->tile_alloc);
                vc4->tile_alloc = vc4_bo_alloc(vc4->screen, tile_alloc_size,
                                               "tile_alloc");
        }
        if (!vc4->tile_state || vc4->tile_state->size < tile_state_size) {
                vc4_bo_unreference(&vc4->tile_state);
                vc4->tile_state = vc4_bo_alloc(vc4->screen, tile_state_size,
                                               "tile_state");
        }

        // Tile state data is 48 bytes per tile, I think it can be thrown away
        // as soon as binning is finished.
        cl_start_reloc(&vc4->bcl, 2);
        cl_u8(&vc4->bcl, VC4_PACKET_TILE_BINNING_MODE_CONFIG);
        cl_reloc(vc4, &vc4->bcl, vc4->tile_alloc, 0);
        cl_u32(&vc4->bcl, vc4->tile_alloc->size);
        cl_reloc(vc4, &vc4->bcl, vc4->tile_state, 0);
        cl_u8(&vc4->bcl, tilew);
        cl_u8(&vc4->bcl, tileh);
        cl_u8(&vc4->bcl,
              VC4_BIN_CONFIG_AUTO_INIT_TSDA |
              VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 |
              VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32);

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_u8(&vc4->bcl, VC4_PACKET_START_TILE_BINNING);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_u8(&vc4->bcl, VC4_PACKET_PRIMITIVE_LIST_FORMAT);
        cl_u8(&vc4->bcl, (VC4_PRIMITIVE_LIST_FORMAT_16_INDEX |
                          VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES));

        vc4->needs_flush = true;
        vc4->draw_call_queued = true;
}
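A sketch of the tile_alloc sizing rule described in the comment above: 32 bytes of initial bin list per 64x64 tile, rounded up to the 4096-byte BO alignment the driver uses, plus the extra page the simulator wants (align_up() and tile_alloc_size() are hypothetical helpers):

/* Hedged sketch of the tile_alloc BO sizing. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
align_up(uint32_t v, uint32_t a)
{
        return (v + a - 1) / a * a;
}

static uint32_t
tile_alloc_size(uint32_t width, uint32_t height)
{
        uint32_t tilew = align_up(width, 64) / 64;
        uint32_t tileh = align_up(height, 64) / 64;
        /* 32b initial alloc per tile, BO-aligned, plus the simulator's page. */
        return align_up(32 * tilew * tileh, 4096) + 4096;
}

int main(void)
{
        printf("%u\n", tile_alloc_size(1920, 1080)); /* 30x17 tiles -> 20480 */
        return 0;
}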
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                return;
        }

        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        vc4_update_compiled_shaders(vc4, info->mode);

        vc4_emit_state(pctx);
        vc4->dirty = 0;

        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
        /* Emit the shader record. */
        cl_start_shader_reloc(&vc4->shader_rec, 3 + num_elements_emit);
        cl_u16(&vc4->shader_rec,
               VC4_SHADER_FLAG_ENABLE_CLIPPING |
               ((info->mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex) ?
                VC4_SHADER_FLAG_VS_POINT_SIZE : 0));
        cl_u8(&vc4->shader_rec, 0); /* fs num uniforms (unused) */
        cl_u8(&vc4->shader_rec, vc4->prog.fs->num_inputs);
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.fs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        cl_u16(&vc4->shader_rec, 0); /* vs num uniforms */
        cl_u8(&vc4->shader_rec, (1 << num_elements_emit) - 1); /* vs attribute array bitfield */
        cl_u8(&vc4->shader_rec, 16 * num_elements_emit); /* vs total attribute size */
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.vs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        cl_u16(&vc4->shader_rec, 0); /* cs num uniforms */
        cl_u8(&vc4->shader_rec, (1 << num_elements_emit) - 1); /* cs attribute array bitfield */
        cl_u8(&vc4->shader_rec, 16 * num_elements_emit); /* cs total attribute size */
        cl_reloc(vc4, &vc4->shader_rec, vc4->prog.cs->bo, 0);
        cl_u32(&vc4->shader_rec, 0); /* UBO offset written by kernel */

        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer);
                uint32_t offset = vb->buffer_offset + elem->src_offset;
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_reloc(vc4, &vc4->shader_rec, rsc->bo, offset);
                cl_u8(&vc4->shader_rec, elem_size - 1);
                cl_u8(&vc4->shader_rec, vb->stride);
                cl_u8(&vc4->shader_rec, i * 16); /* VS VPM offset */
                cl_u8(&vc4->shader_rec, i * 16); /* CS VPM offset */

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");
                cl_reloc(vc4, &vc4->shader_rec, bo, 0);
                cl_u8(&vc4->shader_rec, 16 - 1); /* element size */
                cl_u8(&vc4->shader_rec, 0); /* stride */
                cl_u8(&vc4->shader_rec, 0); /* VS VPM offset */
                cl_u8(&vc4->shader_rec, 0); /* CS VPM offset */
                vc4_bo_unreference(&bo);
        }

        /* the actual draw call. */
        cl_u8(&vc4->bcl, VC4_PACKET_GL_SHADER_STATE);
        assert(vtx->num_elements <= 8);
        /* Note that number of attributes == 0 in the packet means 8
         * attributes.  This field also contains the offset into shader_rec.
         */
        cl_u32(&vc4->bcl, num_elements_emit & 0x7);

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->indexed) {
                struct vc4_resource *rsc = vc4_resource(vc4->indexbuf.buffer);
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                if (rsc->shadow_parent) {
                        vc4_update_shadow_index_buffer(pctx, &vc4->indexbuf);
                        offset = 0;
                        index_size = 2;
                }

                cl_start_reloc(&vc4->bcl, 1);
                cl_u8(&vc4->bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&vc4->bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&vc4->bcl, info->count);
                cl_reloc(vc4, &vc4->bcl, rsc->bo, offset);
                cl_u32(&vc4->bcl, max_index);
        } else {
                cl_u8(&vc4->bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                cl_u8(&vc4->bcl, info->mode);
                cl_u32(&vc4->bcl, info->count);
                cl_u32(&vc4->bcl, info->start);
        }

        if (vc4->zsa && vc4->zsa->base.depth.enabled) {
                vc4->resolve |= PIPE_CLEAR_DEPTH;
        }
        if (vc4->zsa && vc4->zsa->base.stencil[0].enabled)
                vc4->resolve |= PIPE_CLEAR_STENCIL;
        vc4->resolve |= PIPE_CLEAR_COLOR0;

        vc4->shader_rec_count++;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
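With the fixed 16-bytes-per-attribute VPM layout this version uses, the shader record's "attributes live" bitfield and total attribute size are pure functions of num_elements_emit. A tiny sketch of those two computations:

/* Hedged sketch of the shader-record attribute fields. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t num_elements_emit = 3;
        uint8_t attrs_live = (uint8_t)((1u << num_elements_emit) - 1); /* 0b111 */
        uint8_t total_size = (uint8_t)(16 * num_elements_emit);        /* 48 */
        printf("live=0x%x size=%u\n", attrs_live, total_size);
        return 0;
}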
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert,
                                                   &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert,
                                                       &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info, 0);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                if (vc4->indexbuf.index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                cl_start_reloc(&job->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16 :
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(job, &job->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);
                job->draw_calls_queued++;

                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step = count;
                        static const uint32_t max_verts = 65535;

                        /* GFXH-515 / SW-5891: The binner emits 16 bit indices
                         * for drawarrays, which means that if start + count >
                         * 64k it would truncate the top bits.  Work around
                         * this by emitting a limited number of primitives at
                         * a time and reemitting the shader state pointing
                         * farther down the vertex attribute arrays.
                         *
                         * To do this properly for line loops or trifans, we'd
                         * need to make a new VB containing the first vertex
                         * plus whatever remainder.
                         */
                        if (extra_index_bias) {
                                cl_end(&job->bcl, bcl);
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                                bcl = cl_start(&job->bcl);
                        }

                        if (start + count > max_verts) {
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step = max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step = max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                        cl_u8(&bcl, info->mode);
                        cl_u32(&bcl, this_count);
                        cl_u32(&bcl, start);
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                }
        }
        cl_end(&job->bcl, bcl);

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
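A sketch of the GFXH-515 chunking math above: each chunk emits this_count vertices but only advances by step, so strip primitives re-emit their one or two trailing vertices in the next chunk. split_counts() is a hypothetical distillation of the switch; line loops and fans need more than this, as the comment in the code notes.

/* Hedged sketch of the >65535-vertex array-draw split. */
#include <stdint.h>
#include <stdio.h>

enum prim { POINTS, LINES, LINE_STRIP, TRIANGLES, TRIANGLE_STRIP };

static void
split_counts(enum prim mode, uint32_t max_verts,
             uint32_t *this_count, uint32_t *step)
{
        switch (mode) {
        case POINTS:         *this_count = *step = max_verts; break;
        case LINES:          *this_count = *step = max_verts - (max_verts % 2); break;
        case LINE_STRIP:     *this_count = max_verts; *step = max_verts - 1; break;
        case TRIANGLES:      *this_count = *step = max_verts - (max_verts % 3); break;
        case TRIANGLE_STRIP: *this_count = max_verts; *step = max_verts - 2; break;
        }
}

int main(void)
{
        uint32_t this_count, step;
        split_counts(TRIANGLES, 65535, &this_count, &step);
        printf("tris:  count=%u step=%u\n", this_count, step); /* 65535, 65535 */
        split_counts(TRIANGLE_STRIP, 65535, &this_count, &step);
        printf("strip: count=%u step=%u\n", this_count, step); /* 65535, 65533 */
        return 0;
}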
static void
vc4_setup_rcl(struct vc4_context *vc4)
{
        struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
        struct vc4_resource *ctex = vc4_resource(csurf->base.texture);
        uint32_t resolve_uncleared = vc4->resolve & ~vc4->cleared;
        uint32_t width = vc4->framebuffer.width;
        uint32_t height = vc4->framebuffer.height;
        uint32_t xtiles = align(width, 64) / 64;
        uint32_t ytiles = align(height, 64) / 64;

#if 0
        fprintf(stderr, "RCL: resolve 0x%x clear 0x%x resolve uncleared 0x%x\n",
                vc4->resolve,
                vc4->cleared,
                resolve_uncleared);
#endif

        cl_u8(&vc4->rcl, VC4_PACKET_CLEAR_COLORS);
        cl_u32(&vc4->rcl, vc4->clear_color[0]);
        cl_u32(&vc4->rcl, vc4->clear_color[1]);
        cl_u32(&vc4->rcl, vc4->clear_depth);
        cl_u8(&vc4->rcl, 0);

        cl_start_reloc(&vc4->rcl, 1);
        cl_u8(&vc4->rcl, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
        cl_reloc(vc4, &vc4->rcl, ctex->bo, csurf->offset);
        cl_u16(&vc4->rcl, width);
        cl_u16(&vc4->rcl, height);
        cl_u16(&vc4->rcl, ((csurf->tiling <<
                            VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT) |
                           (vc4_rt_format_is_565(csurf->base.format) ?
                            VC4_RENDER_CONFIG_FORMAT_BGR565 :
                            VC4_RENDER_CONFIG_FORMAT_RGBA8888) |
                           VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE));

        /* The tile buffer normally gets cleared when the previous tile is
         * stored.  If the clear values changed between frames, then the tile
         * buffer has stale clear values in it, so we have to do a store in
         * None mode (no writes) so that we trigger the tile buffer clear.
         */
        if (vc4->cleared & PIPE_CLEAR_COLOR0) {
                cl_u8(&vc4->rcl, VC4_PACKET_TILE_COORDINATES);
                cl_u8(&vc4->rcl, 0);
                cl_u8(&vc4->rcl, 0);

                cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
                cl_u16(&vc4->rcl, VC4_LOADSTORE_TILE_BUFFER_NONE);
                cl_u32(&vc4->rcl, 0); /* no address, since we're in None mode */
        }

        for (int y = 0; y < ytiles; y++) {
                for (int x = 0; x < xtiles; x++) {
                        bool end_of_frame = (x == xtiles - 1 &&
                                             y == ytiles - 1);

                        /* Note that the load doesn't actually occur until the
                         * tile coords packet is processed.
                         */
                        if (resolve_uncleared & PIPE_CLEAR_COLOR) {
                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_COLOR |
                                      (csurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl,
                                      vc4_rt_format_is_565(csurf->base.format) ?
                                      VC4_LOADSTORE_TILE_BUFFER_BGR565 :
                                      VC4_LOADSTORE_TILE_BUFFER_RGBA8888);
                                cl_reloc(vc4, &vc4->rcl, ctex->bo, csurf->offset);
                        }

                        cl_u8(&vc4->rcl, VC4_PACKET_TILE_COORDINATES);
                        cl_u8(&vc4->rcl, x);
                        cl_u8(&vc4->rcl, y);

                        cl_start_reloc(&vc4->rcl, 1);
                        cl_u8(&vc4->rcl, VC4_PACKET_BRANCH_TO_SUB_LIST);
                        cl_reloc(vc4, &vc4->rcl, vc4->tile_alloc,
                                 (y * xtiles + x) * 32);

                        if (vc4->resolve & PIPE_CLEAR_COLOR0) {
                                if (end_of_frame) {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
                                } else {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER);
                                }
                        } else {
                                assert(!"unfinished: Need to end the frame\n");
                        }
                }
        }
}