static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
      const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->cleared |= buffers;
   ctx->resolve |= buffers;
   ctx->needs_flush = true;

   if (buffers & PIPE_CLEAR_COLOR)
      fd_resource(pfb->cbufs[0]->texture)->dirty = true;

   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      fd_resource(pfb->zsbuf->texture)->dirty = true;
      ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
   }

   DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
      util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
      util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   ctx->clear(ctx, buffers, color, depth, stencil);

   ctx->dirty |= FD_DIRTY_ZSA |
         FD_DIRTY_RASTERIZER |
         FD_DIRTY_SAMPLE_MASK |
         FD_DIRTY_PROG |
         FD_DIRTY_CONSTBUF |
         FD_DIRTY_BLEND;

   if (fd_mesa_debug & FD_DBG_DCLEAR)
      ctx->dirty = 0xffffffff;
}
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
      const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
   struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
   unsigned cleared_buffers;

   /* for bookkeeping about which buffers have been cleared (and thus
    * can fully or partially skip mem2gmem) we need to ignore buffers
    * that have already had a draw, in case apps do silly things like
    * clear after draw (ie. if you only clear the color buffer, but
    * something like alpha-test causes side effects from the draw in
    * the depth buffer, etc)
    */
   cleared_buffers = buffers & (FD_BUFFER_ALL & ~ctx->restore);

   /* do we have full-screen scissor? */
   if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
      ctx->cleared |= cleared_buffers;
   } else {
      ctx->partial_cleared |= cleared_buffers;
      if (cleared_buffers & PIPE_CLEAR_COLOR)
         ctx->cleared_scissor.color = *scissor;
      if (cleared_buffers & PIPE_CLEAR_DEPTH)
         ctx->cleared_scissor.depth = *scissor;
      if (cleared_buffers & PIPE_CLEAR_STENCIL)
         ctx->cleared_scissor.stencil = *scissor;
   }

   ctx->resolve |= buffers;
   ctx->needs_flush = true;

   if (buffers & PIPE_CLEAR_COLOR)
      fd_resource(pfb->cbufs[0]->texture)->dirty = true;

   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      fd_resource(pfb->zsbuf->texture)->dirty = true;
      ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
   }

   DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
      util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
      util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_CLEAR);

   ctx->clear(ctx, buffers, color, depth, stencil);

   ctx->dirty |= FD_DIRTY_ZSA |
         FD_DIRTY_VIEWPORT |
         FD_DIRTY_RASTERIZER |
         FD_DIRTY_SAMPLE_MASK |
         FD_DIRTY_PROG |
         FD_DIRTY_CONSTBUF |
         FD_DIRTY_BLEND;

   if (fd_mesa_debug & FD_DBG_DCLEAR)
      ctx->dirty = 0xffffffff;
}
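/*
 * Aside, a minimal sketch (hypothetical helper, not driver code) of what
 * the full-screen-scissor memcmp() above is deciding: a clear only lets
 * us skip mem2gmem for a buffer when its scissor rect covers the whole
 * framebuffer.  A scissored clear touches only some tiles, so the buffer
 * is tracked in partial_cleared instead, and contents outside the rect
 * must still be restored.
 */
static inline bool
clear_covers_fb(const struct pipe_scissor_state *scissor,
      unsigned fb_width, unsigned fb_height)
{
   return (scissor->minx == 0) && (scissor->miny == 0) &&
         (scissor->maxx >= fb_width) && (scissor->maxy >= fb_height);
}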
void
fd_gmem_render_tiles(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   bool sysmem = false;

   if (ctx->emit_sysmem_prep) {
      if (batch->cleared || batch->gmem_reason || (batch->num_draws > 5)) {
         DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
            batch->cleared, batch->gmem_reason, batch->num_draws);
      } else if (!(fd_mesa_debug & FD_DBG_NOBYPASS)) {
         sysmem = true;
      }
   }

   fd_reset_wfi(batch);

   ctx->stats.batch_total++;

   if (sysmem) {
      DBG("%p: rendering sysmem %ux%u (%s/%s)", batch,
         pfb->width, pfb->height,
         util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
         util_format_short_name(pipe_surface_format(pfb->zsbuf)));
      fd_hw_query_prepare(batch, 1);
      render_sysmem(batch);
      ctx->stats.batch_sysmem++;
   } else {
      struct fd_gmem_stateobj *gmem = &ctx->gmem;
      calculate_tiles(batch);
      DBG("%p: rendering %dx%d tiles %ux%u (%s/%s)", batch,
         pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y,
         util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
         util_format_short_name(pipe_surface_format(pfb->zsbuf)));
      fd_hw_query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
      render_tiles(batch);
      ctx->stats.batch_gmem++;
   }

   flush_ring(batch);
}
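/*
 * For clarity, the sysmem-vs-GMEM decision above boils down to the
 * following predicate (a sketch with a hypothetical name): "bypass"
 * rendering straight to system memory is only worth it for small
 * batches with no clears and nothing that forces GMEM (blend, depth,
 * logicop, MSAA, ...), and only when the generation provides an
 * emit_sysmem_prep hook.
 */
static inline bool
use_sysmem_rendering(struct fd_batch *batch)
{
   if (!batch->ctx->emit_sysmem_prep)
      return false;
   if (fd_mesa_debug & FD_DBG_NOBYPASS)
      return false;
   return !batch->cleared && !batch->gmem_reason &&
         (batch->num_draws <= 5);
}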
static void
fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
{
   struct fd3_context *fd3_ctx = fd3_context(ctx);
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
   struct fd3_emit emit = {
      .vtx  = &ctx->vtx,
      .prog = &ctx->prog,
      .info = info,
      .key = {
         /* do binning pass first: */
         .binning_pass = true,
         .color_two_side = ctx->rasterizer ? ctx->rasterizer->light_twoside : false,
         .alpha = util_format_is_alpha(pipe_surface_format(pfb->cbufs[0])),
         // TODO set .half_precision based on render target format,
         // ie. float16 and smaller use half, float32 use full..
         .half_precision = !!(fd_mesa_debug & FD_DBG_FRAGHALF),
         .has_per_samp = (fd3_ctx->fsaturate || fd3_ctx->vsaturate ||
               fd3_ctx->vinteger_s || fd3_ctx->finteger_s),
         .vsaturate_s = fd3_ctx->vsaturate_s,
         .vsaturate_t = fd3_ctx->vsaturate_t,
         .vsaturate_r = fd3_ctx->vsaturate_r,
         .fsaturate_s = fd3_ctx->fsaturate_s,
         .fsaturate_t = fd3_ctx->fsaturate_t,
         .fsaturate_r = fd3_ctx->fsaturate_r,
         .vinteger_s = fd3_ctx->vinteger_s,
         .finteger_s = fd3_ctx->finteger_s,
      },
      .format = pipe_surface_format(pfb->cbufs[0]),
      .rasterflat = ctx->rasterizer && ctx->rasterizer->flatshade,
   };
   unsigned dirty;

   fixup_shader_state(ctx, &emit.key);

   dirty = ctx->dirty;
   emit.dirty = dirty & ~(FD_DIRTY_BLEND);
   draw_impl(ctx, ctx->binning_ring, &emit);

   /* and now regular (non-binning) pass: */
   emit.key.binning_pass = false;
   emit.dirty = dirty;
   emit.vp = NULL;   /* we changed key so need to refetch vp */
   draw_impl(ctx, ctx->ring, &emit);
}
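/*
 * Note on fd3_draw_vbo() above: each draw is emitted twice.  The first
 * draw_impl() call writes into the binning ring with key.binning_pass
 * set, which (as the fd3_program_emit() variants further down show)
 * selects a position-only program with no fragment work, so
 * FD_DIRTY_BLEND can be masked out of emit.dirty for that pass.  The
 * second call re-emits the same draw into the normal ring with the
 * full fragment shader.
 */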
void
fd2_emit_state(struct fd_context *ctx, const enum fd_dirty_3d_state dirty)
{
   struct fd2_blend_stateobj *blend = fd2_blend_stateobj(ctx->blend);
   struct fd2_zsa_stateobj *zsa = fd2_zsa_stateobj(ctx->zsa);
   struct fd_ringbuffer *ring = ctx->batch->draw;

   /* NOTE: we probably want to eventually refactor this so each state
    * object handles emitting its own state..  although the mapping of
    * state to registers is not always orthogonal, sometimes a single
    * register contains bitfields coming from multiple state objects,
    * so not sure the best way to deal with that yet.
    */

   if (dirty & FD_DIRTY_SAMPLE_MASK) {
      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_AA_MASK));
      OUT_RING(ring, ctx->sample_mask);
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
      struct pipe_stencil_ref *sr = &ctx->stencil_ref;

      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_DEPTHCONTROL));
      OUT_RING(ring, zsa->rb_depthcontrol);

      OUT_PKT3(ring, CP_SET_CONSTANT, 4);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_STENCILREFMASK_BF));
      OUT_RING(ring, zsa->rb_stencilrefmask_bf |
            A2XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[1]));
      OUT_RING(ring, zsa->rb_stencilrefmask |
            A2XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
      OUT_RING(ring, zsa->rb_alpha_ref);
   }

   if (ctx->rasterizer && dirty & FD_DIRTY_RASTERIZER) {
      struct fd2_rasterizer_stateobj *rasterizer =
            fd2_rasterizer_stateobj(ctx->rasterizer);

      OUT_PKT3(ring, CP_SET_CONSTANT, 3);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_CLIP_CNTL));
      OUT_RING(ring, rasterizer->pa_cl_clip_cntl);
      OUT_RING(ring, rasterizer->pa_su_sc_mode_cntl |
            A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE);

      OUT_PKT3(ring, CP_SET_CONSTANT, 5);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_SU_POINT_SIZE));
      OUT_RING(ring, rasterizer->pa_su_point_size);
      OUT_RING(ring, rasterizer->pa_su_point_minmax);
      OUT_RING(ring, rasterizer->pa_su_line_cntl);
      OUT_RING(ring, rasterizer->pa_sc_line_stipple);

      OUT_PKT3(ring, CP_SET_CONSTANT, 6);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_SU_VTX_CNTL));
      OUT_RING(ring, rasterizer->pa_su_vtx_cntl);
      OUT_RING(ring, fui(1.0));   /* PA_CL_GB_VERT_CLIP_ADJ */
      OUT_RING(ring, fui(1.0));   /* PA_CL_GB_VERT_DISC_ADJ */
      OUT_RING(ring, fui(1.0));   /* PA_CL_GB_HORZ_CLIP_ADJ */
      OUT_RING(ring, fui(1.0));   /* PA_CL_GB_HORZ_DISC_ADJ */
   }

   /* NOTE: scissor enabled bit is part of rasterizer state: */
   if (dirty & (FD_DIRTY_SCISSOR | FD_DIRTY_RASTERIZER)) {
      struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

      OUT_PKT3(ring, CP_SET_CONSTANT, 3);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_SCISSOR_TL));
      OUT_RING(ring, xy2d(scissor->minx,    /* PA_SC_WINDOW_SCISSOR_TL */
            scissor->miny));
      OUT_RING(ring, xy2d(scissor->maxx,    /* PA_SC_WINDOW_SCISSOR_BR */
            scissor->maxy));

      ctx->batch->max_scissor.minx =
            MIN2(ctx->batch->max_scissor.minx, scissor->minx);
      ctx->batch->max_scissor.miny =
            MIN2(ctx->batch->max_scissor.miny, scissor->miny);
      ctx->batch->max_scissor.maxx =
            MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
      ctx->batch->max_scissor.maxy =
            MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
   }

   if (dirty & FD_DIRTY_VIEWPORT) {
      OUT_PKT3(ring, CP_SET_CONSTANT, 7);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VPORT_XSCALE));
      OUT_RING(ring, fui(ctx->viewport.scale[0]));       /* PA_CL_VPORT_XSCALE */
      OUT_RING(ring, fui(ctx->viewport.translate[0]));   /* PA_CL_VPORT_XOFFSET */
      OUT_RING(ring, fui(ctx->viewport.scale[1]));       /* PA_CL_VPORT_YSCALE */
      OUT_RING(ring, fui(ctx->viewport.translate[1]));   /* PA_CL_VPORT_YOFFSET */
      OUT_RING(ring, fui(ctx->viewport.scale[2]));       /* PA_CL_VPORT_ZSCALE */
      OUT_RING(ring, fui(ctx->viewport.translate[2]));   /* PA_CL_VPORT_ZOFFSET */

      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VTE_CNTL));
      OUT_RING(ring, A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT |
            A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA |
            A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA |
            A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA |
            A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA |
            A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA |
            A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA);
   }

   if (dirty & (FD_DIRTY_PROG | FD_DIRTY_VTXSTATE | FD_DIRTY_TEXSTATE)) {
      fd2_program_validate(ctx);
      fd2_program_emit(ring, &ctx->prog);
   }

   if (dirty & (FD_DIRTY_PROG | FD_DIRTY_CONST)) {
      emit_constants(ring, VS_CONST_BASE * 4,
            &ctx->constbuf[PIPE_SHADER_VERTEX],
            (dirty & FD_DIRTY_PROG) ? ctx->prog.vp : NULL);
      emit_constants(ring, PS_CONST_BASE * 4,
            &ctx->constbuf[PIPE_SHADER_FRAGMENT],
            (dirty & FD_DIRTY_PROG) ? ctx->prog.fp : NULL);
   }

   if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_ZSA)) {
      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_COLORCONTROL));
      OUT_RING(ring, blend ? zsa->rb_colorcontrol | blend->rb_colorcontrol : 0);
   }

   if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) {
      enum pipe_format format =
            pipe_surface_format(ctx->batch->framebuffer.cbufs[0]);
      bool has_alpha = util_format_has_alpha(format);

      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_BLEND_CONTROL));
      OUT_RING(ring, blend ? blend->rb_blendcontrol_alpha |
            COND(has_alpha, blend->rb_blendcontrol_rgb) |
            COND(!has_alpha, blend->rb_blendcontrol_no_alpha_rgb) : 0);

      OUT_PKT3(ring, CP_SET_CONSTANT, 2);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_MASK));
      OUT_RING(ring, blend ? blend->rb_colormask : 0xf);
   }

   if (dirty & FD_DIRTY_BLEND_COLOR) {
      OUT_PKT3(ring, CP_SET_CONSTANT, 5);
      OUT_RING(ring, CP_REG(REG_A2XX_RB_BLEND_RED));
      OUT_RING(ring, float_to_ubyte(ctx->blend_color.color[0]));
      OUT_RING(ring, float_to_ubyte(ctx->blend_color.color[1]));
      OUT_RING(ring, float_to_ubyte(ctx->blend_color.color[2]));
      OUT_RING(ring, float_to_ubyte(ctx->blend_color.color[3]));
   }

   if (dirty & (FD_DIRTY_TEX | FD_DIRTY_PROG))
      emit_textures(ring, ctx);
}
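/*
 * Aside: fui(), used heavily above, is the gallium utility that returns
 * the raw IEEE-754 bit pattern of a float, since OUT_RING() deals only
 * in uint32_t register payloads.  A self-contained equivalent, as a
 * sketch:
 */
static inline uint32_t
float_bits(float f)
{
   union { float f; uint32_t u; } u;
   u.f = f;
   return u.u;   /* e.g. float_bits(1.0f) == 0x3f800000 */
}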
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
   struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
   unsigned i, prims, buffers = 0;

   /* if we supported transform feedback, we'd have to disable this: */
   if (((scissor->maxx - scissor->minx) *
         (scissor->maxy - scissor->miny)) == 0) {
      return;
   }

   /* TODO: push down the region versions into the tiles */
   if (!fd_render_condition_check(pctx))
      return;

   /* emulate unsupported primitives: */
   if (!fd_supported_prim(ctx, info->mode)) {
      if (ctx->streamout.num_targets > 0)
         debug_error("stream-out with emulated prims");
      util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
      util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
      util_primconvert_draw_vbo(ctx->primconvert, info);
      return;
   }

   ctx->needs_flush = true;

   /*
    * Figure out the buffers/features we need:
    */

   if (fd_depth_enabled(ctx)) {
      buffers |= FD_BUFFER_DEPTH;
      resource_written(ctx, pfb->zsbuf->texture);
      ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
   }

   if (fd_stencil_enabled(ctx)) {
      buffers |= FD_BUFFER_STENCIL;
      resource_written(ctx, pfb->zsbuf->texture);
      ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
   }

   if (fd_logicop_enabled(ctx))
      ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      struct pipe_resource *surf;

      if (!pfb->cbufs[i])
         continue;

      surf = pfb->cbufs[i]->texture;

      resource_written(ctx, surf);
      buffers |= PIPE_CLEAR_COLOR0 << i;

      if (surf->nr_samples > 1)
         ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

      if (fd_blend_enabled(ctx, i))
         ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
   }

   /* Skip over buffer 0, that is sent along with the command stream */
   for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      resource_read(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
      resource_read(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
   }

   /* Mark VBOs as being read */
   for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
      assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
      resource_read(ctx, ctx->vtx.vertexbuf.vb[i].buffer);
   }

   /* Mark index buffer as being read */
   resource_read(ctx, ctx->indexbuf.buffer);

   /* Mark textures as being read */
   for (i = 0; i < ctx->verttex.num_textures; i++)
      if (ctx->verttex.textures[i])
         resource_read(ctx, ctx->verttex.textures[i]->texture);
   for (i = 0; i < ctx->fragtex.num_textures; i++)
      if (ctx->fragtex.textures[i])
         resource_read(ctx, ctx->fragtex.textures[i]->texture);

   /* Mark streamout buffers as being written.. */
   for (i = 0; i < ctx->streamout.num_targets; i++)
      if (ctx->streamout.targets[i])
         resource_written(ctx, ctx->streamout.targets[i]->buffer);

   ctx->num_draws++;

   prims = u_reduced_prims_for_vertices(info->mode, info->count);

   ctx->stats.draw_calls++;

   /* TODO prims_emitted should be clipped when the stream-out buffer is
    * not large enough.  See max_tf_vtx()..  probably need to move that
    * into common code.  Although a bit more annoying since a2xx doesn't
    * use ir3 so no common way to get at the pipe_stream_output_info
    * which is needed for this calculation.
    */
   if (ctx->streamout.num_targets > 0)
      ctx->stats.prims_emitted += prims;
   ctx->stats.prims_generated += prims;

   /* any buffers that haven't been cleared yet, we need to restore: */
   ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
   /* and any buffers used, need to be resolved: */
   ctx->resolve |= buffers;

   DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
      util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
      util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
   ctx->draw_vbo(ctx, info);

   for (i = 0; i < ctx->streamout.num_targets; i++)
      ctx->streamout.offsets[i] += info->count;

   if (fd_mesa_debug & FD_DBG_DDRAW)
      ctx->dirty = 0xffffffff;

   /* if an app (or, well, piglit test) does many thousands of draws
    * without flush (or anything which implicitly flushes, like
    * changing render targets), we can exceed the ringbuffer size.
    * Since we don't currently have a sane way to wrap around, and
    * we use the same buffer for both draw and tiling commands, for
    * now we need to do this hack and trigger flush if we are running
    * low on remaining space for cmds:
    */
   if (((ctx->ring->cur - ctx->ring->start) >
         (ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
         (fd_mesa_debug & FD_DBG_FLUSH))
      fd_context_render(pctx);
}
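/*
 * Sketch of the low-space check above (hypothetical helper): cur and
 * start are uint32_t pointers, so their difference counts dwords
 * already written, while size is in bytes (hence size/4).  Keeping
 * FD_TILING_COMMANDS_DWORDS in reserve guarantees the per-tile setup
 * commands emitted at flush time still fit in the same buffer.
 */
static inline bool
ring_low_on_space(const struct fd_ringbuffer *ring)
{
   uint32_t used_dwords = ring->cur - ring->start;
   uint32_t total_dwords = ring->size / 4;
   return used_dwords > (total_dwords - FD_TILING_COMMANDS_DWORDS);
}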
void
fd3_program_emit(struct fd_ringbuffer *ring, struct fd3_emit *emit,
      int nr, struct pipe_surface **bufs)
{
   const struct ir3_shader_variant *vp, *fp;
   const struct ir3_info *vsi, *fsi;
   enum a3xx_instrbuffermode fpbuffer, vpbuffer;
   uint32_t fpbuffersz, vpbuffersz, fsoff;
   uint32_t pos_regid, posz_regid, psize_regid, color_regid[4] = {0};
   int constmode;
   int i, j, k;

   debug_assert(nr <= ARRAY_SIZE(color_regid));

   vp = fd3_emit_get_vp(emit);
   fp = fd3_emit_get_fp(emit);

   vsi = &vp->info;
   fsi = &fp->info;

   fpbuffer = BUFFER;
   vpbuffer = BUFFER;
   fpbuffersz = fp->instrlen;
   vpbuffersz = vp->instrlen;

   /*
    * Decide whether to use BUFFER or CACHE mode for VS and FS.  It
    * appears like 256 is the hard limit, but when the combined size
    * exceeds 128 then blob will try to keep FS in BUFFER mode and
    * switch to CACHE for VS until VS is too large.  The blob seems
    * to switch FS out of BUFFER mode at slightly under 128.  But
    * since the exact decision tree is a bit fuzzy, use slightly
    * conservative limits.
    *
    * TODO check if these thresholds for BUFFER vs CACHE mode are the
    * same for all a3xx or whether we need to consider the gpuid
    */
   if ((fpbuffersz + vpbuffersz) > 128) {
      if (fpbuffersz < 112) {
         /* FP:BUFFER   VP:CACHE  */
         vpbuffer = CACHE;
         vpbuffersz = 256 - fpbuffersz;
      } else if (vpbuffersz < 112) {
         /* FP:CACHE    VP:BUFFER */
         fpbuffer = CACHE;
         fpbuffersz = 256 - vpbuffersz;
      } else {
         /* FP:CACHE    VP:CACHE  */
         vpbuffer = fpbuffer = CACHE;
         vpbuffersz = fpbuffersz = 192;
      }
   }

   if (fpbuffer == BUFFER) {
      fsoff = 128 - fpbuffersz;
   } else {
      fsoff = 256 - fpbuffersz;
   }

   /* seems like if vs->constlen + fs->constlen > 256, then CONSTMODE=1 */
   constmode = ((vp->constlen + fp->constlen) > 256) ? 1 : 0;

   pos_regid = ir3_find_output_regid(vp, VARYING_SLOT_POS);
   posz_regid = ir3_find_output_regid(fp, FRAG_RESULT_DEPTH);
   psize_regid = ir3_find_output_regid(vp, VARYING_SLOT_PSIZ);
   if (fp->color0_mrt) {
      color_regid[0] = color_regid[1] = color_regid[2] = color_regid[3] =
            ir3_find_output_regid(fp, FRAG_RESULT_COLOR);
   } else {
      color_regid[0] = ir3_find_output_regid(fp, FRAG_RESULT_DATA0);
      color_regid[1] = ir3_find_output_regid(fp, FRAG_RESULT_DATA1);
      color_regid[2] = ir3_find_output_regid(fp, FRAG_RESULT_DATA2);
      color_regid[3] = ir3_find_output_regid(fp, FRAG_RESULT_DATA3);
   }

   /* adjust regids for alpha output formats.  there is no alpha render
    * format, so it's just treated like red
    */
   for (i = 0; i < nr; i++)
      if (util_format_is_alpha(pipe_surface_format(bufs[i])))
         color_regid[i] += 3;

   /* we could probably divide this up into things that need to be
    * emitted if frag-prog is dirty vs if vert-prog is dirty..
    */

   OUT_PKT0(ring, REG_A3XX_HLSQ_CONTROL_0_REG, 6);
   OUT_RING(ring, A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(FOUR_QUADS) |
         A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(constmode) |
         /* NOTE: I guess SHADERRESTART and CONSTFULLUPDATE maybe
          * flush some caches?  I think we only need to set those
          * bits if we have updated const or shader..
          */
         A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART |
         A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE);
   OUT_RING(ring, A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(TWO_QUADS) |
         A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE |
         COND(fp->frag_coord, A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(regid(0,0)) |
               A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(regid(0,2))));
   OUT_RING(ring, A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(31));
   OUT_RING(ring, A3XX_HLSQ_CONTROL_3_REG_REGID(fp->pos_regid));
   OUT_RING(ring, A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(vp->constlen) |
         A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(0) |
         A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(vpbuffersz));
   OUT_RING(ring, A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(fp->constlen) |
         A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(128) |
         A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(fpbuffersz));

   OUT_PKT0(ring, REG_A3XX_SP_SP_CTRL_REG, 1);
   OUT_RING(ring, A3XX_SP_SP_CTRL_REG_CONSTMODE(constmode) |
         COND(emit->key.binning_pass, A3XX_SP_SP_CTRL_REG_BINNING) |
         A3XX_SP_SP_CTRL_REG_SLEEPMODE(1) |
         A3XX_SP_SP_CTRL_REG_L0MODE(0));

   OUT_PKT0(ring, REG_A3XX_SP_VS_LENGTH_REG, 1);
   OUT_RING(ring, A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(vp->instrlen));

   OUT_PKT0(ring, REG_A3XX_SP_VS_CTRL_REG0, 3);
   OUT_RING(ring, A3XX_SP_VS_CTRL_REG0_THREADMODE(MULTI) |
         A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(vpbuffer) |
         COND(vpbuffer == CACHE, A3XX_SP_VS_CTRL_REG0_CACHEINVALID) |
         A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(vsi->max_half_reg + 1) |
         A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vsi->max_reg + 1) |
         A3XX_SP_VS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
         A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE |
         A3XX_SP_VS_CTRL_REG0_LENGTH(vpbuffersz));
   OUT_RING(ring, A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(vp->constlen) |
         A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(vp->total_in) |
         A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(MAX2(vp->constlen + 1, 0)));
   OUT_RING(ring, A3XX_SP_VS_PARAM_REG_POSREGID(pos_regid) |
         A3XX_SP_VS_PARAM_REG_PSIZEREGID(psize_regid) |
         A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(fp->varying_in));

   for (i = 0, j = -1; (i < 8) && (j < (int)fp->inputs_count); i++) {
      uint32_t reg = 0;

      OUT_PKT0(ring, REG_A3XX_SP_VS_OUT_REG(i), 1);

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count) {
         k = ir3_find_output(vp, fp->inputs[j].slot);
         reg |= A3XX_SP_VS_OUT_REG_A_REGID(vp->outputs[k].regid);
         reg |= A3XX_SP_VS_OUT_REG_A_COMPMASK(fp->inputs[j].compmask);
      }

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count) {
         k = ir3_find_output(vp, fp->inputs[j].slot);
         reg |= A3XX_SP_VS_OUT_REG_B_REGID(vp->outputs[k].regid);
         reg |= A3XX_SP_VS_OUT_REG_B_COMPMASK(fp->inputs[j].compmask);
      }

      OUT_RING(ring, reg);
   }

   for (i = 0, j = -1; (i < 4) && (j < (int)fp->inputs_count); i++) {
      uint32_t reg = 0;

      OUT_PKT0(ring, REG_A3XX_SP_VS_VPC_DST_REG(i), 1);

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC0(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC1(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC2(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC3(fp->inputs[j].inloc);

      OUT_RING(ring, reg);
   }

   OUT_PKT0(ring, REG_A3XX_SP_VS_OBJ_OFFSET_REG, 2);
   OUT_RING(ring, A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(0) |
         A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(0));
   OUT_RELOC(ring, vp->bo, 0, 0, 0);   /* SP_VS_OBJ_START_REG */

   if (emit->key.binning_pass) {
      OUT_PKT0(ring, REG_A3XX_SP_FS_LENGTH_REG, 1);
      OUT_RING(ring, 0x00000000);

      OUT_PKT0(ring, REG_A3XX_SP_FS_CTRL_REG0, 2);
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG0_THREADMODE(MULTI) |
            A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(BUFFER));
      OUT_RING(ring, 0x00000000);

      OUT_PKT0(ring, REG_A3XX_SP_FS_OBJ_OFFSET_REG, 1);
      OUT_RING(ring, A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(128) |
            A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(0));
   } else {
      OUT_PKT0(ring, REG_A3XX_SP_FS_LENGTH_REG, 1);
      OUT_RING(ring, A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(fp->instrlen));

      OUT_PKT0(ring, REG_A3XX_SP_FS_CTRL_REG0, 2);
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG0_THREADMODE(MULTI) |
            A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(fpbuffer) |
            COND(fpbuffer == CACHE, A3XX_SP_FS_CTRL_REG0_CACHEINVALID) |
            A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(fsi->max_half_reg + 1) |
            A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fsi->max_reg + 1) |
            A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP |
            A3XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
            A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE |
            COND(fp->has_samp > 0, A3XX_SP_FS_CTRL_REG0_PIXLODENABLE) |
            A3XX_SP_FS_CTRL_REG0_LENGTH(fpbuffersz));
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(fp->constlen) |
            A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(fp->total_in) |
            A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(MAX2(fp->constlen + 1, 0)) |
            A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(63));

      OUT_PKT0(ring, REG_A3XX_SP_FS_OBJ_OFFSET_REG, 2);
      OUT_RING(ring, A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(
            MAX2(128, vp->constlen)) |
            A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(fsoff));
      OUT_RELOC(ring, fp->bo, 0, 0, 0);   /* SP_FS_OBJ_START_REG */
   }

   OUT_PKT0(ring, REG_A3XX_SP_FS_OUTPUT_REG, 1);
   OUT_RING(ring, COND(fp->writes_pos, A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE) |
         A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(posz_regid) |
         A3XX_SP_FS_OUTPUT_REG_MRT(MAX2(1, nr) - 1));

   OUT_PKT0(ring, REG_A3XX_SP_FS_MRT_REG(0), 4);
   for (i = 0; i < 4; i++) {
      uint32_t mrt_reg = A3XX_SP_FS_MRT_REG_REGID(color_regid[i]) |
            COND(fp->key.half_precision, A3XX_SP_FS_MRT_REG_HALF_PRECISION);

      if (i < nr) {
         enum pipe_format fmt = pipe_surface_format(bufs[i]);
         mrt_reg |= COND(util_format_is_pure_uint(fmt), A3XX_SP_FS_MRT_REG_UINT) |
               COND(util_format_is_pure_sint(fmt), A3XX_SP_FS_MRT_REG_SINT);
      }
      OUT_RING(ring, mrt_reg);
   }

   if (emit->key.binning_pass) {
      OUT_PKT0(ring, REG_A3XX_VPC_ATTR, 2);
      OUT_RING(ring, A3XX_VPC_ATTR_THRDASSIGN(1) |
            A3XX_VPC_ATTR_LMSIZE(1) |
            COND(vp->writes_psize, A3XX_VPC_ATTR_PSIZE));
      OUT_RING(ring, 0x00000000);
   } else {
      uint32_t vinterp[4], flatshade[2], vpsrepl[4];

      memset(vinterp, 0, sizeof(vinterp));
      memset(flatshade, 0, sizeof(flatshade));
      memset(vpsrepl, 0, sizeof(vpsrepl));

      /* figure out VARYING_INTERP / FLAT_SHAD register values: */
      for (j = -1; (j = ir3_next_varying(fp, j)) < (int)fp->inputs_count; ) {
         /* NOTE: varyings are packed, so if compmask is 0xb
          * then first, third, and fourth component occupy
          * three consecutive varying slots:
          */
         unsigned compmask = fp->inputs[j].compmask;

         /* TODO might be cleaner to just +8 in SP_VS_VPC_DST_REG
          * instead..  rather than -8 everywhere else..
          */
         uint32_t inloc = fp->inputs[j].inloc - 8;

         if ((fp->inputs[j].interpolate == INTERP_QUALIFIER_FLAT) ||
               (fp->inputs[j].rasterflat && emit->rasterflat)) {
            uint32_t loc = inloc;

            for (i = 0; i < 4; i++) {
               if (compmask & (1 << i)) {
                  vinterp[loc / 16] |= FLAT << ((loc % 16) * 2);
                  flatshade[loc / 32] |= 1 << (loc % 32);
                  loc++;
               }
            }
         }

         gl_varying_slot slot = fp->inputs[j].slot;

         /* since we don't enable PIPE_CAP_TGSI_TEXCOORD: */
         if (slot >= VARYING_SLOT_VAR0) {
            unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
            /* Replace the .xy coordinates with S/T from the point sprite.
             * Set interpolation bits for .zw such that they become .01
             */
            if (emit->sprite_coord_enable & texmask) {
               /* mask is two 2-bit fields, where:
                *   '01' -> S
                *   '10' -> T
                *   '11' -> 1 - T  (flip mode)
                */
               unsigned mask = emit->sprite_coord_mode ? 0b1101 : 0b1001;
               uint32_t loc = inloc;
               if (compmask & 0x1) {
                  vpsrepl[loc / 16] |= ((mask >> 0) & 0x3) << ((loc % 16) * 2);
                  loc++;
               }
               if (compmask & 0x2) {
                  vpsrepl[loc / 16] |= ((mask >> 2) & 0x3) << ((loc % 16) * 2);
                  loc++;
               }
               if (compmask & 0x4) {
                  /* .z <- 0.0f */
                  vinterp[loc / 16] |= 0b10 << ((loc % 16) * 2);
                  loc++;
               }
               if (compmask & 0x8) {
                  /* .w <- 1.0f */
                  vinterp[loc / 16] |= 0b11 << ((loc % 16) * 2);
                  loc++;
               }
            }
void
fd3_program_emit(struct fd_ringbuffer *ring, struct fd3_emit *emit,
      int nr, struct pipe_surface **bufs)
{
   const struct ir3_shader_variant *vp, *fp;
   const struct ir3_info *vsi, *fsi;
   enum a3xx_instrbuffermode fpbuffer, vpbuffer;
   uint32_t fpbuffersz, vpbuffersz, fsoff;
   uint32_t pos_regid, posz_regid, psize_regid, color_regid[4] = {0};
   int constmode;
   int i, j, k;

   debug_assert(nr <= ARRAY_SIZE(color_regid));

   vp = fd3_emit_get_vp(emit);

   if (emit->key.binning_pass) {
      /* use dummy stateobj to simplify binning vs non-binning: */
      static const struct ir3_shader_variant binning_fp = {};
      fp = &binning_fp;
   } else {
      fp = fd3_emit_get_fp(emit);
   }

   vsi = &vp->info;
   fsi = &fp->info;

   fpbuffer = BUFFER;
   vpbuffer = BUFFER;
   fpbuffersz = fp->instrlen;
   vpbuffersz = vp->instrlen;

   /*
    * Decide whether to use BUFFER or CACHE mode for VS and FS.  It
    * appears like 256 is the hard limit, but when the combined size
    * exceeds 128 then blob will try to keep FS in BUFFER mode and
    * switch to CACHE for VS until VS is too large.  The blob seems
    * to switch FS out of BUFFER mode at slightly under 128.  But
    * since the exact decision tree is a bit fuzzy, use slightly
    * conservative limits.
    *
    * TODO check if these thresholds for BUFFER vs CACHE mode are the
    * same for all a3xx or whether we need to consider the gpuid
    */
   if ((fpbuffersz + vpbuffersz) > 128) {
      if (fpbuffersz < 112) {
         /* FP:BUFFER   VP:CACHE  */
         vpbuffer = CACHE;
         vpbuffersz = 256 - fpbuffersz;
      } else if (vpbuffersz < 112) {
         /* FP:CACHE    VP:BUFFER */
         fpbuffer = CACHE;
         fpbuffersz = 256 - vpbuffersz;
      } else {
         /* FP:CACHE    VP:CACHE  */
         vpbuffer = fpbuffer = CACHE;
         vpbuffersz = fpbuffersz = 192;
      }
   }

   if (fpbuffer == BUFFER) {
      fsoff = 128 - fpbuffersz;
   } else {
      fsoff = 256 - fpbuffersz;
   }

   /* seems like if vs->constlen + fs->constlen > 256, then CONSTMODE=1 */
   constmode = ((vp->constlen + fp->constlen) > 256) ? 1 : 0;

   pos_regid = ir3_find_output_regid(vp,
         ir3_semantic_name(TGSI_SEMANTIC_POSITION, 0));
   posz_regid = ir3_find_output_regid(fp,
         ir3_semantic_name(TGSI_SEMANTIC_POSITION, 0));
   psize_regid = ir3_find_output_regid(vp,
         ir3_semantic_name(TGSI_SEMANTIC_PSIZE, 0));
   if (fp->color0_mrt) {
      color_regid[0] = color_regid[1] = color_regid[2] = color_regid[3] =
            ir3_find_output_regid(fp,
                  ir3_semantic_name(TGSI_SEMANTIC_COLOR, 0));
   } else {
      for (i = 0; i < fp->outputs_count; i++) {
         ir3_semantic sem = fp->outputs[i].semantic;
         unsigned idx = sem2idx(sem);
         if (sem2name(sem) != TGSI_SEMANTIC_COLOR)
            continue;
         debug_assert(idx < ARRAY_SIZE(color_regid));
         color_regid[idx] = fp->outputs[i].regid;
      }
   }

   /* adjust regids for alpha output formats.  there is no alpha render
    * format, so it's just treated like red
    */
   for (i = 0; i < nr; i++)
      if (util_format_is_alpha(pipe_surface_format(bufs[i])))
         color_regid[i] += 3;

   /* we could probably divide this up into things that need to be
    * emitted if frag-prog is dirty vs if vert-prog is dirty..
    */

   OUT_PKT0(ring, REG_A3XX_HLSQ_CONTROL_0_REG, 6);
   OUT_RING(ring, A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(FOUR_QUADS) |
         A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(constmode) |
         /* NOTE: I guess SHADERRESTART and CONSTFULLUPDATE maybe
          * flush some caches?  I think we only need to set those
          * bits if we have updated const or shader..
          */
         A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART |
         A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE);
   OUT_RING(ring, A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(TWO_QUADS) |
         A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE |
         COND(fp->frag_coord, A3XX_HLSQ_CONTROL_1_REG_ZWCOORD));
   OUT_RING(ring, A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(31));
   OUT_RING(ring, A3XX_HLSQ_CONTROL_3_REG_REGID(fp->pos_regid));
   OUT_RING(ring, A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(vp->constlen) |
         A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(0) |
         A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(vpbuffersz));
   OUT_RING(ring, A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(fp->constlen) |
         A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(128) |
         A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(fpbuffersz));

   OUT_PKT0(ring, REG_A3XX_SP_SP_CTRL_REG, 1);
   OUT_RING(ring, A3XX_SP_SP_CTRL_REG_CONSTMODE(constmode) |
         COND(emit->key.binning_pass, A3XX_SP_SP_CTRL_REG_BINNING) |
         A3XX_SP_SP_CTRL_REG_SLEEPMODE(1) |
         A3XX_SP_SP_CTRL_REG_L0MODE(0));

   OUT_PKT0(ring, REG_A3XX_SP_VS_LENGTH_REG, 1);
   OUT_RING(ring, A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(vp->instrlen));

   OUT_PKT0(ring, REG_A3XX_SP_VS_CTRL_REG0, 3);
   OUT_RING(ring, A3XX_SP_VS_CTRL_REG0_THREADMODE(MULTI) |
         A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(vpbuffer) |
         COND(vpbuffer == CACHE, A3XX_SP_VS_CTRL_REG0_CACHEINVALID) |
         A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(vsi->max_half_reg + 1) |
         A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vsi->max_reg + 1) |
         A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(0) |
         A3XX_SP_VS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
         A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE |
         COND(vp->has_samp, A3XX_SP_VS_CTRL_REG0_PIXLODENABLE) |
         A3XX_SP_VS_CTRL_REG0_LENGTH(vpbuffersz));
   OUT_RING(ring, A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(vp->constlen) |
         A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(vp->total_in) |
         A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(MAX2(vp->constlen + 1, 0)));
   OUT_RING(ring, A3XX_SP_VS_PARAM_REG_POSREGID(pos_regid) |
         A3XX_SP_VS_PARAM_REG_PSIZEREGID(psize_regid) |
         A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(align(fp->total_in, 4) / 4));

   for (i = 0, j = -1; (i < 8) && (j < (int)fp->inputs_count); i++) {
      uint32_t reg = 0;

      OUT_PKT0(ring, REG_A3XX_SP_VS_OUT_REG(i), 1);

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count) {
         k = ir3_find_output(vp, fp->inputs[j].semantic);
         reg |= A3XX_SP_VS_OUT_REG_A_REGID(vp->outputs[k].regid);
         reg |= A3XX_SP_VS_OUT_REG_A_COMPMASK(fp->inputs[j].compmask);
      }

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count) {
         k = ir3_find_output(vp, fp->inputs[j].semantic);
         reg |= A3XX_SP_VS_OUT_REG_B_REGID(vp->outputs[k].regid);
         reg |= A3XX_SP_VS_OUT_REG_B_COMPMASK(fp->inputs[j].compmask);
      }

      OUT_RING(ring, reg);
   }

   for (i = 0, j = -1; (i < 4) && (j < (int)fp->inputs_count); i++) {
      uint32_t reg = 0;

      OUT_PKT0(ring, REG_A3XX_SP_VS_VPC_DST_REG(i), 1);

      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC0(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC1(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC2(fp->inputs[j].inloc);
      j = ir3_next_varying(fp, j);
      if (j < fp->inputs_count)
         reg |= A3XX_SP_VS_VPC_DST_REG_OUTLOC3(fp->inputs[j].inloc);

      OUT_RING(ring, reg);
   }

   OUT_PKT0(ring, REG_A3XX_SP_VS_OBJ_OFFSET_REG, 2);
   OUT_RING(ring, A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(0) |
         A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(0));
   OUT_RELOC(ring, vp->bo, 0, 0, 0);   /* SP_VS_OBJ_START_REG */

   if (emit->key.binning_pass) {
      OUT_PKT0(ring, REG_A3XX_SP_FS_LENGTH_REG, 1);
      OUT_RING(ring, 0x00000000);

      OUT_PKT0(ring, REG_A3XX_SP_FS_CTRL_REG0, 2);
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG0_THREADMODE(MULTI) |
            A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(BUFFER));
      OUT_RING(ring, 0x00000000);

      OUT_PKT0(ring, REG_A3XX_SP_FS_OBJ_OFFSET_REG, 1);
      OUT_RING(ring, A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(128) |
            A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(0));
   } else {
      OUT_PKT0(ring, REG_A3XX_SP_FS_LENGTH_REG, 1);
      OUT_RING(ring, A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(fp->instrlen));

      OUT_PKT0(ring, REG_A3XX_SP_FS_CTRL_REG0, 2);
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG0_THREADMODE(MULTI) |
            A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(fpbuffer) |
            COND(fpbuffer == CACHE, A3XX_SP_FS_CTRL_REG0_CACHEINVALID) |
            A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(fsi->max_half_reg + 1) |
            A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fsi->max_reg + 1) |
            A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(1) |
            A3XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
            A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE |
            COND(fp->has_samp > 0, A3XX_SP_FS_CTRL_REG0_PIXLODENABLE) |
            A3XX_SP_FS_CTRL_REG0_LENGTH(fpbuffersz));
      OUT_RING(ring, A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(fp->constlen) |
            A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(fp->total_in) |
            A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(MAX2(fp->constlen + 1, 0)) |
            A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(63));

      OUT_PKT0(ring, REG_A3XX_SP_FS_OBJ_OFFSET_REG, 2);
      OUT_RING(ring, A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(
            MAX2(128, vp->constlen)) |
            A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(fsoff));
      OUT_RELOC(ring, fp->bo, 0, 0, 0);   /* SP_FS_OBJ_START_REG */
   }

   OUT_PKT0(ring, REG_A3XX_SP_FS_OUTPUT_REG, 1);
   OUT_RING(ring, COND(fp->writes_pos, A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE) |
         A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(posz_regid) |
         A3XX_SP_FS_OUTPUT_REG_MRT(MAX2(1, nr) - 1));

   OUT_PKT0(ring, REG_A3XX_SP_FS_MRT_REG(0), 4);
   for (i = 0; i < 4; i++) {
      uint32_t mrt_reg = A3XX_SP_FS_MRT_REG_REGID(color_regid[i]) |
            COND(fp->key.half_precision, A3XX_SP_FS_MRT_REG_HALF_PRECISION);

      if (i < nr) {
         enum pipe_format fmt = pipe_surface_format(bufs[i]);
         mrt_reg |= COND(util_format_is_pure_uint(fmt), A3XX_SP_FS_MRT_REG_UINT) |
               COND(util_format_is_pure_sint(fmt), A3XX_SP_FS_MRT_REG_SINT);
      }
      OUT_RING(ring, mrt_reg);
   }

   if (emit->key.binning_pass) {
      OUT_PKT0(ring, REG_A3XX_VPC_ATTR, 2);
      OUT_RING(ring, A3XX_VPC_ATTR_THRDASSIGN(1) |
            A3XX_VPC_ATTR_LMSIZE(1) |
            COND(vp->writes_psize, A3XX_VPC_ATTR_PSIZE));
      OUT_RING(ring, 0x00000000);
   } else {
      uint32_t vinterp[4], flatshade[2], vpsrepl[4];

      memset(vinterp, 0, sizeof(vinterp));
      memset(flatshade, 0, sizeof(flatshade));
      memset(vpsrepl, 0, sizeof(vpsrepl));

      /* figure out VARYING_INTERP / FLAT_SHAD register values: */
      for (j = -1; (j = ir3_next_varying(fp, j)) < (int)fp->inputs_count; ) {
         uint32_t interp = fp->inputs[j].interpolate;

         /* TODO might be cleaner to just +8 in SP_VS_VPC_DST_REG
          * instead..  rather than -8 everywhere else..
          */
         uint32_t inloc = fp->inputs[j].inloc - 8;

         /* currently assuming varyings aligned to 4 (not packed): */
         debug_assert((inloc % 4) == 0);

         if ((interp == TGSI_INTERPOLATE_CONSTANT) ||
               ((interp == TGSI_INTERPOLATE_COLOR) && emit->rasterflat)) {
            uint32_t loc = inloc;

            for (i = 0; i < 4; i++, loc++) {
               vinterp[loc / 16] |= FLAT << ((loc % 16) * 2);
               flatshade[loc / 32] |= 1 << (loc % 32);
            }
         }

         /* Replace the .xy coordinates with S/T from the point sprite.
          * Set interpolation bits for .zw such that they become .01
          */
         if (emit->sprite_coord_enable &
               (1 << sem2idx(fp->inputs[j].semantic))) {
            vpsrepl[inloc / 16] |= (emit->sprite_coord_mode ? 0x0d : 0x09)
                  << ((inloc % 16) * 2);
            vinterp[(inloc + 2) / 16] |= 2 << (((inloc + 2) % 16) * 2);
            vinterp[(inloc + 3) / 16] |= 3 << (((inloc + 3) % 16) * 2);
         }
      }

      OUT_PKT0(ring, REG_A3XX_VPC_ATTR, 2);
      OUT_RING(ring, A3XX_VPC_ATTR_TOTALATTR(fp->total_in) |
            A3XX_VPC_ATTR_THRDASSIGN(1) |
            A3XX_VPC_ATTR_LMSIZE(1) |
            COND(vp->writes_psize, A3XX_VPC_ATTR_PSIZE));
      OUT_RING(ring, A3XX_VPC_PACK_NUMFPNONPOSVAR(fp->total_in) |
            A3XX_VPC_PACK_NUMNONPOSVSVAR(fp->total_in));

      OUT_PKT0(ring, REG_A3XX_VPC_VARYING_INTERP_MODE(0), 4);
      OUT_RING(ring, vinterp[0]);    /* VPC_VARYING_INTERP[0].MODE */
      OUT_RING(ring, vinterp[1]);    /* VPC_VARYING_INTERP[1].MODE */
      OUT_RING(ring, vinterp[2]);    /* VPC_VARYING_INTERP[2].MODE */
      OUT_RING(ring, vinterp[3]);    /* VPC_VARYING_INTERP[3].MODE */

      OUT_PKT0(ring, REG_A3XX_VPC_VARYING_PS_REPL_MODE(0), 4);
      OUT_RING(ring, vpsrepl[0]);    /* VPC_VARYING_PS_REPL[0].MODE */
      OUT_RING(ring, vpsrepl[1]);    /* VPC_VARYING_PS_REPL[1].MODE */
      OUT_RING(ring, vpsrepl[2]);    /* VPC_VARYING_PS_REPL[2].MODE */
      OUT_RING(ring, vpsrepl[3]);    /* VPC_VARYING_PS_REPL[3].MODE */

      OUT_PKT0(ring, REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, 2);
      OUT_RING(ring, flatshade[0]);  /* SP_FS_FLAT_SHAD_MODE_REG_0 */
      OUT_RING(ring, flatshade[1]);  /* SP_FS_FLAT_SHAD_MODE_REG_1 */
   }

   if (vpbuffer == BUFFER)
      emit_shader(ring, vp);

   OUT_PKT0(ring, REG_A3XX_VFD_PERFCOUNTER0_SELECT, 1);
   OUT_RING(ring, 0x00000000);        /* VFD_PERFCOUNTER0_SELECT */

   if (!emit->key.binning_pass) {
      if (fpbuffer == BUFFER)
         emit_shader(ring, fp);

      OUT_PKT0(ring, REG_A3XX_VFD_PERFCOUNTER0_SELECT, 1);
      OUT_RING(ring, 0x00000000);     /* VFD_PERFCOUNTER0_SELECT */
   }
}
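/*
 * Worked example of the BUFFER/CACHE split used by both fd3_program_emit()
 * variants above (numbers illustrative): with fp->instrlen = 100 and
 * vp->instrlen = 60, the combined size 160 exceeds 128; fpbuffersz is
 * below the 112 threshold, so the FS stays in BUFFER mode while the VS
 * falls back to CACHE mode:
 *
 *   vpbuffer   = CACHE;
 *   vpbuffersz = 256 - 100;   // = 156
 *   fsoff      = 128 - 100;   // = 28  (FS still in BUFFER mode)
 *
 * If instead both sizes were >= 112 (e.g. 120 and 120), both shaders go
 * to CACHE mode with vpbuffersz = fpbuffersz = 192 and
 * fsoff = 256 - 192 = 64.
 */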
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
   struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
   unsigned i, buffers = 0;

   /* if we supported transform feedback, we'd have to disable this: */
   if (((scissor->maxx - scissor->minx) *
         (scissor->maxy - scissor->miny)) == 0) {
      return;
   }

   /* emulate unsupported primitives: */
   if (!fd_supported_prim(ctx, info->mode)) {
      util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
      util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
      util_primconvert_draw_vbo(ctx->primconvert, info);
      return;
   }

   ctx->needs_flush = true;

   /*
    * Figure out the buffers/features we need:
    */

   if (fd_depth_enabled(ctx)) {
      buffers |= FD_BUFFER_DEPTH;
      fd_resource(pfb->zsbuf->texture)->dirty = true;
      ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
   }

   if (fd_stencil_enabled(ctx)) {
      buffers |= FD_BUFFER_STENCIL;
      fd_resource(pfb->zsbuf->texture)->dirty = true;
      ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
   }

   if (fd_logicop_enabled(ctx))
      ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      struct pipe_resource *surf;

      if (!pfb->cbufs[i])
         continue;

      surf = pfb->cbufs[i]->texture;

      fd_resource(surf)->dirty = true;
      buffers |= FD_BUFFER_COLOR;

      if (surf->nr_samples > 1)
         ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

      if (fd_blend_enabled(ctx, i))
         ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
   }

   ctx->num_draws++;

   ctx->stats.draw_calls++;
   ctx->stats.prims_emitted +=
         u_reduced_prims_for_vertices(info->mode, info->count);

   /* any buffers that haven't been cleared yet, we need to restore: */
   ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
   /* and any buffers used, need to be resolved: */
   ctx->resolve |= buffers;

   DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
      util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
      util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
   ctx->draw_vbo(ctx, info);

   /* if an app (or, well, piglit test) does many thousands of draws
    * without flush (or anything which implicitly flushes, like
    * changing render targets), we can exceed the ringbuffer size.
    * Since we don't currently have a sane way to wrap around, and
    * we use the same buffer for both draw and tiling commands, for
    * now we need to do this hack and trigger flush if we are running
    * low on remaining space for cmds:
    */
   if (((ctx->ring->cur - ctx->ring->start) >
         (ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
         (fd_mesa_debug & FD_DBG_FLUSH))
      fd_context_render(pctx);
}
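/*
 * Sketch of the restore/resolve bookkeeping shared by both fd_draw_vbo()
 * variants above (hypothetical helper): any buffer a draw touches must
 * later be resolved (written back from GMEM to system memory), and if it
 * was never cleared this frame it must first be restored (mem2gmem) so
 * the draw operates on the correct prior contents.
 */
static inline void
mark_buffers_used(struct fd_context *ctx, unsigned buffers)
{
   ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);   /* mem2gmem */
   ctx->resolve |= buffers;                                     /* gmem2mem */
}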