static void check_attrib_edgeflag(struct st_context *st)
{
   const struct gl_client_array **arrays = st->ctx->Array._DrawArrays;
   GLboolean vertdata_edgeflags, edgeflag_culls_prims, edgeflags_enabled;
   struct gl_vertex_program *vp = st->ctx->VertexProgram._Current;

   if (!arrays)
      return;

   edgeflags_enabled = st->ctx->Polygon.FrontMode != GL_FILL ||
                       st->ctx->Polygon.BackMode != GL_FILL;

   vertdata_edgeflags = edgeflags_enabled &&
                        arrays[VERT_ATTRIB_EDGEFLAG]->StrideB != 0;

   if (vertdata_edgeflags != st->vertdata_edgeflags) {
      st->vertdata_edgeflags = vertdata_edgeflags;
      if (vp)
         st->dirty |= ST_NEW_VERTEX_PROGRAM(st, st_vertex_program(vp));
   }

   edgeflag_culls_prims = edgeflags_enabled && !vertdata_edgeflags &&
                          !st->ctx->Current.Attrib[VERT_ATTRIB_EDGEFLAG][0];

   if (edgeflag_culls_prims != st->edgeflag_culls_prims) {
      st->edgeflag_culls_prims = edgeflag_culls_prims;
      st->dirty |= ST_NEW_RASTERIZER;
   }
}
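/* The function above follows the state tracker's lazy-validation pattern:
 * derive a boolean from current GL state, compare it against the cached
 * copy, and OR in a dirty bit only when it changed. Below is a minimal,
 * self-contained sketch of that pattern; the names here (tracker,
 * check_flag, DIRTY_RASTERIZER) are hypothetical and not part of the tree.
 */
#include <stdbool.h>
#include <stdint.h>

#define DIRTY_RASTERIZER (1ull << 0)

struct tracker {
   bool cached_flag;   /* last value the driver saw */
   uint64_t dirty;     /* accumulated dirty bits */
};

static void check_flag(struct tracker *t, bool derived_flag)
{
   if (derived_flag != t->cached_flag) {
      t->cached_flag = derived_flag;  /* update the cached copy... */
      t->dirty |= DIRTY_RASTERIZER;   /* ...and request re-validation */
   }
}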
/* Too complex to figure out, just check every time:
 */
static void check_program_state( struct st_context *st )
{
   struct gl_context *ctx = st->ctx;
   struct st_vertex_program *old_vp = st->vp;
   struct st_tessctrl_program *old_tcp = st->tcp;
   struct st_tesseval_program *old_tep = st->tep;
   struct st_geometry_program *old_gp = st->gp;
   struct st_fragment_program *old_fp = st->fp;

   struct gl_vertex_program *new_vp = ctx->VertexProgram._Current;
   struct gl_tess_ctrl_program *new_tcp = ctx->TessCtrlProgram._Current;
   struct gl_tess_eval_program *new_tep = ctx->TessEvalProgram._Current;
   struct gl_geometry_program *new_gp = ctx->GeometryProgram._Current;
   struct gl_fragment_program *new_fp = ctx->FragmentProgram._Current;
   uint64_t dirty = 0;

   /* Flag states used by both new and old shaders to unbind shader resources
    * properly when transitioning to shaders that don't use them.
    */
   if (unlikely(new_vp != &old_vp->Base)) {
      if (old_vp)
         dirty |= old_vp->affected_states;
      if (new_vp)
         dirty |= ST_NEW_VERTEX_PROGRAM(st, st_vertex_program(new_vp));
   }

   if (unlikely(new_tcp != &old_tcp->Base)) {
      if (old_tcp)
         dirty |= old_tcp->affected_states;
      if (new_tcp)
         dirty |= st_tessctrl_program(new_tcp)->affected_states;
   }

   if (unlikely(new_tep != &old_tep->Base)) {
      if (old_tep)
         dirty |= old_tep->affected_states;
      if (new_tep)
         dirty |= st_tesseval_program(new_tep)->affected_states;
   }

   if (unlikely(new_gp != &old_gp->Base)) {
      if (old_gp)
         dirty |= old_gp->affected_states;
      if (new_gp)
         dirty |= st_geometry_program(new_gp)->affected_states;
   }

   if (unlikely(new_fp != &old_fp->Base)) {
      if (old_fp)
         dirty |= old_fp->affected_states;
      if (new_fp)
         dirty |= st_fragment_program(new_fp)->affected_states;
   }

   st->dirty |= dirty;
   st->gfx_shaders_may_be_dirty = false;
}
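/* The per-stage comparisons above all implement one idea: a program switch
 * must dirty the union of the states used by both the outgoing and the
 * incoming shader, so that resources bound for the old program get unbound
 * even if the new program never references them. A hypothetical reduced
 * form of that rule (shader_switch_dirty is not a real Mesa function):
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t
shader_switch_dirty(bool have_old, uint64_t old_affected,
                    bool have_new, uint64_t new_affected)
{
   uint64_t dirty = 0;
   if (have_old)
      dirty |= old_affected;   /* unbind what the old shader used */
   if (have_new)
      dirty |= new_affected;   /* bind what the new shader needs */
   return dirty;
}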
static void
st_deserialise_ir_program(struct gl_context *ctx,
                          struct gl_shader_program *shProg,
                          struct gl_program *prog, bool nir)
{
   struct st_context *st = st_context(ctx);
   size_t size = prog->driver_cache_blob_size;
   uint8_t *buffer = (uint8_t *) prog->driver_cache_blob;
   const struct nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;

   assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);

   struct blob_reader blob_reader;
   blob_reader_init(&blob_reader, buffer, size);

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX: {
      struct st_vertex_program *stvp = (struct st_vertex_program *) prog;

      st_release_vp_variants(st, stvp);

      stvp->num_inputs = blob_read_uint32(&blob_reader);
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->index_to_input,
                      sizeof(stvp->index_to_input));
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->input_to_index,
                      sizeof(stvp->input_to_index));
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->result_to_output,
                      sizeof(stvp->result_to_output));

      read_stream_out_from_cache(&blob_reader, &stvp->tgsi);

      if (nir) {
         stvp->tgsi.type = PIPE_SHADER_IR_NIR;
         stvp->shader_program = shProg;
         stvp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stvp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stvp->tgsi.tokens,
                              &stvp->num_tgsi_tokens);
      }

      if (st->vp == stvp)
         st->dirty |= ST_NEW_VERTEX_PROGRAM(st, stvp);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      struct st_common_program *sttcp = st_common_program(prog);

      st_release_basic_variants(st, sttcp->Base.Target, &sttcp->variants,
                                &sttcp->tgsi);

      read_stream_out_from_cache(&blob_reader, &sttcp->tgsi);

      if (nir) {
         sttcp->tgsi.type = PIPE_SHADER_IR_NIR;
         sttcp->shader_program = shProg;
         sttcp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = sttcp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &sttcp->tgsi.tokens,
                              &sttcp->num_tgsi_tokens);
      }

      if (st->tcp == sttcp)
         st->dirty |= sttcp->affected_states;
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      struct st_common_program *sttep = st_common_program(prog);

      st_release_basic_variants(st, sttep->Base.Target, &sttep->variants,
                                &sttep->tgsi);

      read_stream_out_from_cache(&blob_reader, &sttep->tgsi);

      if (nir) {
         sttep->tgsi.type = PIPE_SHADER_IR_NIR;
         sttep->shader_program = shProg;
         sttep->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = sttep->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &sttep->tgsi.tokens,
                              &sttep->num_tgsi_tokens);
      }

      if (st->tep == sttep)
         st->dirty |= sttep->affected_states;
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      struct st_common_program *stgp = st_common_program(prog);

      st_release_basic_variants(st, stgp->Base.Target, &stgp->variants,
                                &stgp->tgsi);

      read_stream_out_from_cache(&blob_reader, &stgp->tgsi);

      if (nir) {
         stgp->tgsi.type = PIPE_SHADER_IR_NIR;
         stgp->shader_program = shProg;
         stgp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stgp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stgp->tgsi.tokens,
                              &stgp->num_tgsi_tokens);
      }

      if (st->gp == stgp)
         st->dirty |= stgp->affected_states;
      break;
   }
   case MESA_SHADER_FRAGMENT: {
      struct st_fragment_program *stfp = (struct st_fragment_program *) prog;

      st_release_fp_variants(st, stfp);

      if (nir) {
         stfp->tgsi.type = PIPE_SHADER_IR_NIR;
         stfp->shader_program = shProg;
         stfp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stfp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stfp->tgsi.tokens,
                              &stfp->num_tgsi_tokens);
      }

      if (st->fp == stfp)
         st->dirty |= stfp->affected_states;
      break;
   }
   case MESA_SHADER_COMPUTE: {
      struct st_compute_program *stcp = (struct st_compute_program *) prog;

      st_release_cp_variants(st, stcp);

      if (nir) {
         stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
         stcp->shader_program = shProg;
         stcp->tgsi.prog = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = (nir_shader *) stcp->tgsi.prog;
      } else {
         read_tgsi_from_cache(&blob_reader,
                              (const struct tgsi_token**) &stcp->tgsi.prog,
                              &stcp->num_tgsi_tokens);
      }

      stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
      stcp->tgsi.req_private_mem = 0;
      stcp->tgsi.req_input_mem = 0;

      if (st->cp == stcp)
         st->dirty |= stcp->affected_states;
      break;
   }
   default:
      unreachable("Unsupported stage");
   }

   /* Make sure we don't try to read more data than we wrote. This should
    * never happen in release builds but it's useful to have this check to
    * catch development bugs.
    */
   if (blob_reader.current != blob_reader.end || blob_reader.overrun) {
      assert(!"Invalid TGSI shader disk cache item!");

      if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
         fprintf(stderr, "Error reading program from cache (invalid "
                 "TGSI cache item)\n");
      }
   }

   st_set_prog_affected_state_flags(prog);
   _mesa_associate_uniform_storage(ctx, shProg, prog, false);

   /* Create Gallium shaders now instead of on demand. */
   if (ST_DEBUG & DEBUG_PRECOMPILE ||
       st->shader_has_one_variant[prog->info.stage])
      st_precompile_shader_variant(st, prog);
}
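/* The trailing sanity check above relies on the blob reader
 * (src/compiler/blob.h) tracking its cursor and an overrun flag. Below is
 * a sketch of the same read-then-verify pattern against a hypothetical
 * one-field payload; read_cached_counts is not a real Mesa function, and
 * the payload layout is illustrative only.
 */
static bool
read_cached_counts(const void *data, size_t size, uint32_t *num_inputs)
{
   struct blob_reader r;
   blob_reader_init(&r, (uint8_t *) data, size);

   *num_inputs = blob_read_uint32(&r);

   /* Same invariant as in st_deserialise_ir_program: every byte written
    * must have been consumed, and the reader must never run past the end.
    */
   return r.current == r.end && !r.overrun;
}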
/* Too complex to figure out, just check every time:
 */
static void check_program_state( struct st_context *st )
{
   struct gl_context *ctx = st->ctx;
   struct st_vertex_program *old_vp = st->vp;
   struct st_common_program *old_tcp = st->tcp;
   struct st_common_program *old_tep = st->tep;
   struct st_common_program *old_gp = st->gp;
   struct st_fragment_program *old_fp = st->fp;

   struct gl_program *new_vp = ctx->VertexProgram._Current;
   struct gl_program *new_tcp = ctx->TessCtrlProgram._Current;
   struct gl_program *new_tep = ctx->TessEvalProgram._Current;
   struct gl_program *new_gp = ctx->GeometryProgram._Current;
   struct gl_program *new_fp = ctx->FragmentProgram._Current;
   uint64_t dirty = 0;
   unsigned num_viewports = 1;

   /* Flag states used by both new and old shaders to unbind shader resources
    * properly when transitioning to shaders that don't use them.
    */
   if (unlikely(new_vp != &old_vp->Base)) {
      if (old_vp)
         dirty |= old_vp->affected_states;
      if (new_vp)
         dirty |= ST_NEW_VERTEX_PROGRAM(st, st_vertex_program(new_vp));
   }

   if (unlikely(new_tcp != &old_tcp->Base)) {
      if (old_tcp)
         dirty |= old_tcp->affected_states;
      if (new_tcp)
         dirty |= st_common_program(new_tcp)->affected_states;
   }

   if (unlikely(new_tep != &old_tep->Base)) {
      if (old_tep)
         dirty |= old_tep->affected_states;
      if (new_tep)
         dirty |= st_common_program(new_tep)->affected_states;
   }

   if (unlikely(new_gp != &old_gp->Base)) {
      if (old_gp)
         dirty |= old_gp->affected_states;
      if (new_gp)
         dirty |= st_common_program(new_gp)->affected_states;
   }

   if (unlikely(new_fp != &old_fp->Base)) {
      if (old_fp)
         dirty |= old_fp->affected_states;
      if (new_fp)
         dirty |= st_fragment_program(new_fp)->affected_states;
   }

   /* Find out the number of viewports. This determines how many scissors
    * and viewport states we need to update.
    */
   struct gl_program *last_prim_shader = new_gp ? new_gp :
                                         new_tep ? new_tep : new_vp;
   if (last_prim_shader &&
       last_prim_shader->info.outputs_written & VARYING_BIT_VIEWPORT)
      num_viewports = ctx->Const.MaxViewports;

   if (st->state.num_viewports != num_viewports) {
      st->state.num_viewports = num_viewports;
      dirty |= ST_NEW_VIEWPORT;

      if (ctx->Scissor.EnableFlags & u_bit_consecutive(0, num_viewports))
         dirty |= ST_NEW_SCISSOR;
   }

   st->dirty |= dirty;
}
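/* The viewport-count logic above selects the last shader stage ahead of
 * the rasterizer (geometry wins over tess-eval, which wins over vertex),
 * because only that stage's gl_ViewportIndex write is visible to the
 * rasterizer. A hypothetical standalone form of that selection
 * (last_pre_raster_shader is not a real Mesa function):
 */
static struct gl_program *
last_pre_raster_shader(struct gl_program *vp, struct gl_program *tep,
                       struct gl_program *gp)
{
   if (gp)
      return gp;   /* geometry output feeds the rasterizer directly */
   if (tep)
      return tep;  /* otherwise tessellation evaluation */
   return vp;      /* otherwise the vertex shader (may be NULL) */
}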