/* Upload dirty state for the given pipeline (render or compute).
 *
 * Merges context-level dirty flags into the pipeline's saved flags,
 * tracks changes to the bound shader programs, then walks the
 * pipeline's atom list emitting every atom whose dirty bits intersect
 * the accumulated state.  Under INTEL_DEBUG the walk also asserts that
 * no atom flags state which an earlier atom has already examined
 * (i.e. that the atom list is ordered correctly).
 */
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;   /* counts uploads for periodic DEBUG_STATE stats */
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   unsigned int fb_samples = _mesa_geometric_samples(ctx->DrawBuffer);

   brw_select_pipeline(brw, pipeline);

   if (0) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   /* Track changes to the currently bound programs; only the render
    * pipeline cares about the non-compute stages.
    */
   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->fragment_program != ctx->FragmentProgram._Current) {
         brw->fragment_program = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->tess_eval_program != ctx->TessEvalProgram._Current) {
         brw->tess_eval_program = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->tess_ctrl_program != ctx->TessCtrlProgram._Current) {
         brw->tess_ctrl_program = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->geometry_program != ctx->GeometryProgram._Current) {
         brw->geometry_program = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->vertex_program != ctx->VertexProgram._Current) {
         brw->vertex_program = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->compute_program != ctx->ComputeProgram._Current) {
      brw->compute_program = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (brw->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   /* Program uploads may have flagged more state; pick it up. */
   merge_ctx_state(brw, &state);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}
/*********************************************************************** * Emit all state: */ void brw_upload_state(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; struct brw_state_flags *state = &brw->state.dirty; int i; static int dirty_count = 0; state->mesa |= brw->intel.NewGLState; brw->intel.NewGLState = 0; if (brw->emit_state_always) { state->mesa |= ~0; state->brw |= ~0; state->cache |= ~0; } if (brw->fragment_program != ctx->FragmentProgram._Current) { brw->fragment_program = ctx->FragmentProgram._Current; brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM; } if (brw->vertex_program != ctx->VertexProgram._Current) { brw->vertex_program = ctx->VertexProgram._Current; brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM; } if ((state->mesa | state->cache | state->brw) == 0) return; intel_check_front_buffer_rendering(intel); if (unlikely(INTEL_DEBUG)) { /* Debug version which enforces various sanity checks on the * state flags which are generated and checked to help ensure * state atoms are ordered correctly in the list. 
*/ struct brw_state_flags examined, prev; memset(&examined, 0, sizeof(examined)); prev = *state; for (i = 0; i < brw->num_atoms; i++) { const struct brw_tracked_state *atom = brw->atoms[i]; struct brw_state_flags generated; if (check_state(state, &atom->dirty)) { atom->emit(brw); } accumulate_state(&examined, &atom->dirty); /* generated = (prev ^ state) * if (examined & generated) * fail; */ xor_states(&generated, &prev, state); assert(!check_state(&examined, &generated)); prev = *state; } } else { for (i = 0; i < brw->num_atoms; i++) { const struct brw_tracked_state *atom = brw->atoms[i]; if (check_state(state, &atom->dirty)) { atom->emit(brw); } } } if (unlikely(INTEL_DEBUG & DEBUG_STATE)) { brw_update_dirty_count(mesa_bits, state->mesa); brw_update_dirty_count(brw_bits, state->brw); brw_update_dirty_count(cache_bits, state->cache); if (dirty_count++ % 1000 == 0) { brw_print_dirty_count(mesa_bits, state->mesa); brw_print_dirty_count(brw_bits, state->brw); brw_print_dirty_count(cache_bits, state->cache); fprintf(stderr, "\n"); } } memset(state, 0, sizeof(*state)); }
/*********************************************************************** * Emit all state: */ void brw_validate_state( struct brw_context *brw ) { struct brw_state_flags *state = &brw->state.dirty; GLuint i; state->mesa |= brw->intel.NewGLState; brw->intel.NewGLState = 0; if (brw->wrap) state->brw |= BRW_NEW_CONTEXT; if (brw->emit_state_always) { state->mesa |= ~0; state->brw |= ~0; } /* texenv program needs to notify us somehow when this happens: * Some confusion about which state flag should represent this change. */ if (brw->fragment_program != brw->attribs.FragmentProgram->_Current) { brw->fragment_program = brw->attribs.FragmentProgram->_Current; brw->state.dirty.mesa |= _NEW_PROGRAM; brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM; } if (state->mesa == 0 && state->cache == 0 && state->brw == 0) return; if (brw->state.dirty.brw & BRW_NEW_CONTEXT) brw_clear_batch_cache_flush(brw); /* Make an early reference to the state pools, as we don't cope * well with them being evicted from here down. */ (void)bmBufferOffset(&brw->intel, brw->pool[BRW_GS_POOL].buffer); (void)bmBufferOffset(&brw->intel, brw->pool[BRW_SS_POOL].buffer); (void)bmBufferOffset(&brw->intel, brw->intel.batch->buffer); if (INTEL_DEBUG) { /* Debug version which enforces various sanity checks on the * state flags which are generated and checked to help ensure * state atoms are ordered correctly in the list. 
*/ struct brw_state_flags examined, prev; _mesa_memset(&examined, 0, sizeof(examined)); prev = *state; for (i = 0; i < brw->state.nr_atoms; i++) { const struct brw_tracked_state *atom = brw->state.atoms[i]; struct brw_state_flags generated; assert(atom->dirty.mesa || atom->dirty.brw || atom->dirty.cache); assert(atom->update); if (check_state(state, &atom->dirty)) { brw->state.atoms[i]->update( brw ); /* emit_foo(brw); */ } accumulate_state(&examined, &atom->dirty); /* generated = (prev ^ state) * if (examined & generated) * fail; */ xor_states(&generated, &prev, state); assert(!check_state(&examined, &generated)); prev = *state; } } else { for (i = 0; i < Elements(atoms); i++) { if (check_state(state, &brw->state.atoms[i]->dirty)) brw->state.atoms[i]->update( brw ); } } memset(state, 0, sizeof(*state)); }
void st_validate_state( struct st_context *st ) { struct st_state_flags *state = &st->dirty; GLuint i; /* The bitmap cache is immune to pixel unpack changes. * Note that GLUT makes several calls to glPixelStore for each * bitmap char it draws so this is an important check. */ if (state->mesa & ~_NEW_PACKUNPACK) st_flush_bitmap_cache(st); check_program_state( st ); st_manager_validate_framebuffers(st); if (state->st == 0) return; /*printf("%s %x/%x\n", __FUNCTION__, state->mesa, state->st);*/ if (1) { /* Debug version which enforces various sanity checks on the * state flags which are generated and checked to help ensure * state atoms are ordered correctly in the list. */ struct st_state_flags examined, prev; memset(&examined, 0, sizeof(examined)); prev = *state; for (i = 0; i < Elements(atoms); i++) { const struct st_tracked_state *atom = atoms[i]; struct st_state_flags generated; /*printf("atom %s %x/%x\n", atom->name, atom->dirty.mesa, atom->dirty.st);*/ if (!(atom->dirty.mesa || atom->dirty.st) || !atom->update) { printf("malformed atom %s\n", atom->name); assert(0); } if (check_state(state, &atom->dirty)) { atoms[i]->update( st ); /*printf("after: %x\n", atom->dirty.mesa);*/ } accumulate_state(&examined, &atom->dirty); /* generated = (prev ^ state) * if (examined & generated) * fail; */ xor_states(&generated, &prev, state); assert(!check_state(&examined, &generated)); prev = *state; } /*printf("\n");*/ } else { for (i = 0; i < Elements(atoms); i++) { if (check_state(state, &atoms[i]->dirty)) atoms[i]->update( st ); } } memset(state, 0, sizeof(*state)); }
static enum pipe_error update_state(struct svga_context *svga, const struct svga_tracked_state *atoms[], unsigned *state) { #ifdef DEBUG boolean debug = TRUE; #else boolean debug = FALSE; #endif enum pipe_error ret = PIPE_OK; unsigned i; ret = svga_hwtnl_flush( svga->hwtnl ); if (ret != PIPE_OK) return ret; if (debug) { /* Debug version which enforces various sanity checks on the * state flags which are generated and checked to help ensure * state atoms are ordered correctly in the list. */ unsigned examined, prev; examined = 0; prev = *state; for (i = 0; atoms[i] != NULL; i++) { unsigned generated; assert(atoms[i]->dirty); assert(atoms[i]->update); if (check_state(*state, atoms[i]->dirty)) { if (0) debug_printf("update: %s\n", atoms[i]->name); ret = atoms[i]->update( svga, *state ); if (ret != PIPE_OK) return ret; } /* generated = (prev ^ state) * if (examined & generated) * fail; */ xor_states(&generated, prev, *state); if (check_state(examined, generated)) { debug_printf("state atom %s generated state already examined\n", atoms[i]->name); assert(0); } prev = *state; accumulate_state(&examined, atoms[i]->dirty); } } else { for (i = 0; atoms[i] != NULL; i++) { if (check_state(*state, atoms[i]->dirty)) { ret = atoms[i]->update( svga, *state ); if (ret != PIPE_OK) return ret; } } } return PIPE_OK; }
/* Emit all dirty state to the hardware.
 *
 * Walks the global atom list, calling emit() on each atom whose dirty
 * bits intersect the accumulated dirty flags.  Returns the first
 * non-zero error from an emit callback — in that case the dirty flags
 * are left set so the caller may retry.  On success all dirty flags
 * are cleared and 0 is returned.
 */
enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int ret;
   int i;

   brw_clear_validated_bos(brw);

   if (BRW_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         /* Every atom must watch at least one state flag. */
         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (BRW_DEBUG & DEBUG_STATE) {
      brw_update_dirty_counts( state->mesa,
                               state->brw,
                               state->cache );
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));

   return 0;
}
/* Validate and update all derived state for the given pipeline
 * (render or compute) before a draw/dispatch.
 *
 * Selects the atom list and dirty-flag set for the pipeline, folds
 * Mesa's NewDriverState into both pipelines' flags, then runs every
 * atom whose dirty bits intersect the flags, clearing the flags
 * afterwards.  In DEBUG builds the loop also asserts that no atom
 * dirties state an earlier atom has already examined (list-ordering
 * check).
 */
void st_validate_state( struct st_context *st, enum st_pipeline pipeline )
{
   const struct st_tracked_state **atoms;
   struct st_state_flags *state;
   GLuint num_atoms;
   GLuint i;

   /* Get pipeline state. */
   switch (pipeline) {
   case ST_PIPELINE_RENDER:
      atoms = render_atoms;
      num_atoms = ARRAY_SIZE(render_atoms);
      state = &st->dirty;
      break;
   case ST_PIPELINE_COMPUTE:
      atoms = compute_atoms;
      num_atoms = ARRAY_SIZE(compute_atoms);
      state = &st->dirty_cp;
      break;
   default:
      unreachable("Invalid pipeline specified");
   }

   /* Get Mesa driver state.  Both pipelines' flag sets are updated so
    * neither misses a change while the other is active.
    */
   st->dirty.st |= st->ctx->NewDriverState;
   st->dirty_cp.st |= st->ctx->NewDriverState;
   st->ctx->NewDriverState = 0;

   if (pipeline == ST_PIPELINE_RENDER) {
      check_attrib_edgeflag(st);
      check_program_state(st);
      st_manager_validate_framebuffers(st);
   }

   if (state->st == 0 && state->mesa == 0)
      return;

   /*printf("%s %x/%x\n", __func__, state->mesa, state->st);*/

#ifdef DEBUG
   if (1) {
#else
   if (0) {
#endif
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct st_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;
      for (i = 0; i < num_atoms; i++) {
         const struct st_tracked_state *atom = atoms[i];
         struct st_state_flags generated;

         /*printf("atom %s %x/%x\n", atom->name, atom->dirty.mesa, atom->dirty.st);*/

         /* An atom with no dirty bits or no update hook is malformed. */
         if (!(atom->dirty.mesa || atom->dirty.st) ||
             !atom->update) {
            printf("malformed atom %s\n", atom->name);
            assert(0);
         }

         if (check_state(state, &atom->dirty)) {
            atoms[i]->update( st );
            /*printf("after: %x\n", atom->dirty.mesa);*/
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
      /*printf("\n");*/
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         if (check_state(state, &atoms[i]->dirty))
            atoms[i]->update( st );
      }
   }

   memset(state, 0, sizeof(*state));
}
/***********************************************************************
 * Emit all state:
 *
 * Folds core Mesa and driver dirty bits into brw->state.dirty, tracks
 * changes to the bound programs, meta state and sample count, then
 * walks the atom list emitting every atom whose dirty bits intersect
 * the accumulated flags.  NOTE(review): the dirty flags are NOT
 * cleared in this function — presumably a caller clears them after
 * the draw; confirm against the call sites.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;   /* counts uploads for periodic DEBUG_STATE stats */

   state->mesa |= brw->NewGLState;
   brw->NewGLState = 0;

   state->brw |= ctx->NewDriverState;
   ctx->NewDriverState = 0;

   if (0) {
      /* Always re-emit all state. */
      state->mesa |= ~0;
      state->brw |= ~0ull;
   }

   /* Catch program changes that core Mesa doesn't flag for us. */
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->geometry_program != ctx->GeometryProgram._Current) {
      brw->geometry_program = ctx->GeometryProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
      brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
   }

   /* Nothing dirty — nothing to emit. */
   if ((state->mesa | state->brw) == 0)
      return;

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < brw->num_atoms; i++) {
         const struct brw_tracked_state *atom = brw->atoms[i];
         struct brw_state_flags generated;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < brw->num_atoms; i++) {
         const struct brw_tracked_state *atom = brw->atoms[i];

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}
/* Validate and update all derived state before a draw.
 *
 * Folds Mesa's NewDriverState into st->dirty, flushes the bitmap
 * cache when any core-Mesa state is dirty, refreshes program and
 * framebuffer state, then runs every tracked atom whose dirty bits
 * intersect the flags, clearing the flags afterwards.  In DEBUG
 * builds the loop also asserts the atom list is ordered so that no
 * atom dirties state an earlier atom has already examined.
 */
void st_validate_state( struct st_context *st )
{
   struct st_state_flags *state = &st->dirty;
   GLuint i;

   /* Get Mesa driver state. */
   st->dirty.st |= st->ctx->NewDriverState;
   st->ctx->NewDriverState = 0;

   check_attrib_edgeflag(st);

   /* Flush cached bitmap drawing whenever core-Mesa state changed. */
   if (state->mesa)
      st_flush_bitmap_cache(st);

   check_program_state( st );

   st_manager_validate_framebuffers(st);

   if (state->st == 0)
      return;

   /*printf("%s %x/%x\n", __FUNCTION__, state->mesa, state->st);*/

#ifdef DEBUG
   if (1) {
#else
   if (0) {
#endif
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct st_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;
      for (i = 0; i < ARRAY_SIZE(atoms); i++) {
         const struct st_tracked_state *atom = atoms[i];
         struct st_state_flags generated;

         /*printf("atom %s %x/%x\n", atom->name, atom->dirty.mesa, atom->dirty.st);*/

         /* An atom with no dirty bits or no update hook is malformed. */
         if (!(atom->dirty.mesa || atom->dirty.st) ||
             !atom->update) {
            printf("malformed atom %s\n", atom->name);
            assert(0);
         }

         if (check_state(state, &atom->dirty)) {
            atoms[i]->update( st );
            /*printf("after: %x\n", atom->dirty.mesa);*/
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
      /*printf("\n");*/
   }
   else {
      for (i = 0; i < ARRAY_SIZE(atoms); i++) {
         if (check_state(state, &atoms[i]->dirty))
            atoms[i]->update( st );
      }
   }

   memset(state, 0, sizeof(*state));
}