/*********************************************************************** * Emit all state: */ enum pipe_error brw_validate_state( struct brw_context *brw ) { struct brw_state_flags *state = &brw->state.dirty; GLuint i; int ret; brw_clear_validated_bos(brw); brw_add_validated_bo(brw, brw->batch->buf); if (brw->flags.always_emit_state) { state->mesa |= ~0; state->brw |= ~0; state->cache |= ~0; } if (state->mesa == 0 && state->cache == 0 && state->brw == 0) return 0; if (brw->state.dirty.brw & BRW_NEW_CONTEXT) brw_clear_batch_cache(brw); /* do prepare stage for all atoms */ for (i = 0; i < Elements(atoms); i++) { const struct brw_tracked_state *atom = atoms[i]; if (check_state(state, &atom->dirty)) { if (atom->prepare) { ret = atom->prepare(brw); if (ret) return ret; } } } /* Make sure that the textures which are referenced by the current * brw fragment program are actually present/valid. * If this fails, we can experience GPU lock-ups. */ { const struct brw_fragment_shader *fp = brw->curr.fragment_shader; if (fp) { assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers); /*assert(fp->info.texture_max <= brw->curr.num_textures);*/ } } return 0; }
/* Tear down the context's batch cache; simply delegates to
 * brw_clear_batch_cache(), which does the actual work.
 */
void brw_destroy_batch_cache( struct brw_context *brw )
{
   brw_clear_batch_cache(brw);
}