/* All batchbuffer flushes must go through this function.
 */
void brw_context_flush( struct brw_context *brw )
{
   /* Finish any in-flight query bookkeeping before the batch is submitted. */
   brw_emit_query_end(brw);

   /* Move to the end of the current upload buffer so that we'll force choosing
    * a new buffer next time.
    */
   u_upload_flush( brw->vb.upload_vertex );
   u_upload_flush( brw->vb.upload_index );

   _brw_batchbuffer_flush( brw->batch, __FILE__, __LINE__ );

   /* Mark all context state as needing to be re-emitted.
    * This is probably not as severe as on 915, since almost all of our state
    * is just in referenced buffers.
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;

   brw->curbe.need_new_bo = GL_TRUE;
}
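/* A hedged illustration of the dirty-flag pattern relied on above: after a
 * flush, every dirty bit is set, so each state "atom" sees its mask as dirty
 * and re-emits its state on the next draw.  The names here (fake_atom,
 * emit_if_dirty) are illustrative sketches, not part of the actual driver. */
#include <stdint.h>

struct fake_atom {
   uint32_t dirty_mask;            /* dirty bits this atom depends on */
   void (*emit)(void);             /* re-emits the atom's GPU state */
};

static void emit_if_dirty(const struct fake_atom *atom, uint32_t dirty)
{
   /* Setting the dirty word to ~0 at flush time guarantees this test
    * passes for every atom, forcing a full state re-emission. */
   if (dirty & atom->dirty_mask)
      atom->emit();
}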
/**
 * Called from intel_batchbuffer_flush and children before sending a
 * batchbuffer off.
 *
 * Note that ALL state emitted here must fit in the reserved space
 * at the end of a batchbuffer.  If you add more GPU state, increase
 * the BATCH_RESERVED macro.
 */
static void brw_finish_batch(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);

   brw_emit_query_end(brw);

   /* Drop the CURBE buffer; a fresh one is allocated for the next batch. */
   if (brw->curbe.curbe_bo) {
      drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
      drm_intel_bo_unreference(brw->curbe.curbe_bo);
      brw->curbe.curbe_bo = NULL;
   }
}
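/* A hedged sketch of the BATCH_RESERVED contract noted in the comment above:
 * space checks subtract a reserved tail so brw_finish_batch() always has room
 * to emit its closing state.  BATCH_SIZE, fake_batch, and batch_has_space are
 * hypothetical stand-ins, not the driver's real definitions. */
#define BATCH_SIZE     4096
#define BATCH_RESERVED 16          /* bytes kept free for finish-time state */

struct fake_batch {
   unsigned used;                  /* bytes already emitted into the batch */
};

static int batch_has_space(const struct fake_batch *batch, unsigned sz)
{
   /* Callers flush when this fails, so the final BATCH_RESERVED bytes
    * remain available when the batch is wrapped up. */
   return batch->used + sz <= BATCH_SIZE - BATCH_RESERVED;
}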