/**
 * Terminate the current inline primitive, submit the batch buffer to
 * hardware, and begin a new inline primitive of the same type in the
 * fresh buffer.
 */
void intelRestartInlinePrimitive( intelContextPtr intel )
{
   GLuint prim = intel->prim.primitive;

   intel_flush_inline_primitive( &intel->ctx );

   /* Submitting the batch here (GL_TRUE) is critical: the restarted
    * primitive must start in a new buffer.
    */
   intelFlushBatch(intel, GL_TRUE);

   intelStartInlinePrimitive( intel, prim );
}
/**
 * Wrap an in-progress inline primitive across a batch buffer
 * boundary: close it out, flush the batch, then restart the same
 * primitive type in the next buffer.
 */
void intelWrapInlinePrimitive( intelContextPtr intel )
{
   GLuint prim = intel->prim.primitive;

   intel_flush_inline_primitive( &intel->ctx );
   intelFlushBatch(intel, GL_TRUE);
   intelStartInlinePrimitive( intel, prim );
}
/**
 * NOT directly called via glFlush.
 */
void intelFlush( GLcontext *ctx )
{
   intelContextPtr ic = INTEL_CONTEXT( ctx );

   /* In a software fallback, let swrast drain its work first. */
   if (ic->Fallback != 0)
      _swrast_flush( ctx );

   /* Close out any pending inline primitive. */
   INTEL_FIREVERTICES( ic );

   /* A partially-consumed batch (space != size) means there are
    * commands waiting to be submitted.
    */
   if (ic->batch.space != ic->batch.size)
      intelFlushBatch( ic, GL_FALSE );
}
/**
 * Block until the hardware has finished all submitted commands:
 * emit a flush, submit the batch, then sleep on the last emitted irq.
 */
void intelWaitForIdle( intelContextPtr intel )
{
   intel->vtbl.emit_flush( intel );
   intelFlushBatch( intel, GL_TRUE );

   /* Use an irq to wait for dma idle -- Need to track lost contexts
    * to shortcircuit consecutive calls to this function:
    */
   intelWaitIrq( intel, intel->alloc.irq_emitted );
   intel->alloc.irq_emitted = 0;
}
/* Emit a primitive referencing vertices in a vertex buffer.
 */
void intelStartInlinePrimitive( intelContextPtr intel, GLuint prim )
{
   BATCH_LOCALS;

   /* Finish any in-progress primitive:
    */
   INTEL_FIREVERTICES( intel );

   /* Emit outstanding state:
    */
   intel->vtbl.emit_state( intel );

   /* Make sure there is some space in this buffer -- enough for
    * roughly ten vertices of the current size:
    */
   if (intel->vertex_size * 10 * sizeof(GLuint) >= intel->batch.space)
      intelFlushBatch(intel, GL_TRUE);

   /* Pad with a noop dword so the slot below lands on a qword
    * boundary.  Cast the pointer through unsigned long, not int,
    * so the test is valid on 64-bit builds.
    */
   if (((unsigned long)intel->batch.ptr) & 0x4) {
      BEGIN_BATCH(1);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Emit a slot which will be filled with the inline primitive
    * command later.
    */
   BEGIN_BATCH(2);
   OUT_BATCH( 0 );

   intel->prim.start_ptr = batch_ptr;
   intel->prim.primitive = prim;
   intel->prim.flush = intel_flush_inline_primitive;

   OUT_BATCH( 0 );
   ADVANCE_BATCH();
}