void _tnl_run_pipeline( struct gl_context *ctx ) { TNLcontext *tnl = TNL_CONTEXT(ctx); unsigned short __tmp; GLuint i; if (!tnl->vb.Count) return; /* Check for changed input sizes or change in stride to/from zero * (ie const or non-const). */ if (check_input_changes( ctx ) || tnl->pipeline.new_state) { if (ctx->VertexProgram._MaintainTnlProgram) _tnl_UpdateFixedFunctionProgram( ctx ); for (i = 0; i < tnl->pipeline.nr_stages ; i++) { struct tnl_pipeline_stage *s = &tnl->pipeline.stages[i]; if (s->validate) s->validate( ctx, s ); } tnl->pipeline.new_state = 0; tnl->pipeline.input_changes = 0; /* Pipeline can only change its output in response to either a * statechange or an input size/stride change. No other changes * are allowed. */ if (check_output_changes( ctx )) _tnl_notify_pipeline_output_change( ctx ); } #ifndef _OPENMP /* Don't adjust FPU precision mode in case multiple threads are to be used. * This would require that the additional threads also changed the FPU mode * which is quite a mess as this had to be done in all parallelized sections; * otherwise the master thread and all other threads are running in different * modes, producing inconsistent results. * Note that all x64 implementations don't define/use START_FAST_MATH, so * this is "hack" is only used in i386 mode */ START_FAST_MATH(__tmp); #endif for (i = 0; i < tnl->pipeline.nr_stages ; i++) { struct tnl_pipeline_stage *s = &tnl->pipeline.stages[i]; if (!s->run( ctx, s )) break; } #ifndef _OPENMP END_FAST_MATH(__tmp); #endif }
/*
 * Legacy pipeline runner: walks the NULL-run-terminated stage array,
 * propagating changed-input bits downstream so that only stages whose
 * inputs or tracked state actually changed are recomputed.
 * NOTE(review): exact statement order here is load-bearing — taint
 * downstream inputs *before* running a stage, clear the stage's own
 * changed_inputs *after* it runs.
 */
void _tnl_run_pipeline( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   struct gl_pipeline *pipe = &tnl->pipeline;
   struct gl_pipeline_stage *s = pipe->stages;
   GLuint changed_state = pipe->run_state_changes;
   GLuint changed_inputs = pipe->run_input_changes;
   GLboolean running = GL_TRUE;
   unsigned short __tmp;    /* FPU control-word save slot for START/END_FAST_MATH */

   /* Consume the accumulated change flags for this run. */
   pipe->run_state_changes = 0;
   pipe->run_input_changes = 0;

   /* Done elsewhere.
    */
   ASSERT(pipe->build_state_changes == 0);

   START_FAST_MATH(__tmp);

   /* If something changes in the pipeline, tag all subsequent stages
    * using this value for recalculation.  Inactive stages have their
    * state and inputs examined to try to keep cached data alive over
    * state-changes.
    */
   for ( ; s->run ; s++) {
      /* Mark inputs dirty where an upstream stage rewrote them. */
      s->changed_inputs |= s->inputs & changed_inputs;

      /* A relevant state change invalidates everything this stage reads. */
      if (s->run_state & changed_state)
         s->changed_inputs = s->inputs;

      if (s->active && running) {
         /* Re-running this stage rewrites its outputs, so taint them
          * for all downstream stages.
          */
         if (s->changed_inputs)
            changed_inputs |= s->outputs;

         /* A stage returning GL_FALSE stops execution of later stages. */
         running = s->run( ctx, s );
         s->changed_inputs = 0;

         /* Stage outputs now live in pipeline-owned storage; they can no
          * longer be imported directly from the client arrays.
          */
         VB->importable_data &= ~s->outputs;
      }
   }

   END_FAST_MATH(__tmp);
}
/*
 * Drive the T&L pipeline for the current vertex buffer.  Revalidates the
 * stages whenever the inputs or GL state changed, then runs each stage in
 * turn; a stage may stop the walk early by returning GL_FALSE.
 */
void _tnl_run_pipeline( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   unsigned short saved_fpu;
   GLuint stage_idx;

   /* Nothing buffered -- nothing to do. */
   if (!tnl->vb.Count)
      return;

   /* Revalidate when an input changed size, changed stride to/from zero
    * (const vs. non-const), or when relevant GL state changed.
    */
   if (check_input_changes( ctx ) || tnl->pipeline.new_state) {
      if (ctx->_MaintainTnlProgram)
         _tnl_UpdateFixedFunctionProgram( ctx );

      for (stage_idx = 0; stage_idx < tnl->pipeline.nr_stages; stage_idx++) {
         struct tnl_pipeline_stage *stage = &tnl->pipeline.stages[stage_idx];
         if (stage->validate)
            stage->validate( ctx, stage );
      }

      tnl->pipeline.new_state = 0;
      tnl->pipeline.input_changes = 0;

      /* The pipeline's outputs may only change as a consequence of a
       * statechange or an input size/stride change -- check now, while
       * those are the only things that could have happened.
       */
      if (check_output_changes( ctx ))
         _tnl_notify_pipeline_output_change( ctx );
   }

   START_FAST_MATH(saved_fpu);

   /* Execute the stages until one of them reports completion. */
   for (stage_idx = 0; stage_idx < tnl->pipeline.nr_stages; stage_idx++) {
      struct tnl_pipeline_stage *stage = &tnl->pipeline.stages[stage_idx];
      if (!stage->run( ctx, stage ))
         break;
   }

   END_FAST_MATH(saved_fpu);
}
/******************************************************************************
 * pipeline
 */

/*
 * Fixed-order T&L pipeline: validate context state, then (if there are
 * buffered vertices) push them through transform, lighting, fog, texgen,
 * texture transform and finally rendering.  FPU precision is lowered
 * around the math-heavy section on x86 only.
 */
void tnl_run_pipeline (void)
{
   LOG(("%s:\n", __FUNCTION__));

   /* Bring all derived state up to date before touching any vertices. */
   ctx_validate_state(-1U);

   tnl_render_state = tnl_light_state | tnl_texgen_state;

   if (tnl_vb.len) {
#ifdef X86
      GLuint fpu_save;
      START_FAST_MATH(fpu_save);
#endif
      /* The stages run unconditionally, in fixed order. */
      tnl_vertex_transform();
      tnl_lighting();
      tnl_fogging();
      tnl_texgen();
      tnl_texture_transform();
      tnl_render();
#ifdef X86
      END_FAST_MATH(fpu_save);
#endif
   }
}