static void r200UpdateFSRouting( struct gl_context *ctx ) {

   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   const struct ati_fragment_shader *shader = ctx->ATIFragmentShader.Current;
   GLuint reg;

   R200_STATECHANGE( rmesa, ctx );
   R200_STATECHANGE( rmesa, cst );

   for (reg = 0; reg < R200_MAX_TEXTURE_UNITS; reg++) {
      if (shader->swizzlerq & (1 << (2 * reg)))
         /* r coord */
         set_re_cntl_d3d( ctx, reg, 1);
      else
         /* q coord */
         set_re_cntl_d3d( ctx, reg, 0);
   }

   rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~(R200_MULTI_PASS_ENABLE |
                                       R200_TEX_BLEND_ENABLE_MASK |
                                       R200_TEX_ENABLE_MASK);
   rmesa->hw.cst.cmd[CST_PP_CNTL_X] &= ~(R200_PPX_PFS_INST_ENABLE_MASK |
                                         R200_PPX_TEX_ENABLE_MASK |
                                         R200_PPX_OUTPUT_REG_MASK);

   /* first pass registers use slots 8 - 15
      but single pass shaders use slots 0 - 7 */
   if (shader->NumPasses < 2) {
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= shader->numArithInstr[0] == 8 ?
         0xff << (R200_TEX_BLEND_0_ENABLE_SHIFT - 1) :
         (0xff >> (8 - shader->numArithInstr[0])) << R200_TEX_BLEND_0_ENABLE_SHIFT;
   } else {
void r200TclPrimitive( struct gl_context *ctx,
                       GLenum prim,
                       int hw_prim )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   GLuint newprim = hw_prim | R200_VF_TCL_OUTPUT_VTX_ENABLE;

   radeon_prepare_render(&rmesa->radeon);
   if (rmesa->radeon.NewGLState)
      r200ValidateState( ctx );

   if (newprim != rmesa->tcl.hw_primitive ||
       !discrete_prim[hw_prim&0xf]) {
      /* need to disable perspective-correct texturing for point sprites */
      if ((prim & PRIM_MODE_MASK) == GL_POINTS && ctx->Point.PointSprite) {
         if (rmesa->hw.set.cmd[SET_RE_CNTL] & R200_PERSPECTIVE_ENABLE) {
            R200_STATECHANGE( rmesa, set );
            rmesa->hw.set.cmd[SET_RE_CNTL] &= ~R200_PERSPECTIVE_ENABLE;
         }
      }
      else if (!(rmesa->hw.set.cmd[SET_RE_CNTL] & R200_PERSPECTIVE_ENABLE)) {
         R200_STATECHANGE( rmesa, set );
         rmesa->hw.set.cmd[SET_RE_CNTL] |= R200_PERSPECTIVE_ENABLE;
      }
      R200_NEWPRIM( rmesa );
      rmesa->tcl.hw_primitive = newprim;
   }
}
static GLboolean update_tex_common( GLcontext *ctx, int unit )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   struct gl_texture_object *tObj = texUnit->_Current;
   r200TexObjPtr t = (r200TexObjPtr) tObj->DriverData;
   GLenum format;

   /* Fallback if there's a texture border */
   if ( tObj->Image[tObj->BaseLevel]->Border > 0 )
      return GL_FALSE;

   /* Update state if this is a different texture object to last
    * time.
    */
   if ( rmesa->state.texture.unit[unit].texobj != t ) {
      rmesa->state.texture.unit[unit].texobj = t;
      t->dirty_state |= 1<<unit;
      r200UpdateTexLRU( rmesa, t ); /* XXX: should be locked! */
   }

   /* Newly enabled?
    */
   if ( 1|| !(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (R200_TEX_0_ENABLE<<unit))) {
      R200_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= (R200_TEX_0_ENABLE |
                                         R200_TEX_BLEND_0_ENABLE) << unit;

      R200_STATECHANGE( rmesa, vtx );
      rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_VTXFMT_1] |= 4 << (unit * 3);

      rmesa->recheck_texgen[unit] = GL_TRUE;
   }

   if (t->dirty_state & (1<<unit)) {
      import_tex_obj_state( rmesa, unit, t );
   }

   if (rmesa->recheck_texgen[unit]) {
      GLboolean fallback = !r200_validate_texgen( ctx, unit );
      TCL_FALLBACK( ctx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), fallback);
      rmesa->recheck_texgen[unit] = 0;
      rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
   }

   format = tObj->Image[tObj->BaseLevel]->Format;
   if ( rmesa->state.texture.unit[unit].format != format ||
        rmesa->state.texture.unit[unit].envMode != texUnit->EnvMode ) {
      rmesa->state.texture.unit[unit].format = format;
      rmesa->state.texture.unit[unit].envMode = texUnit->EnvMode;
      r200UpdateTextureEnv( ctx, unit );
   }

   return GL_TRUE;
}
static GLboolean r200VertexProgUpdateParams(GLcontext *ctx,
                                            struct r200_vertex_program *vp)
{
   r200ContextPtr rmesa = R200_CONTEXT( ctx );
   GLfloat *fcmd = (GLfloat *)&rmesa->hw.vpp[0].cmd[VPP_CMD_0 + 1];
   int pi;
   struct gl_vertex_program *mesa_vp = &vp->mesa_program;
   struct gl_program_parameter_list *paramList;
   drm_radeon_cmd_header_t tmp;

   R200_STATECHANGE( rmesa, vpp[0] );
   R200_STATECHANGE( rmesa, vpp[1] );
   assert(mesa_vp->Base.Parameters);
   _mesa_load_state_parameters(ctx, mesa_vp->Base.Parameters);
   paramList = mesa_vp->Base.Parameters;

   if(paramList->NumParameters > R200_VSF_MAX_PARAM){
      fprintf(stderr, "%s:Params exhausted\n", __FUNCTION__);
      return GL_FALSE;
   }

   for(pi = 0; pi < paramList->NumParameters; pi++) {
      switch(paramList->Parameters[pi].Type) {
      case PROGRAM_STATE_VAR:
      case PROGRAM_NAMED_PARAM:
         //fprintf(stderr, "%s", vp->Parameters->Parameters[pi].Name);
      case PROGRAM_CONSTANT:
         *fcmd++ = paramList->ParameterValues[pi][0];
         *fcmd++ = paramList->ParameterValues[pi][1];
         *fcmd++ = paramList->ParameterValues[pi][2];
         *fcmd++ = paramList->ParameterValues[pi][3];
         break;
      default:
         _mesa_problem(NULL, "Bad param type in %s", __FUNCTION__);
         break;
      }
      if (pi == 95) {
         fcmd = (GLfloat *)&rmesa->hw.vpp[1].cmd[VPP_CMD_0 + 1];
      }
   }

   /* hack up the cmd_size so not the whole state atom is emitted always. */
   rmesa->hw.vpp[0].cmd_size =
      1 + 4 * ((paramList->NumParameters > 96) ? 96 : paramList->NumParameters);
   tmp.i = rmesa->hw.vpp[0].cmd[VPP_CMD_0];
   tmp.veclinear.count =
      (paramList->NumParameters > 96) ? 96 : paramList->NumParameters;
   rmesa->hw.vpp[0].cmd[VPP_CMD_0] = tmp.i;
   if (paramList->NumParameters > 96) {
      rmesa->hw.vpp[1].cmd_size = 1 + 4 * (paramList->NumParameters - 96);
      tmp.i = rmesa->hw.vpp[1].cmd[VPP_CMD_0];
      tmp.veclinear.count = paramList->NumParameters - 96;
      rmesa->hw.vpp[1].cmd[VPP_CMD_0] = tmp.i;
   }
   return GL_TRUE;
}
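/* The split at 96 parameters above mirrors the two vpp state atoms: vpp[0]
 * carries the first 96 vector constants and vpp[1] the overflow, with
 * cmd_size trimmed so only the used part of each atom is emitted.  A minimal
 * sketch of that size calculation follows; the helper name is ours, not part
 * of the driver.
 */
static void vpp_atom_sizes(int num_params, int *size0, int *size1)
{
   int in_atom0 = num_params > 96 ? 96 : num_params;      /* first atom   */
   int in_atom1 = num_params > 96 ? num_params - 96 : 0;   /* overflow     */

   *size0 = 1 + 4 * in_atom0;                 /* header dword + 4 floats each */
   *size1 = in_atom1 ? 1 + 4 * in_atom1 : 0;  /* second atom only if needed   */
}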
static void r200RegainedLock(r200ContextPtr r200)
{
   __DRIdrawablePrivate *dPriv = r200->radeon.dri.drawable;
   int i;

   if (r200->radeon.lastStamp != dPriv->lastStamp) {
      radeonUpdatePageFlipping(&r200->radeon);
      R200_STATECHANGE(r200, ctx);
      r200->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] =
         r200->radeon.state.color.drawOffset +
         r200->radeon.radeonScreen->fbLocation;
      r200->hw.ctx.cmd[CTX_RB3D_COLORPITCH] =
         r200->radeon.state.color.drawPitch;

      if (r200->radeon.glCtx->DrawBuffer->_ColorDrawBufferMask[0] ==
          BUFFER_BIT_BACK_LEFT)
         radeonSetCliprects(&r200->radeon, GL_BACK_LEFT);
      else
         radeonSetCliprects(&r200->radeon, GL_FRONT_LEFT);

      r200UpdateViewportOffset(r200->radeon.glCtx);
      r200->radeon.lastStamp = dPriv->lastStamp;
   }

   for (i = 0; i < r200->nr_heaps; i++) {
      DRI_AGE_TEXTURES(r200->texture_heaps[i]);
   }
}
static void disable_tex( GLcontext *ctx, int unit )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);

   if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (R200_TEX_0_ENABLE<<unit)) {
      /* Texture unit disabled */
      rmesa->state.texture.unit[unit].texobj = 0;

      R200_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] &=
         ~((R200_TEX_0_ENABLE | R200_TEX_BLEND_0_ENABLE) << unit);
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= R200_TEX_BLEND_0_ENABLE;

      R200_STATECHANGE( rmesa, vtx );
      rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_VTXFMT_1] &= ~(7 << (unit * 3));

      if (rmesa->TclFallback & (R200_TCL_FALLBACK_TEXGEN_0<<unit)) {
         TCL_FALLBACK( ctx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
      }

      /* Actually want to keep all units less than max active texture
       * enabled, right?  Fix this for >2 texunits.
       */
      if (unit == 0)
         r200UpdateTextureEnv( ctx, unit );

      {
         GLuint inputshift = R200_TEXGEN_0_INPUT_SHIFT + unit*4;
         GLuint tmp = rmesa->TexGenEnabled;

         rmesa->TexGenEnabled &= ~(R200_TEXGEN_TEXMAT_0_ENABLE<<unit);
         rmesa->TexGenEnabled &= ~(R200_TEXMAT_0_ENABLE<<unit);
         rmesa->TexGenEnabled &= ~(R200_TEXGEN_INPUT_MASK<<inputshift);
         rmesa->TexGenNeedNormals[unit] = 0;
         rmesa->TexGenCompSel &= ~(R200_OUTPUT_TEX_0 << unit);
         rmesa->TexGenInputs &= ~(R200_TEXGEN_INPUT_MASK<<inputshift);

         if (tmp != rmesa->TexGenEnabled) {
            rmesa->recheck_texgen[unit] = GL_TRUE;
            rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
         }
      }
   }
}
static void transition_to_hwtnl( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   _tnl_need_projected_coords( ctx, GL_FALSE );

   r200UpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = r200UpdateMaterial;

   if ( rmesa->dma.flush )
      rmesa->dma.flush( rmesa );

   rmesa->dma.flush = NULL;

   if (rmesa->swtcl.indexed_verts.buf)
      r200ReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
                            __FUNCTION__ );

   R200_STATECHANGE( rmesa, vap );
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] |= R200_VAP_TCL_ENABLE;
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] &= ~R200_VAP_FORCE_W_TO_ONE;

   if (ctx->VertexProgram._Enabled) {
      rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] |= R200_VAP_PROG_VTX_SHADER_ENABLE;
   }

   if ( ((rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] & R200_FOG_USE_MASK)
         == R200_FOG_USE_SPEC_ALPHA) &&
        (ctx->Fog.FogCoordinateSource == GL_FOG_COORD )) {
      R200_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] &= ~R200_FOG_USE_MASK;
      rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] |= R200_FOG_USE_VTX_FOG;
   }

   R200_STATECHANGE( rmesa, vte );
   rmesa->hw.vte.cmd[VTE_SE_VTE_CNTL] &= ~(R200_VTX_XY_FMT|R200_VTX_Z_FMT);
   rmesa->hw.vte.cmd[VTE_SE_VTE_CNTL] |= R200_VTX_W0_FMT;

   if (R200_DEBUG & DEBUG_FALLBACKS)
      fprintf(stderr, "R200 end tcl fallback\n");
}
static void transition_to_hwtnl( struct gl_context *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   _tnl_need_projected_coords( ctx, GL_FALSE );

   r200UpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = r200UpdateMaterial;

   if ( rmesa->radeon.dma.flush )
      rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );

   rmesa->radeon.dma.flush = NULL;

   R200_STATECHANGE( rmesa, vap );
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] |= R200_VAP_TCL_ENABLE;
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] &= ~R200_VAP_FORCE_W_TO_ONE;

   if (_mesa_arb_vertex_program_enabled(ctx)) {
      rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] |= R200_VAP_PROG_VTX_SHADER_ENABLE;
   }

   if ( ((rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] & R200_FOG_USE_MASK)
         == R200_FOG_USE_SPEC_ALPHA) &&
        (ctx->Fog.FogCoordinateSource == GL_FOG_COORD )) {
      R200_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] &= ~R200_FOG_USE_MASK;
      rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] |= R200_FOG_USE_VTX_FOG;
   }

   R200_STATECHANGE( rmesa, vte );
   rmesa->hw.vte.cmd[VTE_SE_VTE_CNTL] &= ~(R200_VTX_XY_FMT|R200_VTX_Z_FMT);
   rmesa->hw.vte.cmd[VTE_SE_VTE_CNTL] |= R200_VTX_W0_FMT;

   if (R200_DEBUG & RADEON_FALLBACKS)
      fprintf(stderr, "R200 end tcl fallback\n");
}
static void r200_get_lock(radeonContextPtr radeon)
{
   r200ContextPtr rmesa = (r200ContextPtr)radeon;
   drm_radeon_sarea_t *sarea = radeon->sarea;

   R200_STATECHANGE( rmesa, ctx );
   if (rmesa->radeon.sarea->tiling_enabled) {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] |= R200_COLOR_TILE_ENABLE;
   }
   else
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] &= ~R200_COLOR_TILE_ENABLE;

   if ( sarea->ctx_owner != rmesa->radeon.dri.hwContext ) {
      sarea->ctx_owner = rmesa->radeon.dri.hwContext;
   }
}
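/* Both lock-handling paths above rely on R200_STATECHANGE to flag the state
 * atom that owns the register being rewritten, so the next command-buffer
 * flush re-emits that atom.  A rough sketch of the idea, with made-up field
 * names (the real macro also handles pending vertices and other cases):
 */
#define EXAMPLE_STATECHANGE(rmesa, atom)        \
   do {                                         \
      (rmesa)->hw.atom.dirty = GL_TRUE;         \
      (rmesa)->hw.is_dirty = GL_TRUE;           \
   } while (0)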
/* Update the hardware state.  This is called if another context has
 * grabbed the hardware lock, which includes the X server.  This
 * function also updates the driver's window state after the X server
 * moves, resizes or restacks a window -- the change will be reflected
 * in the drawable position and clip rects.  Since the X server grabs
 * the hardware lock when it changes the window state, this routine will
 * automatically be called after such a change.
 */
void r200GetLock( r200ContextPtr rmesa, GLuint flags )
{
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   __DRIscreenPrivate *sPriv = rmesa->dri.screen;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   int i;

   drmGetLock( rmesa->dri.fd, rmesa->dri.hwContext, flags );

   /* The window might have moved, so we might need to get new clip
    * rects.
    *
    * NOTE: This releases and regrabs the hw lock to allow the X server
    * to respond to the DRI protocol request for new drawable info.
    * Since the hardware state depends on having the latest drawable
    * clip rects, all state checking must be done _after_ this call.
    */
   DRI_VALIDATE_DRAWABLE_INFO( sPriv, dPriv );

   if ( rmesa->lastStamp != dPriv->lastStamp ) {
      r200UpdatePageFlipping( rmesa );

      if (rmesa->glCtx->DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_BACK_LEFT)
         r200SetCliprects( rmesa, GL_BACK_LEFT );
      else
         r200SetCliprects( rmesa, GL_FRONT_LEFT );

      r200UpdateViewportOffset( rmesa->glCtx );
      rmesa->lastStamp = dPriv->lastStamp;
   }

   R200_STATECHANGE( rmesa, ctx );
   if (rmesa->sarea->tiling_enabled) {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] |= R200_COLOR_TILE_ENABLE;
   }
   else
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] &= ~R200_COLOR_TILE_ENABLE;

   if ( sarea->ctx_owner != rmesa->dri.hwContext ) {
      sarea->ctx_owner = rmesa->dri.hwContext;
   }

   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      DRI_AGE_TEXTURES( rmesa->texture_heaps[ i ] );
   }

   rmesa->lost_context = GL_TRUE;
}
/* Turn on/off page flipping according to the flags in the sarea:
 */
static void r200UpdatePageFlipping( r200ContextPtr rmesa )
{
   int use_back;

   rmesa->doPageFlip = rmesa->sarea->pfState;

   use_back = (rmesa->glCtx->DrawBuffer->_ColorDrawBufferMask[0] ==
               BUFFER_BIT_BACK_LEFT);
   use_back ^= (rmesa->sarea->pfCurrentPage == 1);

   if (use_back) {
      rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->backPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->frontPitch;
   }

   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
                                           + rmesa->r200Screen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
}
static void transition_to_swtnl( struct gl_context *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   R200_NEWPRIM( rmesa );

   r200ChooseVertexState( ctx );
   r200ChooseRenderState( ctx );

   _tnl_validate_shine_tables( ctx );

   tnl->Driver.NotifyMaterialChange = _tnl_validate_shine_tables;

   radeonReleaseArrays( ctx, ~0 );

   /* Still using the D3D based hardware-rasterizer from the radeon;
    * need to put the card into D3D mode to make it work:
    */
   R200_STATECHANGE( rmesa, vap );
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] &=
      ~(R200_VAP_TCL_ENABLE|R200_VAP_PROG_VTX_SHADER_ENABLE);
}
/* TCL render.
 */
static GLboolean r200_run_tcl_render( struct gl_context *ctx,
                                      struct tnl_pipeline_stage *stage )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint i;
   GLubyte *vimap_rev;
   /* use hw fixed order for simplicity, pos 0, weight 1, normal 2, fog 3,
      color0 - color3 4-7, texcoord0 - texcoord5 8-13, pos 1 14.
      Must not use more than 12 of those at the same time. */
   GLubyte map_rev_fixed[15] = {255, 255, 255, 255, 255, 255, 255, 255,
                                255, 255, 255, 255, 255, 255, 255};

   /* TODO: separate this from the swtnl pipeline */
   if (rmesa->radeon.TclFallback)
      return GL_TRUE;   /* fallback to software t&l */

   radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s\n", __func__);

   if (VB->Count == 0)
      return GL_FALSE;

   /* Validate state:
    */
   if (rmesa->radeon.NewGLState)
      if (!r200ValidateState( ctx ))
         return GL_TRUE; /* fallback to sw t&l */

   if (!_mesa_arb_vertex_program_enabled(ctx)) {
      /* NOTE: inputs != tnl->render_inputs - these are the untransformed
       * inputs.
       */
      map_rev_fixed[0] = VERT_ATTRIB_POS;
      /* technically there is no reason we always need VA_COLOR0. In theory
         could disable it depending on lighting, color materials, texturing... */
      map_rev_fixed[4] = VERT_ATTRIB_COLOR0;

      if (ctx->Light.Enabled) {
         map_rev_fixed[2] = VERT_ATTRIB_NORMAL;
      }

      /* this also enables VA_COLOR1 when using separate specular
         lighting model, which is unnecessary.
         FIXME: OTOH, we're missing the case where an ATI_fragment_shader accesses
         the secondary color (if lighting is disabled). The chip seems
         misconfigured for that though elsewhere (tcl output, might lock up) */
      if (_mesa_need_secondary_color(ctx)) {
         map_rev_fixed[5] = VERT_ATTRIB_COLOR1;
      }

      if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
         map_rev_fixed[3] = VERT_ATTRIB_FOG;
      }

      for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
         if (ctx->Texture.Unit[i]._Current) {
            if (rmesa->TexGenNeedNormals[i]) {
               map_rev_fixed[2] = VERT_ATTRIB_NORMAL;
            }
            map_rev_fixed[8 + i] = VERT_ATTRIB_TEX0 + i;
         }
      }
      vimap_rev = &map_rev_fixed[0];
   }
   else {
      /* vtx_tcl_output_vtxfmt_0/1 need to match configuration of "fragment
         part", since using some vertex interpolator later which is not in
         out_vtxfmt0/1 will lock up. It seems to be ok to write in vertex
         prog to a not enabled output however, so just don't mess with it.
         We only need to change compsel. */
      GLuint out_compsel = 0;
      const GLbitfield64 vp_out =
         rmesa->curr_vp_hw->mesa_program.info.outputs_written;

      vimap_rev = &rmesa->curr_vp_hw->inputmap_rev[0];
      assert(vp_out & BITFIELD64_BIT(VARYING_SLOT_POS));
      out_compsel = R200_OUTPUT_XYZW;
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_COL0)) {
         out_compsel |= R200_OUTPUT_COLOR_0;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_COL1)) {
         out_compsel |= R200_OUTPUT_COLOR_1;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_FOGC)) {
         out_compsel |= R200_OUTPUT_DISCRETE_FOG;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_PSIZ)) {
         out_compsel |= R200_OUTPUT_PT_SIZE;
      }
      for (i = VARYING_SLOT_TEX0; i < VARYING_SLOT_TEX6; i++) {
         if (vp_out & BITFIELD64_BIT(i)) {
            out_compsel |= R200_OUTPUT_TEX_0 << (i - VARYING_SLOT_TEX0);
         }
      }
      if (rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] != out_compsel) {
         R200_STATECHANGE( rmesa, vtx );
         rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] = out_compsel;
      }
   }

   /* Do the actual work:
    */
   radeonReleaseArrays( ctx, ~0 /* stage->changed_inputs */ );
   GLuint emit_end = r200EnsureEmitSize( ctx, vimap_rev )
      + rmesa->radeon.cmdbuf.cs->cdw;
   r200EmitArrays( ctx, vimap_rev );

   for (i = 0 ; i < VB->PrimitiveCount ; i++) {
      GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
         continue;

      if (VB->Elts)
         r200EmitEltPrimitive( ctx, start, start+length, prim );
      else
         r200EmitPrimitive( ctx, start, start+length, prim );
   }

   if ( emit_end < rmesa->radeon.cmdbuf.cs->cdw )
      WARN_ONCE("Rendering was %d commands larger than predicted size."
                " We might overflow command buffer.\n",
                rmesa->radeon.cmdbuf.cs->cdw - emit_end);

   return GL_FALSE;   /* finished the pipe */
}
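/* For the fixed-function path above, vimap_rev is indexed by the hardware
 * vertex slot order described in the comment at the top of the function
 * (pos 0, weight 1, normal 2, fog 3, colors 4-7, texcoords 8-13, pos1 14),
 * with 255 meaning "slot unused".  As an illustration only, a lit draw with
 * one texture unit enabled would end up with:
 */
static const GLubyte example_map_rev[15] = {
   VERT_ATTRIB_POS,          /* slot 0:  position           */
   255,                      /* slot 1:  weight (unused)    */
   VERT_ATTRIB_NORMAL,       /* slot 2:  normal             */
   255,                      /* slot 3:  fog (unused)       */
   VERT_ATTRIB_COLOR0,       /* slot 4:  primary color      */
   255, 255, 255,            /* slots 5-7: color1-3         */
   VERT_ATTRIB_TEX0,         /* slot 8:  texcoord 0         */
   255, 255, 255, 255, 255,  /* slots 9-13: tex1-5          */
   255                       /* slot 14: secondary position */
};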
void r200SetupVertexProg( struct gl_context *ctx ) { r200ContextPtr rmesa = R200_CONTEXT(ctx); struct r200_vertex_program *vp = (struct r200_vertex_program *)ctx->VertexProgram.Current; GLboolean fallback; GLint i; if (!vp->translated || (ctx->Fog.Enabled && ctx->Fog.Mode != vp->fogmode)) { rmesa->curr_vp_hw = NULL; r200_translate_vertex_program(ctx, vp); } /* could optimize setting up vertex progs away for non-tcl hw */ fallback = !(vp->native && r200VertexProgUpdateParams(ctx, vp)); TCL_FALLBACK(ctx, R200_TCL_FALLBACK_VERTEX_PROGRAM, fallback); if (rmesa->radeon.TclFallback) return; R200_STATECHANGE( rmesa, vap ); /* FIXME: fglrx sets R200_VAP_SINGLE_BUF_STATE_ENABLE too. Do we need it? maybe only when using more than 64 inst / 96 param? */ rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] |= R200_VAP_PROG_VTX_SHADER_ENABLE /*| R200_VAP_SINGLE_BUF_STATE_ENABLE*/; R200_STATECHANGE( rmesa, pvs ); rmesa->hw.pvs.cmd[PVS_CNTL_1] = (0 << R200_PVS_CNTL_1_PROGRAM_START_SHIFT) | ((vp->mesa_program.Base.NumNativeInstructions - 1) << R200_PVS_CNTL_1_PROGRAM_END_SHIFT) | (vp->pos_end << R200_PVS_CNTL_1_POS_END_SHIFT); rmesa->hw.pvs.cmd[PVS_CNTL_2] = (0 << R200_PVS_CNTL_2_PARAM_OFFSET_SHIFT) | (vp->mesa_program.Base.NumNativeParameters << R200_PVS_CNTL_2_PARAM_COUNT_SHIFT); /* maybe user clip planes just work with vertex progs... untested */ if (ctx->Transform.ClipPlanesEnabled) { R200_STATECHANGE( rmesa, tcl ); if (vp->mesa_program.IsPositionInvariant) { rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] |= (ctx->Transform.ClipPlanesEnabled << 2); } else { rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] &= ~(0xfc); } } if (vp != rmesa->curr_vp_hw) { GLuint count = vp->mesa_program.Base.NumNativeInstructions; drm_radeon_cmd_header_t tmp; R200_STATECHANGE( rmesa, vpi[0] ); R200_STATECHANGE( rmesa, vpi[1] ); /* FIXME: what about using a memcopy... */ for (i = 0; (i < 64) && i < count; i++) { rmesa->hw.vpi[0].cmd[VPI_OPDST_0 + 4 * i] = vp->instr[i].op; rmesa->hw.vpi[0].cmd[VPI_SRC0_0 + 4 * i] = vp->instr[i].src0; rmesa->hw.vpi[0].cmd[VPI_SRC1_0 + 4 * i] = vp->instr[i].src1; rmesa->hw.vpi[0].cmd[VPI_SRC2_0 + 4 * i] = vp->instr[i].src2; } /* hack up the cmd_size so not the whole state atom is emitted always. This may require some more thought, we may emit half progs on lost state, but hopefully it won't matter? WARNING: must not use R200_DB_STATECHANGE, this will produce bogus (and rejected) packet emits (due to the mismatched cmd_size and count in cmd/last_cmd) */ rmesa->hw.vpi[0].cmd_size = 1 + 4 * ((count > 64) ? 64 : count); tmp.i = rmesa->hw.vpi[0].cmd[VPI_CMD_0]; tmp.veclinear.count = (count > 64) ? 64 : count; rmesa->hw.vpi[0].cmd[VPI_CMD_0] = tmp.i; if (count > 64) { for (i = 0; i < (count - 64); i++) { rmesa->hw.vpi[1].cmd[VPI_OPDST_0 + 4 * i] = vp->instr[i + 64].op; rmesa->hw.vpi[1].cmd[VPI_SRC0_0 + 4 * i] = vp->instr[i + 64].src0; rmesa->hw.vpi[1].cmd[VPI_SRC1_0 + 4 * i] = vp->instr[i + 64].src1; rmesa->hw.vpi[1].cmd[VPI_SRC2_0 + 4 * i] = vp->instr[i + 64].src2; } rmesa->hw.vpi[1].cmd_size = 1 + 4 * (count - 64); tmp.i = rmesa->hw.vpi[1].cmd[VPI_CMD_0]; tmp.veclinear.count = count - 64; rmesa->hw.vpi[1].cmd[VPI_CMD_0] = tmp.i; } rmesa->curr_vp_hw = vp; } }
static void flush_prims( r200ContextPtr rmesa )
{
   int i,j;
   struct r200_dma_region tmp = rmesa->dma.current;

   tmp.buf->refcount++;
   tmp.aos_size = rmesa->vb.vertex_size;
   tmp.aos_stride = rmesa->vb.vertex_size;
   tmp.aos_start = GET_START(&tmp);

   rmesa->dma.current.ptr = rmesa->dma.current.start +=
      (rmesa->vb.initial_counter - rmesa->vb.counter) *
      rmesa->vb.vertex_size * 4;

   rmesa->tcl.vertex_format = rmesa->vb.vtxfmt_0;
   rmesa->tcl.aos_components[0] = &tmp;
   rmesa->tcl.nr_aos_components = 1;
   rmesa->dma.flush = NULL;

   /* Optimize the primitive list:
    */
   if (rmesa->vb.nrprims > 1) {
      for (j = 0, i = 1 ; i < rmesa->vb.nrprims; i++) {
         int pj = rmesa->vb.primlist[j].prim & 0xf;
         int pi = rmesa->vb.primlist[i].prim & 0xf;

         if (pj == pi && discreet_gl_prim[pj] &&
             rmesa->vb.primlist[i].start == rmesa->vb.primlist[j].end) {
            rmesa->vb.primlist[j].end = rmesa->vb.primlist[i].end;
         }
         else {
            j++;
            if (j != i) rmesa->vb.primlist[j] = rmesa->vb.primlist[i];
         }
      }
      rmesa->vb.nrprims = j+1;
   }

   if (rmesa->vb.vtxfmt_0 != rmesa->hw.vtx.cmd[VTX_VTXFMT_0] ||
       rmesa->vb.vtxfmt_1 != rmesa->hw.vtx.cmd[VTX_VTXFMT_1]) {
      R200_STATECHANGE( rmesa, vtx );
      rmesa->hw.vtx.cmd[VTX_VTXFMT_0] = rmesa->vb.vtxfmt_0;
      rmesa->hw.vtx.cmd[VTX_VTXFMT_1] = rmesa->vb.vtxfmt_1;
   }

   for (i = 0 ; i < rmesa->vb.nrprims; i++) {
      if (R200_DEBUG & DEBUG_PRIMS)
         fprintf(stderr, "vtxfmt prim %d: %s %d..%d\n", i,
                 _mesa_lookup_enum_by_nr( rmesa->vb.primlist[i].prim &
                                          PRIM_MODE_MASK ),
                 rmesa->vb.primlist[i].start,
                 rmesa->vb.primlist[i].end);

      if (rmesa->vb.primlist[i].start < rmesa->vb.primlist[i].end)
         r200EmitPrimitive( rmesa->glCtx,
                            rmesa->vb.primlist[i].start,
                            rmesa->vb.primlist[i].end,
                            rmesa->vb.primlist[i].prim );
   }

   rmesa->vb.nrprims = 0;
   r200ReleaseDmaRegion( rmesa, &tmp, __FUNCTION__ );
}
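/* The optimization loop in flush_prims above folds neighbouring primlist
 * entries together when they can legally be drawn as one range: same
 * primitive type, a "discrete" primitive (so concatenation does not change
 * the picture), and the second range starting exactly where the first ended.
 * The same test restated as a hypothetical predicate; 'discrete_tab' stands
 * in for the driver's discreet_gl_prim[] table.
 */
static GLboolean can_merge_prims( GLuint prim_j, GLuint end_j,
                                  GLuint prim_i, GLuint start_i,
                                  const GLboolean *discrete_tab )
{
   GLuint pj = prim_j & 0xf;
   GLuint pi = prim_i & 0xf;
   return pj == pi && discrete_tab[pj] && start_i == end_j;
}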
static void r200UpdateFSArith( struct gl_context *ctx ) { r200ContextPtr rmesa = R200_CONTEXT(ctx); GLuint *afs_cmd; const struct ati_fragment_shader *shader = ctx->ATIFragmentShader.Current; GLuint pass; R200_STATECHANGE( rmesa, afs[0] ); R200_STATECHANGE( rmesa, afs[1] ); if (shader->NumPasses < 2) { afs_cmd = (GLuint *) rmesa->hw.afs[1].cmd; } else { afs_cmd = (GLuint *) rmesa->hw.afs[0].cmd; } for (pass = 0; pass < shader->NumPasses; pass++) { GLuint opnum = 0; GLuint pc; for (pc = 0; pc < shader->numArithInstr[pass]; pc++) { GLuint optype; struct atifs_instruction *inst = &shader->Instructions[pass][pc]; SET_INST(opnum, 0) = 0; SET_INST_2(opnum, 0) = 0; SET_INST(opnum, 1) = 0; SET_INST_2(opnum, 1) = 0; for (optype = 0; optype < 2; optype++) { GLuint tfactor = 0; if (inst->Opcode[optype]) { switch (inst->Opcode[optype]) { /* these are all MADD in disguise MADD is A * B + C so for GL_ADD use arg B/C and make A complement 0 for GL_SUB use arg B/C, negate C and make A complement 0 for GL_MOV use arg C for GL_MUL use arg A for GL_MAD all good */ case GL_SUB_ATI: /* negate C */ SET_INST(opnum, optype) |= R200_TXC_NEG_ARG_C; /* fallthrough */ case GL_ADD_ATI: r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][1], 2, &tfactor); /* A = complement 0 */ SET_INST(opnum, optype) |= R200_TXC_COMP_ARG_A; SET_INST(opnum, optype) |= R200_TXC_OP_MADD; break; case GL_MOV_ATI: /* put arg0 in C */ r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 2, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_MADD; break; case GL_MAD_ATI: r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][2], 2, &tfactor); /* fallthrough */ case GL_MUL_ATI: r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][1], 1, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_MADD; break; case GL_LERP_ATI: /* arg order is not native chip order, swap A and C */ r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 2, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][1], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][2], 0, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_LERP; break; case GL_CND_ATI: r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][1], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][2], 2, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_CONDITIONAL; break; case GL_CND0_ATI: r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][1], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, optype, inst->SrcReg[optype][2], 2, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_CND0; break; /* cannot specify dot ops as alpha ops directly */ case GL_DOT2_ADD_ATI: if (optype) SET_INST_2(opnum, 1) |= R200_TXA_DOT_ALPHA; else { r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][1], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][2], 2, &tfactor); SET_INST(opnum, 0) |= R200_TXC_OP_DOT2_ADD; } break; case GL_DOT3_ATI: if (optype) SET_INST_2(opnum, 1) |= R200_TXA_DOT_ALPHA; else { r200SetFragShaderArg(afs_cmd, opnum, 0, 
inst->SrcReg[0][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][1], 1, &tfactor); SET_INST(opnum, 0) |= R200_TXC_OP_DOT3; } break; case GL_DOT4_ATI: /* experimental verification: for dot4 setup of alpha args is needed (dstmod is ignored, though, so dot2/dot3 should be safe) the hardware apparently does R1*R2 + G1*G2 + B1*B2 + A3*A4 but the API doesn't allow it */ if (optype) SET_INST_2(opnum, 1) |= R200_TXA_DOT_ALPHA; else { r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 0, inst->SrcReg[0][1], 1, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 1, inst->SrcReg[0][0], 0, &tfactor); r200SetFragShaderArg(afs_cmd, opnum, 1, inst->SrcReg[0][1], 1, &tfactor); SET_INST(opnum, optype) |= R200_TXC_OP_DOT4; } break; } } /* destination */ if (inst->DstReg[optype].Index) { GLuint dstreg = inst->DstReg[optype].Index - GL_REG_0_ATI; GLuint dstmask = inst->DstReg[optype].dstMask; GLuint sat = inst->DstReg[optype].dstMod & GL_SATURATE_BIT_ATI; GLuint dstmod = inst->DstReg[optype].dstMod; dstmod &= ~GL_SATURATE_BIT_ATI; SET_INST_2(opnum, optype) |= (dstreg + 1) << R200_TXC_OUTPUT_REG_SHIFT; SET_INST_2(opnum, optype) |= dstmask_table[dstmask]; /* fglrx does clamp the last instructions to 0_1 it seems */ /* this won't necessarily catch the last instruction which writes to reg0 */ if (sat || (pc == (shader->numArithInstr[pass] - 1) && ((pass == 1) || (shader->NumPasses == 1)))) SET_INST_2(opnum, optype) |= R200_TXC_CLAMP_0_1; else /*should we clamp or not? spec is vague, I would suppose yes but fglrx doesn't */ SET_INST_2(opnum, optype) |= R200_TXC_CLAMP_8_8; /* SET_INST_2(opnum, optype) |= R200_TXC_CLAMP_WRAP;*/ switch(dstmod) { case GL_2X_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_2X; break; case GL_4X_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_4X; break; case GL_8X_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_8X; break; case GL_HALF_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_INV2; break; case GL_QUARTER_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_INV4; break; case GL_EIGHTH_BIT_ATI: SET_INST_2(opnum, optype) |= R200_TXC_SCALE_INV8; break; default: break; } } } /* fprintf(stderr, "pass %d nr %d inst 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", pass, opnum, SET_INST(opnum, 0), SET_INST_2(opnum, 0), SET_INST(opnum, 1), SET_INST_2(opnum, 1));*/ opnum++; } afs_cmd = (GLuint *) rmesa->hw.afs[1].cmd; } rmesa->afs_loaded = ctx->ATIFragmentShader.Current; }
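/* The dstMod handling at the end of the arithmetic loop above is a direct
 * mapping from the ATI_fragment_shader destination modifiers to the hardware
 * scale field.  Restated as a small hypothetical lookup helper (the
 * R200_TXC_SCALE_* values are the ones already used above):
 */
static GLuint dstmod_to_scale( GLuint dstmod )
{
   switch (dstmod & ~GL_SATURATE_BIT_ATI) {
   case GL_2X_BIT_ATI:      return R200_TXC_SCALE_2X;
   case GL_4X_BIT_ATI:      return R200_TXC_SCALE_4X;
   case GL_8X_BIT_ATI:      return R200_TXC_SCALE_8X;
   case GL_HALF_BIT_ATI:    return R200_TXC_SCALE_INV2;
   case GL_QUARTER_BIT_ATI: return R200_TXC_SCALE_INV4;
   case GL_EIGHTH_BIT_ATI:  return R200_TXC_SCALE_INV8;
   default:                 return 0;   /* keep the default 1x scale */
   }
}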
/* TCL render. */ static GLboolean r200_run_tcl_render( GLcontext *ctx, struct tnl_pipeline_stage *stage ) { r200ContextPtr rmesa = R200_CONTEXT(ctx); TNLcontext *tnl = TNL_CONTEXT(ctx); struct vertex_buffer *VB = &tnl->vb; GLuint i; GLubyte *vimap_rev; /* use hw fixed order for simplicity, pos 0, weight 1, normal 2, fog 3, color0 - color3 4-7, texcoord0 - texcoord5 8-13, pos 1 14. Must not use more than 12 of those at the same time. */ GLubyte map_rev_fixed[15] = {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}; /* TODO: separate this from the swtnl pipeline */ if (rmesa->TclFallback) return GL_TRUE; /* fallback to software t&l */ if (R200_DEBUG & DEBUG_PRIMS) fprintf(stderr, "%s\n", __FUNCTION__); if (VB->Count == 0) return GL_FALSE; /* Validate state: */ if (rmesa->NewGLState) r200ValidateState( ctx ); if (!ctx->VertexProgram._Enabled) { /* NOTE: inputs != tnl->render_inputs - these are the untransformed * inputs. */ map_rev_fixed[0] = VERT_ATTRIB_POS; /* technically there is no reason we always need VA_COLOR0. In theory could disable it depending on lighting, color materials, texturing... */ map_rev_fixed[4] = VERT_ATTRIB_COLOR0; if (ctx->Light.Enabled) { map_rev_fixed[2] = VERT_ATTRIB_NORMAL; } /* this also enables VA_COLOR1 when using separate specular lighting model, which is unnecessary. FIXME: OTOH, we're missing the case where a ATI_fragment_shader accesses the secondary color (if lighting is disabled). The chip seems misconfigured for that though elsewhere (tcl output, might lock up) */ if (ctx->_TriangleCaps & DD_SEPARATE_SPECULAR) { map_rev_fixed[5] = VERT_ATTRIB_COLOR1; } if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) { map_rev_fixed[3] = VERT_ATTRIB_FOG; } for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) { if (ctx->Texture.Unit[i]._ReallyEnabled) { if (rmesa->TexGenNeedNormals[i]) { map_rev_fixed[2] = VERT_ATTRIB_NORMAL; } map_rev_fixed[8 + i] = VERT_ATTRIB_TEX0 + i; } } vimap_rev = &map_rev_fixed[0]; } else { /* vtx_tcl_output_vtxfmt_0/1 need to match configuration of "fragment part", since using some vertex interpolator later which is not in out_vtxfmt0/1 will lock up. It seems to be ok to write in vertex prog to a not enabled output however, so just don't mess with it. We only need to change compsel. 
*/ GLuint out_compsel = 0; GLuint vp_out = rmesa->curr_vp_hw->mesa_program.Base.OutputsWritten; vimap_rev = &rmesa->curr_vp_hw->inputmap_rev[0]; assert(vp_out & (1 << VERT_RESULT_HPOS)); out_compsel = R200_OUTPUT_XYZW; if (vp_out & (1 << VERT_RESULT_COL0)) { out_compsel |= R200_OUTPUT_COLOR_0; } if (vp_out & (1 << VERT_RESULT_COL1)) { out_compsel |= R200_OUTPUT_COLOR_1; } if (vp_out & (1 << VERT_RESULT_FOGC)) { out_compsel |= R200_OUTPUT_DISCRETE_FOG; } if (vp_out & (1 << VERT_RESULT_PSIZ)) { out_compsel |= R200_OUTPUT_PT_SIZE; } for (i = VERT_RESULT_TEX0; i < VERT_RESULT_TEX6; i++) { if (vp_out & (1 << i)) { out_compsel |= R200_OUTPUT_TEX_0 << (i - VERT_RESULT_TEX0); } } if (rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] != out_compsel) { R200_STATECHANGE( rmesa, vtx ); rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] = out_compsel; } } /* Do the actual work: */ r200ReleaseArrays( ctx, ~0 /* stage->changed_inputs */ ); r200EmitArrays( ctx, vimap_rev ); rmesa->tcl.Elts = VB->Elts; for (i = 0 ; i < VB->PrimitiveCount ; i++) { GLuint prim = _tnl_translate_prim(&VB->Primitive[i]); GLuint start = VB->Primitive[i].start; GLuint length = VB->Primitive[i].count; if (!length) continue; if (rmesa->tcl.Elts) r200EmitEltPrimitive( ctx, start, start+length, prim ); else r200EmitPrimitive( ctx, start, start+length, prim ); } return GL_FALSE; /* finished the pipe */ }
/* Emit any changed arrays to new GART memory, re-emit a packet to * update the arrays. */ void r200EmitArrays( struct gl_context *ctx, GLubyte *vimap_rev ) { r200ContextPtr rmesa = R200_CONTEXT( ctx ); struct vertex_buffer *VB = &TNL_CONTEXT( ctx )->vb; GLuint nr = 0; GLuint vfmt0 = 0, vfmt1 = 0; GLuint count = VB->Count; GLuint i, emitsize; // fprintf(stderr,"emit arrays\n"); for ( i = 0; i < 15; i++ ) { GLubyte attrib = vimap_rev[i]; if (attrib != 255) { switch (i) { case 0: emitsize = (VB->AttribPtr[attrib]->size); switch (emitsize) { case 4: vfmt0 |= R200_VTX_W0; /* fallthrough */ case 3: vfmt0 |= R200_VTX_Z0; break; case 2: break; default: assert(0); } break; case 1: assert(attrib == VERT_ATTRIB_WEIGHT); emitsize = (VB->AttribPtr[attrib]->size); vfmt0 |= emitsize << R200_VTX_WEIGHT_COUNT_SHIFT; break; case 2: assert(attrib == VERT_ATTRIB_NORMAL); emitsize = 3; vfmt0 |= R200_VTX_N0; break; case 3: /* special handling to fix up fog. Will get us into trouble with vbos...*/ assert(attrib == VERT_ATTRIB_FOG); if (!rmesa->radeon.tcl.aos[i].bo) { if (ctx->VertexProgram._Enabled) rcommon_emit_vector( ctx, &(rmesa->radeon.tcl.aos[nr]), (char *)VB->AttribPtr[attrib]->data, 1, VB->AttribPtr[attrib]->stride, count); else rcommon_emit_vecfog( ctx, &(rmesa->radeon.tcl.aos[nr]), (char *)VB->AttribPtr[attrib]->data, VB->AttribPtr[attrib]->stride, count); } vfmt0 |= R200_VTX_DISCRETE_FOG; goto after_emit; break; case 4: case 5: case 6: case 7: if (VB->AttribPtr[attrib]->size == 4 && (VB->AttribPtr[attrib]->stride != 0 || VB->AttribPtr[attrib]->data[0][3] != 1.0)) emitsize = 4; else emitsize = 3; if (emitsize == 4) vfmt0 |= R200_VTX_FP_RGBA << (R200_VTX_COLOR_0_SHIFT + (i - 4) * 2); else { vfmt0 |= R200_VTX_FP_RGB << (R200_VTX_COLOR_0_SHIFT + (i - 4) * 2); } break; case 8: case 9: case 10: case 11: case 12: case 13: emitsize = VB->AttribPtr[attrib]->size; vfmt1 |= emitsize << (R200_VTX_TEX0_COMP_CNT_SHIFT + (i - 8) * 3); break; case 14: emitsize = VB->AttribPtr[attrib]->size >= 2 ? VB->AttribPtr[attrib]->size : 2; switch (emitsize) { case 2: vfmt0 |= R200_VTX_XY1; /* fallthrough */ case 3: vfmt0 |= R200_VTX_Z1; /* fallthrough */ case 4: vfmt0 |= R200_VTX_W1; /* fallthrough */ } break; default: assert(0); emitsize = 0; } if (!rmesa->radeon.tcl.aos[nr].bo) { rcommon_emit_vector( ctx, &(rmesa->radeon.tcl.aos[nr]), (char *)VB->AttribPtr[attrib]->data, emitsize, VB->AttribPtr[attrib]->stride, count ); } after_emit: assert(nr < 12); nr++; } } if (vfmt0 != rmesa->hw.vtx.cmd[VTX_VTXFMT_0] || vfmt1 != rmesa->hw.vtx.cmd[VTX_VTXFMT_1]) { R200_STATECHANGE( rmesa, vtx ); rmesa->hw.vtx.cmd[VTX_VTXFMT_0] = vfmt0; rmesa->hw.vtx.cmd[VTX_VTXFMT_1] = vfmt1; } rmesa->radeon.tcl.aos_count = nr; }
/* Emit any changed arrays to new GART memory, re-emit a packet to * update the arrays. */ void r200EmitArrays( GLcontext *ctx, GLuint inputs ) { r200ContextPtr rmesa = R200_CONTEXT( ctx ); struct vertex_buffer *VB = &TNL_CONTEXT( ctx )->vb; struct r200_dma_region **component = rmesa->tcl.aos_components; GLuint nr = 0; GLuint vfmt0 = 0, vfmt1 = 0; GLuint count = VB->Count; GLuint i; if (1) { if (!rmesa->tcl.obj.buf) emit_vector( ctx, &rmesa->tcl.obj, (char *)VB->ObjPtr->data, VB->ObjPtr->size, VB->ObjPtr->stride, count); switch( VB->ObjPtr->size ) { case 4: vfmt0 |= R200_VTX_W0; case 3: vfmt0 |= R200_VTX_Z0; case 2: default: break; } component[nr++] = &rmesa->tcl.obj; } if (inputs & VERT_BIT_NORMAL) { if (!rmesa->tcl.norm.buf) emit_vector( ctx, &(rmesa->tcl.norm), (char *)VB->NormalPtr->data, 3, VB->NormalPtr->stride, count); vfmt0 |= R200_VTX_N0; component[nr++] = &rmesa->tcl.norm; } if (inputs & VERT_BIT_FOG) { if (!rmesa->tcl.fog.buf) emit_vecfog( ctx, &(rmesa->tcl.fog), (char *)VB->FogCoordPtr->data, VB->FogCoordPtr->stride, count); vfmt0 |= R200_VTX_DISCRETE_FOG; component[nr++] = &rmesa->tcl.fog; } if (inputs & VERT_BIT_COLOR0) { int emitsize; if (VB->ColorPtr[0]->size == 4 && (VB->ColorPtr[0]->stride != 0 || VB->ColorPtr[0]->data[0][3] != 1.0)) { vfmt0 |= R200_VTX_FP_RGBA << R200_VTX_COLOR_0_SHIFT; emitsize = 4; } else { vfmt0 |= R200_VTX_FP_RGB << R200_VTX_COLOR_0_SHIFT; emitsize = 3; } if (!rmesa->tcl.rgba.buf) emit_vector( ctx, &(rmesa->tcl.rgba), (char *)VB->ColorPtr[0]->data, emitsize, VB->ColorPtr[0]->stride, count); component[nr++] = &rmesa->tcl.rgba; } if (inputs & VERT_BIT_COLOR1) { if (!rmesa->tcl.spec.buf) { emit_vector( ctx, &rmesa->tcl.spec, (char *)VB->SecondaryColorPtr[0]->data, 3, VB->SecondaryColorPtr[0]->stride, count); } /* How does this work? */ vfmt0 |= R200_VTX_FP_RGB << R200_VTX_COLOR_1_SHIFT; component[nr++] = &rmesa->tcl.spec; } for ( i = 0 ; i < ctx->Const.MaxTextureUnits ; i++ ) { if (inputs & (VERT_BIT_TEX0 << i)) { if (!rmesa->tcl.tex[i].buf) emit_vector( ctx, &(rmesa->tcl.tex[i]), (char *)VB->TexCoordPtr[i]->data, VB->TexCoordPtr[i]->size, VB->TexCoordPtr[i]->stride, count ); vfmt1 |= VB->TexCoordPtr[i]->size << (i * 3); component[nr++] = &rmesa->tcl.tex[i]; } } if (vfmt0 != rmesa->hw.vtx.cmd[VTX_VTXFMT_0] || vfmt1 != rmesa->hw.vtx.cmd[VTX_VTXFMT_1]) { R200_STATECHANGE( rmesa, vtx ); rmesa->hw.vtx.cmd[VTX_VTXFMT_0] = vfmt0; rmesa->hw.vtx.cmd[VTX_VTXFMT_1] = vfmt1; } rmesa->tcl.nr_aos_components = nr; rmesa->tcl.vertex_format = vfmt0; }
void r200UpdateTextureState( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   GLboolean ok;
   GLuint dbg;

   ok = (r200UpdateTextureUnit( ctx, 0 ) &&
         r200UpdateTextureUnit( ctx, 1 ));

   FALLBACK( rmesa, R200_FALLBACK_TEXTURE, !ok );

   if (rmesa->TclFallback)
      r200ChooseVertexState( ctx );

   /*
    * T0 hang workaround -------------
    */
#if 1
   if ((rmesa->hw.ctx.cmd[CTX_PP_CNTL] & R200_TEX_ENABLE_MASK) == R200_TEX_0_ENABLE &&
       (rmesa->hw.tex[0].cmd[TEX_PP_TXFILTER] & R200_MIN_FILTER_MASK) > R200_MIN_FILTER_LINEAR) {
      R200_STATECHANGE(rmesa, ctx);
      R200_STATECHANGE(rmesa, tex[1]);
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= R200_TEX_1_ENABLE;
      rmesa->hw.tex[1].cmd[TEX_PP_TXFORMAT] &= ~TEXOBJ_TXFORMAT_MASK;
      rmesa->hw.tex[1].cmd[TEX_PP_TXFORMAT] |= 0x08000000;
   }
   else {
      if ((rmesa->hw.ctx.cmd[CTX_PP_CNTL] & R200_TEX_1_ENABLE) &&
          (rmesa->hw.tex[1].cmd[TEX_PP_TXFORMAT] & 0x08000000)) {
         R200_STATECHANGE(rmesa, tex[1]);
         rmesa->hw.tex[1].cmd[TEX_PP_TXFORMAT] &= ~0x08000000;
      }
   }
#endif

#if 1
   /*
    * Texture cache LRU hang workaround -------------
    */
   dbg = 0x0;

   if (((rmesa->hw.ctx.cmd[CTX_PP_CNTL] & R200_TEX_0_ENABLE) &&
        ((((rmesa->hw.tex[0].cmd[TEX_PP_TXFILTER] & R200_MIN_FILTER_MASK)) & 0x04) == 0))) {
      dbg |= 0x02;
   }

   if (((rmesa->hw.ctx.cmd[CTX_PP_CNTL] & R200_TEX_1_ENABLE) &&
        ((((rmesa->hw.tex[1].cmd[TEX_PP_TXFILTER] & R200_MIN_FILTER_MASK)) & 0x04) == 0))) {
      dbg |= 0x04;
   }

   if (dbg != rmesa->hw.tam.cmd[TAM_DEBUG3]) {
      R200_STATECHANGE( rmesa, tam );
      rmesa->hw.tam.cmd[TAM_DEBUG3] = dbg;
      if (0) printf("TEXCACHE LRU HANG WORKAROUND %x\n", dbg);
   }
#endif
}
static void r200UpdateTextureEnv( GLcontext *ctx, int unit ) { r200ContextPtr rmesa = R200_CONTEXT(ctx); const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit]; GLuint color_combine, alpha_combine; GLuint color_scale = rmesa->hw.pix[unit].cmd[PIX_PP_TXCBLEND2]; GLuint alpha_scale = rmesa->hw.pix[unit].cmd[PIX_PP_TXABLEND2]; if ( R200_DEBUG & DEBUG_TEXTURE ) { fprintf( stderr, "%s( %p, %d )\n", __FUNCTION__, ctx, unit ); } /* Set the texture environment state. Isn't this nice and clean? * The R200 will automagically set the texture alpha to 0xff when * the texture format does not include an alpha component. This * reduces the amount of special-casing we have to do, alpha-only * textures being a notable exception. */ if ( !texUnit->_ReallyEnabled ) { /* Don't cache these results. */ rmesa->state.texture.unit[unit].format = 0; rmesa->state.texture.unit[unit].envMode = 0; color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_DISABLE]; } else { const struct gl_texture_object *tObj = texUnit->_Current; const GLenum format = tObj->Image[tObj->BaseLevel]->Format; GLuint color_arg[3], alpha_arg[3]; GLuint i, numColorArgs = 0, numAlphaArgs = 0; GLuint RGBshift = texUnit->CombineScaleShiftRGB; GLuint Ashift = texUnit->CombineScaleShiftA; switch ( texUnit->EnvMode ) { case GL_REPLACE: switch ( format ) { case GL_RGBA: case GL_LUMINANCE_ALPHA: case GL_INTENSITY: color_combine = r200_color_combine[unit][R200_REPLACE]; alpha_combine = r200_alpha_combine[unit][R200_REPLACE]; break; case GL_ALPHA: color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_REPLACE]; break; case GL_LUMINANCE: case GL_RGB: case GL_YCBCR_MESA: color_combine = r200_color_combine[unit][R200_REPLACE]; alpha_combine = r200_alpha_combine[unit][R200_DISABLE]; break; case GL_COLOR_INDEX: default: return; } break; case GL_MODULATE: switch ( format ) { case GL_RGBA: case GL_LUMINANCE_ALPHA: case GL_INTENSITY: color_combine = r200_color_combine[unit][R200_MODULATE]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_ALPHA: color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_RGB: case GL_LUMINANCE: case GL_YCBCR_MESA: color_combine = r200_color_combine[unit][R200_MODULATE]; alpha_combine = r200_alpha_combine[unit][R200_DISABLE]; break; case GL_COLOR_INDEX: default: return; } break; case GL_DECAL: switch ( format ) { case GL_RGBA: case GL_RGB: case GL_YCBCR_MESA: color_combine = r200_color_combine[unit][R200_DECAL]; alpha_combine = r200_alpha_combine[unit][R200_DISABLE]; break; case GL_ALPHA: case GL_LUMINANCE: case GL_LUMINANCE_ALPHA: case GL_INTENSITY: color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_DISABLE]; break; case GL_COLOR_INDEX: default: return; } break; case GL_BLEND: switch ( format ) { case GL_RGBA: case GL_RGB: case GL_LUMINANCE: case GL_LUMINANCE_ALPHA: case GL_YCBCR_MESA: color_combine = r200_color_combine[unit][R200_BLEND]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_ALPHA: color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_INTENSITY: color_combine = r200_color_combine[unit][R200_BLEND]; alpha_combine = r200_alpha_combine[unit][R200_BLEND]; break; case GL_COLOR_INDEX: default: return; } break; case GL_ADD: switch ( format ) { case GL_RGBA: case GL_RGB: case GL_LUMINANCE: 
case GL_LUMINANCE_ALPHA: case GL_YCBCR_MESA: color_combine = r200_color_combine[unit][R200_ADD]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_ALPHA: color_combine = r200_color_combine[unit][R200_DISABLE]; alpha_combine = r200_alpha_combine[unit][R200_MODULATE]; break; case GL_INTENSITY: color_combine = r200_color_combine[unit][R200_ADD]; alpha_combine = r200_alpha_combine[unit][R200_ADD]; break; case GL_COLOR_INDEX: default: return; } break; case GL_COMBINE: /* Don't cache these results. */ rmesa->state.texture.unit[unit].format = 0; rmesa->state.texture.unit[unit].envMode = 0; /* Step 0: * Calculate how many arguments we need to process. */ switch ( texUnit->CombineModeRGB ) { case GL_REPLACE: numColorArgs = 1; break; case GL_MODULATE: case GL_ADD: case GL_ADD_SIGNED: case GL_SUBTRACT: case GL_DOT3_RGB: case GL_DOT3_RGBA: case GL_DOT3_RGB_EXT: case GL_DOT3_RGBA_EXT: numColorArgs = 2; break; case GL_INTERPOLATE: numColorArgs = 3; break; default: return; } switch ( texUnit->CombineModeA ) { case GL_REPLACE: numAlphaArgs = 1; break; case GL_SUBTRACT: case GL_MODULATE: case GL_ADD: case GL_ADD_SIGNED: numAlphaArgs = 2; break; case GL_INTERPOLATE: numAlphaArgs = 3; break; default: return; } /* Step 1: * Extract the color and alpha combine function arguments. */ for ( i = 0 ; i < numColorArgs ; i++ ) { const GLuint op = texUnit->CombineOperandRGB[i] - GL_SRC_COLOR; assert(op >= 0); assert(op <= 3); switch ( texUnit->CombineSourceRGB[i] ) { case GL_TEXTURE: color_arg[i] = r200_register_color[op][unit]; break; case GL_CONSTANT: color_arg[i] = r200_tfactor_color[op]; break; case GL_PRIMARY_COLOR: color_arg[i] = r200_primary_color[op]; break; case GL_PREVIOUS: if (unit == 0) color_arg[i] = r200_primary_color[op]; else color_arg[i] = r200_register_color[op][0]; break; default: return; } } for ( i = 0 ; i < numAlphaArgs ; i++ ) { const GLuint op = texUnit->CombineOperandA[i] - GL_SRC_ALPHA; assert(op >= 0); assert(op <= 1); switch ( texUnit->CombineSourceA[i] ) { case GL_TEXTURE: alpha_arg[i] = r200_register_alpha[op][unit]; break; case GL_CONSTANT: alpha_arg[i] = r200_tfactor_alpha[op]; break; case GL_PRIMARY_COLOR: alpha_arg[i] = r200_primary_alpha[op]; break; case GL_PREVIOUS: if (unit == 0) alpha_arg[i] = r200_primary_alpha[op]; else alpha_arg[i] = r200_register_alpha[op][0]; break; default: return; } } /* Step 2: * Build up the color and alpha combine functions. 
*/ switch ( texUnit->CombineModeRGB ) { case GL_REPLACE: color_combine = (R200_TXC_ARG_A_ZERO | R200_TXC_ARG_B_ZERO | R200_TXC_OP_MADD); R200_COLOR_ARG( 0, C ); break; case GL_MODULATE: color_combine = (R200_TXC_ARG_C_ZERO | R200_TXC_OP_MADD); R200_COLOR_ARG( 0, A ); R200_COLOR_ARG( 1, B ); break; case GL_ADD: color_combine = (R200_TXC_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXC_OP_MADD); R200_COLOR_ARG( 0, A ); R200_COLOR_ARG( 1, C ); break; case GL_SUBTRACT: color_combine = (R200_TXC_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXC_NEG_ARG_C | R200_TXC_OP_MADD); R200_COLOR_ARG( 0, A ); R200_COLOR_ARG( 1, C ); break; case GL_ADD_SIGNED: color_combine = (R200_TXC_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXC_BIAS_ARG_C | /* new */ R200_TXC_OP_MADD); /* was ADDSIGNED */ R200_COLOR_ARG( 0, A ); R200_COLOR_ARG( 1, C ); break; case GL_INTERPOLATE: color_combine = (R200_TXC_OP_LERP); R200_COLOR_ARG( 0, B ); R200_COLOR_ARG( 1, A ); R200_COLOR_ARG( 2, C ); break; case GL_DOT3_RGB_EXT: case GL_DOT3_RGBA_EXT: RGBshift = 0; Ashift = 0; /* FALLTHROUGH */ case GL_DOT3_RGB: case GL_DOT3_RGBA: /* DOT3 works differently on R200 than on R100. On R100, just * setting the DOT3 mode did everything for you. On R200, the * driver has to enable the biasing (the -0.5 in the combine * equation), and it has add the 4x scale factor. The hardware * only supports up to 8x in the post filter, so 2x part of it * happens on the inputs going into the combiner. */ RGBshift++; Ashift = RGBshift; color_combine = (R200_TXC_ARG_C_ZERO | R200_TXC_OP_DOT3 | R200_TXC_BIAS_ARG_A | R200_TXC_BIAS_ARG_B | R200_TXC_SCALE_ARG_A | R200_TXC_SCALE_ARG_B); R200_COLOR_ARG( 0, A ); R200_COLOR_ARG( 1, B ); break; default: return; } switch ( texUnit->CombineModeA ) { case GL_REPLACE: alpha_combine = (R200_TXA_ARG_A_ZERO | R200_TXA_ARG_B_ZERO | R200_TXA_OP_MADD); R200_ALPHA_ARG( 0, C ); break; case GL_MODULATE: alpha_combine = (R200_TXA_ARG_C_ZERO | R200_TXA_OP_MADD); R200_ALPHA_ARG( 0, A ); R200_ALPHA_ARG( 1, B ); break; case GL_ADD: alpha_combine = (R200_TXA_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXA_OP_MADD); R200_ALPHA_ARG( 0, A ); R200_ALPHA_ARG( 1, C ); break; case GL_SUBTRACT: alpha_combine = (R200_TXA_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXC_NEG_ARG_C | R200_TXA_OP_MADD); R200_ALPHA_ARG( 0, A ); R200_ALPHA_ARG( 1, C ); break; case GL_ADD_SIGNED: alpha_combine = (R200_TXA_ARG_B_ZERO | R200_TXC_COMP_ARG_B | R200_TXC_BIAS_ARG_C | /* new */ R200_TXA_OP_MADD); /* was ADDSIGNED */ R200_ALPHA_ARG( 0, A ); R200_ALPHA_ARG( 1, C ); break; case GL_INTERPOLATE: alpha_combine = (R200_TXA_OP_LERP); R200_ALPHA_ARG( 0, B ); R200_ALPHA_ARG( 1, A ); R200_ALPHA_ARG( 2, C ); break; default: return; } if ( texUnit->CombineModeRGB == GL_DOT3_RGB ) { alpha_scale |= R200_TXA_DOT_ALPHA; } /* Step 3: * Apply the scale factor. The EXT version of the DOT3 extension does * not support the scale factor, but the ARB version (and the version in * OpenGL 1.3) does. */ color_scale &= ~R200_TXC_SCALE_MASK; alpha_scale &= ~R200_TXA_SCALE_MASK; color_scale |= (RGBshift << R200_TXC_SCALE_SHIFT); alpha_scale |= (Ashift << R200_TXA_SCALE_SHIFT); /* All done! 
*/ break; default: return; } } if ( rmesa->hw.pix[unit].cmd[PIX_PP_TXCBLEND] != color_combine || rmesa->hw.pix[unit].cmd[PIX_PP_TXABLEND] != alpha_combine || rmesa->hw.pix[unit].cmd[PIX_PP_TXCBLEND2] != color_scale || rmesa->hw.pix[unit].cmd[PIX_PP_TXABLEND2] != alpha_scale) { R200_STATECHANGE( rmesa, pix[unit] ); rmesa->hw.pix[unit].cmd[PIX_PP_TXCBLEND] = color_combine; rmesa->hw.pix[unit].cmd[PIX_PP_TXABLEND] = alpha_combine; rmesa->hw.pix[unit].cmd[PIX_PP_TXCBLEND2] = color_scale; rmesa->hw.pix[unit].cmd[PIX_PP_TXABLEND2] = alpha_scale; } }
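/* The GL_COMBINE branch above translates the standard texture-combiner state
 * into the TXCBLEND/TXABLEND words.  For reference, the kind of application
 * state it consumes is plain OpenGL 1.3 texture environment setup, e.g. a
 * 2x-scaled modulate of the texture with the previous stage:
 */
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB,      GL_MODULATE);
glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE0_RGB,      GL_TEXTURE);
glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB,     GL_SRC_COLOR);
glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE1_RGB,      GL_PREVIOUS);
glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB,     GL_SRC_COLOR);
glTexEnvi(GL_TEXTURE_ENV, GL_RGB_SCALE,        2);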
static void r200TexEnv( struct gl_context *ctx, GLenum target,
                        GLenum pname, const GLfloat *param )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   GLuint unit = ctx->Texture.CurrentUnit;
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];

   radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_VERBOSE, "%s( %s )\n",
                __FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );

   /* This is incorrect: Need to maintain this data for each of
    * GL_TEXTURE_{123}D, GL_TEXTURE_RECTANGLE_NV, etc, and switch
    * between them according to _Current->Target.
    */
   switch ( pname ) {
   case GL_TEXTURE_ENV_COLOR: {
      GLubyte c[4];
      GLuint envColor;
      _mesa_unclamped_float_rgba_to_ubyte(c, texUnit->EnvColor);
      envColor = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
      if ( rmesa->hw.tf.cmd[TF_TFACTOR_0 + unit] != envColor ) {
         R200_STATECHANGE( rmesa, tf );
         rmesa->hw.tf.cmd[TF_TFACTOR_0 + unit] = envColor;
      }
      break;
   }

   case GL_TEXTURE_LOD_BIAS_EXT: {
      GLfloat bias, min;
      GLuint b;
      const int fixed_one = R200_LOD_BIAS_FIXED_ONE;

      /* The R200's LOD bias is a signed 2's complement value with a
       * range of -16.0 <= bias < 16.0.
       *
       * NOTE: Add a small bias to the bias for conform mipsel.c test.
       */
      bias = *param;
      min = driQueryOptionb (&rmesa->radeon.optionCache, "no_neg_lod_bias") ?
         0.0 : -16.0;
      bias = CLAMP( bias, min, 16.0 );
      b = ((int)(bias * fixed_one) + R200_LOD_BIAS_CORRECTION) &
         R200_LOD_BIAS_MASK;

      if ( (rmesa->hw.tex[unit].cmd[TEX_PP_TXFORMAT_X] & R200_LOD_BIAS_MASK) != b ) {
         R200_STATECHANGE( rmesa, tex[unit] );
         rmesa->hw.tex[unit].cmd[TEX_PP_TXFORMAT_X] &= ~R200_LOD_BIAS_MASK;
         rmesa->hw.tex[unit].cmd[TEX_PP_TXFORMAT_X] |= b;
      }
      break;
   }

   case GL_COORD_REPLACE_ARB:
      if (ctx->Point.PointSprite) {
         R200_STATECHANGE( rmesa, spr );
         if ((GLenum)param[0]) {
            rmesa->hw.spr.cmd[SPR_POINT_SPRITE_CNTL] |= R200_PS_GEN_TEX_0 << unit;
         } else {
            rmesa->hw.spr.cmd[SPR_POINT_SPRITE_CNTL] &= ~(R200_PS_GEN_TEX_0 << unit);
         }
      }
      break;

   default:
      return;
   }
}
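/* The GL_TEXTURE_LOD_BIAS_EXT case above converts a float bias into the
 * signed fixed-point field of TXFORMAT_X.  From the application side this
 * path is driven by ordinary EXT_texture_lod_bias usage, e.g. biasing all
 * lambda values by +1.5:
 */
glTexEnvf(GL_TEXTURE_FILTER_CONTROL_EXT, GL_TEXTURE_LOD_BIAS_EXT, 1.5f);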
void r200PageFlip( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
              rmesa->sarea->pfCurrentPage);
   }

   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   if (!dPriv->numClipRects) {
      UNLOCK_HARDWARE( rmesa );
      usleep( 10000 );   /* throttle invisible client 10ms */
      return;
   }

   /* Need to do this for the perf box placement:
    */
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags,
                     & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->backPitch;
   }

   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
                                           + rmesa->r200Screen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
}