static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   uint32_t base_reg;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
      return;

   if (!t)
      return;

   if (!t->mt)
      return;

   switch (i) {
   case 1: base_reg = RADEON_PP_CUBIC_OFFSET_T1_0; break;
   case 2: base_reg = RADEON_PP_CUBIC_OFFSET_T2_0; break;
   default:
   case 0: base_reg = RADEON_PP_CUBIC_OFFSET_T0_0; break;
   }

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 2);
   lvl = &t->mt->levels[0];
   for (j = 0; j < 5; j++) {
      OUT_BATCH(CP_PACKET0(base_reg + (4 * j), 0));
      OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo,
                      lvl->faces[j].offset,
                      RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
void radeonTclPrimitive( struct gl_context *ctx,
                         GLenum prim,
                         int hw_prim )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint se_cntl;
   GLuint newprim = hw_prim | RADEON_CP_VC_CNTL_TCL_ENABLE;

   radeon_prepare_render(&rmesa->radeon);
   if (rmesa->radeon.NewGLState)
      radeonValidateState( ctx );

   if (newprim != rmesa->tcl.hw_primitive ||
       !discrete_prim[hw_prim & 0xf]) {
      RADEON_NEWPRIM( rmesa );
      rmesa->tcl.hw_primitive = newprim;
   }

   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl &= ~RADEON_FLAT_SHADE_VTX_LAST;

   if (prim == GL_POLYGON && (ctx->_TriangleCaps & DD_FLATSHADE))
      se_cntl |= RADEON_FLAT_SHADE_VTX_0;
   else
      se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}
void r100_swtcl_flush(struct gl_context *ctx, uint32_t current_offset)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);

   radeonEmitState(&rmesa->radeon);
   radeonEmitVertexAOS( rmesa,
                        rmesa->radeon.swtcl.vertex_size,
                        rmesa->radeon.swtcl.bo,
                        current_offset);

   radeonEmitVbufPrim( rmesa,
                       rmesa->swtcl.vertex_format,
                       rmesa->radeon.swtcl.hw_primitive,
                       rmesa->radeon.swtcl.numverts);

   if ( rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw )
      WARN_ONCE("Rendering was %d commands larger than predicted size."
                " We might overflow command buffer.\n",
                rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction );

   rmesa->radeon.swtcl.emit_prediction = 0;
}
static void radeonClear( struct gl_context *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint hwmask, swmask;
   GLuint hwbits = BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT |
                   BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL |
                   BUFFER_BIT_COLOR0;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   radeon_firevertices(&rmesa->radeon);

   hwmask = mask & hwbits;
   swmask = mask & ~hwbits;

   if ( swmask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, swmask);
      _swrast_Clear( ctx, swmask );
   }

   if ( !hwmask )
      return;

   radeonUserClear(ctx, hwmask);
}
static void transition_to_swtnl( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_cntl;

   RADEON_NEWPRIM( rmesa );
   rmesa->swtcl.vertex_format = 0;

   radeonChooseVertexState( ctx );
   radeonChooseRenderState( ctx );

   _mesa_validate_all_lighting_tables( ctx );

   tnl->Driver.NotifyMaterialChange = _mesa_validate_all_lighting_tables;

   radeonReleaseArrays( ctx, ~0 );

   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}
static void cube_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);
   int i = atom->idx, j;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT))
      return;

   if (!t)
      return;

   if (!t->mt)
      return;

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 3);
   lvl = &t->mt->levels[0];
   for (j = 0; j < 5; j++) {
      OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo,
                      lvl->faces[j].offset,
                      RADEON_GEM_DOMAIN_VRAM, 0, 0);
   }
   END_BATCH();
}
static void tex_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;

   if (t && t->mt && !t->image_override)
      dwords += 2;

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_BATCH_TABLE(atom->cmd, 3);
   if (t && t->mt && !t->image_override) {
      if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
         lvl = &t->mt->levels[0];
         OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo,
                         lvl->faces[5].offset,
                         RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
      } else {
         OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, 0,
                         RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
      }
   } else if (!t) {
      /* workaround for old CS mechanism */
      OUT_BATCH(r100->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP]);
      /* OUT_BATCH(r100->radeon.radeonScreen); */
   } else {
      OUT_BATCH(t->override_offset);
   }
   OUT_BATCH_TABLE((atom->cmd + 4), 5);
   END_BATCH();
}
void radeonChooseRenderState( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint index = 0;
   GLuint flags = ctx->_TriangleCaps;

   if (!rmesa->radeon.TclFallback || rmesa->radeon.Fallback)
      return;

   if (flags & DD_TRI_LIGHT_TWOSIDE) index |= RADEON_TWOSIDE_BIT;
   if (flags & DD_TRI_UNFILLED)      index |= RADEON_UNFILLED_BIT;

   if (index != rmesa->radeon.swtcl.RenderIndex) {
      tnl->Driver.Render.Points = rast_tab[index].points;
      tnl->Driver.Render.Line = rast_tab[index].line;
      tnl->Driver.Render.ClippedLine = rast_tab[index].line;
      tnl->Driver.Render.Triangle = rast_tab[index].triangle;
      tnl->Driver.Render.Quad = rast_tab[index].quad;

      if (index == 0) {
         tnl->Driver.Render.PrimTabVerts = radeon_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = radeon_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = radeon_fast_clipped_poly;
      } else {
         tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
      }

      rmesa->radeon.swtcl.RenderIndex = index;
   }
}
static void radeonRenderPrimitive( struct gl_context *ctx, GLenum prim )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);

   rmesa->radeon.swtcl.render_primitive = prim;
   if (prim < GL_TRIANGLES || !(ctx->_TriangleCaps & DD_TRI_UNFILLED))
      radeonRasterPrimitive( ctx, reduced_hw_prim[prim] );
}
static void transition_to_hwtnl( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];

   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   se_coord_fmt |= RADEON_VTX_W0_IS_NOT_1_OVER_W0;

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
      _tnl_need_projected_coords( ctx, GL_FALSE );
   }

   radeonUpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

   if ( rmesa->radeon.dma.flush )
      rmesa->radeon.dma.flush( rmesa->radeon.glCtx );

   rmesa->radeon.dma.flush = NULL;
   rmesa->swtcl.vertex_format = 0;

   /* if (rmesa->swtcl.indexed_verts.buf)
         radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
                                 __FUNCTION__ ); */

   if (RADEON_DEBUG & RADEON_FALLBACKS)
      fprintf(stderr, "Radeon end tcl fallback\n");
}
static void radeonDeleteTexture( GLcontext *ctx,
                                 struct gl_texture_object *texObj )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   radeonTexObj *t = radeon_tex_obj(texObj);
   int i;

   radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
                "%s( %p (target = %s) )\n", __FUNCTION__,
                (void *)texObj,
                _mesa_lookup_enum_by_nr( texObj->Target ) );

   if ( rmesa ) {
      radeon_firevertices(&rmesa->radeon);
      for ( i = 0 ; i < rmesa->radeon.glCtx->Const.MaxTextureUnits ; i++ ) {
         if ( t == rmesa->state.texture.unit[i].texobj ) {
            rmesa->state.texture.unit[i].texobj = NULL;
            rmesa->hw.tex[i].dirty = GL_FALSE;
            rmesa->hw.cube[i].dirty = GL_FALSE;
         }
      }
   }

   radeon_miptree_unreference(&t->mt);

   /* Free mipmap images and the texture object itself */
   _mesa_delete_texture_object(ctx, texObj);
}
void radeonInitSwtcl( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   static int firsttime = 1;

   if (firsttime) {
      init_rast_tab();
      firsttime = 0;
   }
   rmesa->radeon.swtcl.emit_prediction = 0;

   tnl->Driver.Render.Start = radeonRenderStart;
   tnl->Driver.Render.Finish = radeonRenderFinish;
   tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
   tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
   tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
   tnl->Driver.Render.CopyPV = _tnl_copy_pv;
   tnl->Driver.Render.Interp = _tnl_interp;

   _tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
                       RADEON_MAX_TNL_VERTEX_SIZE );

   rmesa->radeon.swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
   rmesa->radeon.swtcl.RenderIndex = ~0;
   rmesa->radeon.swtcl.render_primitive = GL_TRIANGLES;
   rmesa->radeon.swtcl.hw_primitive = 0;
}
/* Need:
 *  - Same GEN_MODE for all active bits
 *  - Same EyePlane/ObjPlane for all active bits when using Eye/Obj
 *  - STRQ presumably all supported (matrix means incoming R values
 *    can end up in STQ, this has implications for vertex support,
 *    presumably ok if maos is used, though?)
 *
 * Basically impossible to do this on the fly - just collect some
 * basic info & do the checks from ValidateState().
 */
static void radeonTexGen( GLcontext *ctx,
                          GLenum coord,
                          GLenum pname,
                          const GLfloat *params )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint unit = ctx->Texture.CurrentUnit;

   rmesa->recheck_texgen[unit] = GL_TRUE;
}
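/* A minimal sketch of the consuming side of the recheck_texgen[] flags,
 * i.e. what the deferred ValidateState()-time pass might look like.  The
 * radeon_validate_texgen() helper and the per-unit
 * RADEON_TCL_FALLBACK_TEXGEN_0 fallback bit are assumptions made for
 * illustration, not confirmed by this section.
 */
static void validate_texgen_units( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint unit;
   GLboolean ok;

   for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++) {
      if (!rmesa->recheck_texgen[unit])
         continue;

      /* Run the deferred checks; fall back to software TCL for this unit
       * if the hardware cannot express the current texgen state.
       */
      ok = radeon_validate_texgen( ctx, unit );
      TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_TEXGEN_0 << unit, !ok );
      rmesa->recheck_texgen[unit] = GL_FALSE;
   }
}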
static void radeonRasterPrimitive( struct gl_context *ctx, GLuint hwprim )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);

   if (rmesa->radeon.swtcl.hw_primitive != hwprim) {
      RADEON_NEWPRIM( rmesa );
      rmesa->radeon.swtcl.hw_primitive = hwprim;
   }
}
static void radeonClear( struct gl_context *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint flags = 0;
   GLuint orig_mask = mask;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( mask & BUFFER_BIT_STENCIL ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
         fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
      /* if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
            flags |= RADEON_USE_HIERZ; */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
           ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) ==
            RADEON_STENCIL_WRITE_MASK))) {
         flags |= RADEON_CLEAR_FASTZ;
      }
   }

   radeonUserClear(ctx, orig_mask);
}
static void radeonTexEnv( GLcontext *ctx, GLenum target,
                          GLenum pname, const GLfloat *param )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint unit = ctx->Texture.CurrentUnit;
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];

   if ( RADEON_DEBUG & RADEON_STATE ) {
      fprintf( stderr, "%s( %s )\n",
               __FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );
   }

   switch ( pname ) {
   case GL_TEXTURE_ENV_COLOR: {
      GLubyte c[4];
      GLuint envColor;
      UNCLAMPED_FLOAT_TO_RGBA_CHAN( c, texUnit->EnvColor );
      envColor = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
      if ( rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] != envColor ) {
         RADEON_STATECHANGE( rmesa, tex[unit] );
         rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] = envColor;
      }
      break;
   }

   case GL_TEXTURE_LOD_BIAS_EXT: {
      GLfloat bias, min;
      GLuint b;

      /* The Radeon's LOD bias is a signed 2's complement value with a
       * range of -1.0 <= bias < 4.0. We break this into two linear
       * functions, one mapping [-1.0,0.0] to [-128,0] and one mapping
       * [0.0,4.0] to [0,127].
       */
      min = driQueryOptionb (&rmesa->radeon.optionCache, "no_neg_lod_bias") ?
            0.0 : -1.0;
      bias = CLAMP( *param, min, 4.0 );
      if ( bias == 0 ) {
         b = 0;
      } else if ( bias > 0 ) {
         b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 4.0 )) << RADEON_LOD_BIAS_SHIFT;
      } else {
         b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 1.0 )) << RADEON_LOD_BIAS_SHIFT;
      }

      if ( (rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] & RADEON_LOD_BIAS_MASK) != b ) {
         RADEON_STATECHANGE( rmesa, tex[unit] );
         rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] &= ~RADEON_LOD_BIAS_MASK;
         rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] |= (b & RADEON_LOD_BIAS_MASK);
      }
      break;
   }

   default:
      return;
   }
}
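/* The LOD bias comment above defines a piecewise-linear encoding.  As a
 * standalone illustration (a sketch only: plain arithmetic stands in for
 * the driver's SCALED_FLOAT_TO_BYTE macro, and lod_bias_to_byte() is a
 * hypothetical helper, not part of the driver):
 */
static int lod_bias_to_byte( float bias )
{
   /* Clamp to the representable range, then apply the two linear maps:
    * [-1.0, 0.0] -> [-128, 0] and [0.0, 4.0] -> [0, 127].
    */
   if (bias < -1.0f) bias = -1.0f;
   if (bias >  4.0f) bias =  4.0f;

   if (bias >= 0.0f)
      return (int)(bias * (127.0f / 4.0f));   /* e.g.  2.0 ->  63 */
   return (int)(bias * 128.0f);               /* e.g. -0.5 -> -64 */
}
/* The resulting byte is then shifted by RADEON_LOD_BIAS_SHIFT and masked
 * with RADEON_LOD_BIAS_MASK before being merged into the TXFILTER word,
 * as radeonTexEnv() does above.
 */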
static void radeonRenderStart( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );

   radeonSetVertexFormat( ctx );

   if (rmesa->radeon.dma.flush != NULL &&
       rmesa->radeon.dma.flush != rcommon_flush_last_swtcl_prim)
      rmesa->radeon.dma.flush( ctx );
}
static void vec_emit(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->check(ctx, atom);

   BEGIN_BATCH_NO_AUTOSTATE(dwords);
   OUT_VEC(atom->cmd[0], atom->cmd + 1);
   END_BATCH();
}
static void radeonRenderPrimitive( struct gl_context *ctx, GLenum prim )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);

   rmesa->radeon.swtcl.render_primitive = prim;
   if (prim < GL_TRIANGLES || !unfilled)
      radeonRasterPrimitive( ctx, reduced_hw_prim[prim] );
}
void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint oldfallback = rmesa->radeon.Fallback;

   if (mode) {
      rmesa->radeon.Fallback |= bit;
      if (oldfallback == 0) {
         radeon_firevertices(&rmesa->radeon);
         TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_TRUE );
         _swsetup_Wakeup( ctx );
         rmesa->radeon.swtcl.RenderIndex = ~0;
         if (RADEON_DEBUG & RADEON_FALLBACKS) {
            fprintf(stderr, "Radeon begin rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
   else {
      rmesa->radeon.Fallback &= ~bit;
      if (oldfallback == bit) {
         _swrast_flush( ctx );
         tnl->Driver.Render.Start = radeonRenderStart;
         tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
         tnl->Driver.Render.Finish = radeonRenderFinish;
         tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
         tnl->Driver.Render.CopyPV = _tnl_copy_pv;
         tnl->Driver.Render.Interp = _tnl_interp;
         tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
         TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_FALSE );
         if (rmesa->radeon.TclFallback) {
            /* These are already done if rmesa->radeon.TclFallback goes to
             * zero above. But not if it doesn't (RADEON_NO_TCL for
             * example?)
             */
            _tnl_invalidate_vertex_state( ctx, ~0 );
            _tnl_invalidate_vertices( ctx, ~0 );
            rmesa->radeon.tnl_index_bitset = 0;
            radeonChooseVertexState( ctx );
            radeonChooseRenderState( ctx );
         }
         if (RADEON_DEBUG & RADEON_FALLBACKS) {
            fprintf(stderr, "Radeon end rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
}
void radeonFlushElts( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   BATCH_LOCALS(&rmesa->radeon);
   int nr;
   uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets +
                                rmesa->tcl.elt_cmd_start);
   int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw -
                 rmesa->radeon.cmdbuf.cs->section_cdw);

   if (RADEON_DEBUG & RADEON_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->radeon.dma.flush == radeonFlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = rmesa->tcl.elt_used;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      dwords -= 2;
   }
#endif

   /* Patch the final packet size and element count into the
    * already-emitted indexed-primitive packet header. */
#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords + 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords + 2) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   rmesa->radeon.cmdbuf.cs->cdw += dwords;
   rmesa->radeon.cmdbuf.cs->section_cdw += dwords;

#if RADEON_OLD_PACKETS
   if (rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
                            rmesa->ioctl.bo,
                            RADEON_GEM_DOMAIN_GTT,
                            0, 0);
   }
#endif

   END_BATCH();

   if (RADEON_DEBUG & RADEON_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}
static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   BATCH_LOCALS(&r100->radeon);
   uint32_t dwords = atom->cmd_size;
   int i = atom->idx;
   radeonTexObj *t = r100->state.texture.unit[i].texobj;
   radeon_mipmap_level *lvl;
   int hastexture = 1;

   if (!t)
      hastexture = 0;
   else {
      if (!t->mt && !t->bo)
         hastexture = 0;
   }
   dwords += 1;
   if (hastexture)
      dwords += 2;
   else
      dwords -= 2;

   BEGIN_BATCH_NO_AUTOSTATE(dwords);

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXFILTER_0 + (24 * i), 1));
   OUT_BATCH_TABLE((atom->cmd + 1), 2);

   if (hastexture) {
      OUT_BATCH(CP_PACKET0(RADEON_PP_TXOFFSET_0 + (24 * i), 0));
      if (t->mt && !t->image_override) {
         if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) {
            lvl = &t->mt->levels[t->minLod];
            OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo,
                            lvl->faces[5].offset,
                            RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
         } else {
            OUT_BATCH_RELOC(t->tile_bits, t->mt->bo,
                            get_base_teximage_offset(t),
                            RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
         }
      } else {
         if (t->bo)
            OUT_BATCH_RELOC(t->tile_bits, t->bo, 0,
                            RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
      }
   }

   OUT_BATCH(CP_PACKET0(RADEON_PP_TXCBLEND_0 + (i * 24), 1));
   OUT_BATCH_TABLE((atom->cmd + 4), 2);
   OUT_BATCH(CP_PACKET0(RADEON_PP_BORDER_COLOR_0 + (i * 4), 0));
   OUT_BATCH((atom->cmd[TEX_PP_BORDER_COLOR]));
   END_BATCH();
}
/**
 * Set vertex state for SW TCL. The primary purpose of this function is to
 * determine in advance whether or not the hardware can / should do the
 * projection divide or Mesa should do it.
 */
void radeonChooseVertexState( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);
   GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;

   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_W0_IS_NOT_1_OVER_W0);

   /* We must ensure that we don't do _tnl_need_projected_coords while in a
    * rasterization fallback. As this function will be called again when we
    * leave a rasterization fallback, we can just skip it for now.
    */
   if (rmesa->radeon.Fallback != 0)
      return;

   /* HW perspective divide is a win, but tiny vertex formats are a
    * bigger one.
    */
   if ((0 == (tnl->render_inputs_bitset &
              (BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX) |
               BITFIELD64_BIT(_TNL_ATTRIB_COLOR1)))) ||
       twosided || unfilled) {
      rmesa->swtcl.needproj = GL_TRUE;
      se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                       RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
   }
   else {
      rmesa->swtcl.needproj = GL_FALSE;
      se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   }

   _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
   }
}
/* TODO: Try to extend existing primitive if both are identical,
 * discrete and there are no intervening state changes.  (Somewhat
 * duplicates changes to DrawArrays code)
 */
static void radeonEmitPrim( struct gl_context *ctx,
                            GLenum prim,
                            GLuint hwprim,
                            GLuint start,
                            GLuint count)
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   radeonTclPrimitive( ctx, prim, hwprim );

   radeonEmitAOS( rmesa,
                  rmesa->radeon.tcl.aos_count,
                  start );

   /* Why couldn't this packet have taken an offset param?
    */
   radeonEmitVbufPrim( rmesa,
                       rmesa->tcl.vertex_format,
                       rmesa->tcl.hw_primitive,
                       count - start );
}
/**
 * Set vertex state for SW TCL. The primary purpose of this function is to
 * determine in advance whether or not the hardware can / should do the
 * projection divide or Mesa should do it.
 */
void radeonChooseVertexState( GLcontext *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];

   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
                     RADEON_VTX_W0_IS_NOT_1_OVER_W0);

   /* We must ensure that we don't do _tnl_need_projected_coords while in a
    * rasterization fallback. As this function will be called again when we
    * leave a rasterization fallback, we can just skip it for now.
    */
   if (rmesa->radeon.Fallback != 0)
      return;

   /* HW perspective divide is a win, but tiny vertex formats are a
    * bigger one.
    */
   if ((!RENDERINPUTS_TEST_RANGE( tnl->render_inputs_bitset,
                                  _TNL_FIRST_TEX, _TNL_LAST_TEX ) &&
        !RENDERINPUTS_TEST( tnl->render_inputs_bitset, _TNL_ATTRIB_COLOR1 )) ||
       (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE | DD_TRI_UNFILLED))) {
      rmesa->swtcl.needproj = GL_TRUE;
      se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                       RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
   }
   else {
      rmesa->swtcl.needproj = GL_FALSE;
      se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   }

   _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
   }
}
static GLboolean radeon_run_render( struct gl_context *ctx,
                                    struct tnl_pipeline_stage *stage )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   tnl_render_func *tab = TAG(render_tab_verts);
   GLuint i;

   if (rmesa->radeon.swtcl.RenderIndex != 0 ||
       !radeon_dma_validate_render( ctx, VB ))
      return GL_TRUE;

   radeon_prepare_render(&rmesa->radeon);
   if (rmesa->radeon.NewGLState)
      radeonValidateState( ctx );

   tnl->Driver.Render.Start( ctx );

   for (i = 0 ; i < VB->PrimitiveCount ; i++) {
      GLuint prim = VB->Primitive[i].mode;
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
         continue;

      radeon_print(RADEON_SWRENDER, RADEON_NORMAL,
                   "radeon_render.c: prim %s %d..%d\n",
                   _mesa_enum_to_string(prim & PRIM_MODE_MASK),
                   start, start + length);

      tab[prim & PRIM_MODE_MASK]( ctx, start, start + length, prim );
   }

   tnl->Driver.Render.Finish( ctx );

   return GL_FALSE;  /* finished the pipe */
}
/**
 * Allocate a new texture object.
 * Called via ctx->Driver.NewTextureObject.
 * Note: we could use containment here to 'derive' the driver-specific
 * texture object from the core mesa gl_texture_object.  Not done at this time.
 */
static struct gl_texture_object *
radeonNewTextureObject( GLcontext *ctx, GLuint name, GLenum target )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   radeonTexObj *t = CALLOC_STRUCT(radeon_tex_obj);

   _mesa_initialize_texture_object(&t->base, name, target);
   t->base.MaxAnisotropy = rmesa->radeon.initialMaxAnisotropy;

   t->border_fallback = GL_FALSE;

   t->pp_txfilter = RADEON_BORDER_MODE_OGL;
   t->pp_txformat = (RADEON_TXFORMAT_ENDIAN_NO_SWAP |
                     RADEON_TXFORMAT_PERSPECTIVE_ENABLE);

   radeonSetTexWrap( t, t->base.WrapS, t->base.WrapT );
   radeonSetTexMaxAnisotropy( t, t->base.MaxAnisotropy );
   radeonSetTexFilter( t, t->base.MinFilter, t->base.MagFilter );
   radeonSetTexBorderColor( t, t->base.BorderColor.f );

   return &t->base;
}
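/* The allocator above embeds the core Mesa object as the first member of
 * the driver struct (t->base), which is the containment pattern the note
 * refers to; helpers such as radeon_tex_obj() (used in radeonDeleteTexture
 * above) can then recover the driver object with a cast.  A sketch of that
 * pattern follows; the struct and helper names here are illustrative, and
 * the cast is valid only under the assumption that 'base' stays the first
 * member.
 */
struct radeon_tex_obj_sketch {
   struct gl_texture_object base;   /* must remain the first member */
   /* driver-private state (miptree, hw filter/format words, ...) */
};

static inline struct radeon_tex_obj_sketch *
radeon_tex_obj_sketch( struct gl_texture_object *texObj )
{
   /* Valid only because 'base' is first: both pointers share an address. */
   return (struct radeon_tex_obj_sketch *) texObj;
}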
static int check_always_ctx( struct gl_context *ctx,
                             struct radeon_state_atom *atom)
{
   r100ContextPtr r100 = R100_CONTEXT(ctx);
   struct radeon_renderbuffer *rrb, *drb;
   uint32_t dwords;

   rrb = radeon_get_colorbuffer(&r100->radeon);
   if (!rrb || !rrb->bo) {
      return 0;
   }

   drb = radeon_get_depthbuffer(&r100->radeon);

   dwords = 10;
   if (drb)
      dwords += 6;
   if (rrb)
      dwords += 8;

   return dwords;
}
void radeonChooseRenderState( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint index = 0;
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);
   GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;

   if (!rmesa->radeon.TclFallback || rmesa->radeon.Fallback)
      return;

   if (twosided) index |= RADEON_TWOSIDE_BIT;
   if (unfilled) index |= RADEON_UNFILLED_BIT;

   if (index != rmesa->radeon.swtcl.RenderIndex) {
      tnl->Driver.Render.Points = rast_tab[index].points;
      tnl->Driver.Render.Line = rast_tab[index].line;
      tnl->Driver.Render.ClippedLine = rast_tab[index].line;
      tnl->Driver.Render.Triangle = rast_tab[index].triangle;
      tnl->Driver.Render.Quad = rast_tab[index].quad;

      if (index == 0) {
         tnl->Driver.Render.PrimTabVerts = radeon_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = radeon_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = radeon_fast_clipped_poly;
      } else {
         tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
      }

      rmesa->radeon.swtcl.RenderIndex = index;
   }
}
void radeonTclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint oldfallback = rmesa->radeon.TclFallback;

   if (mode) {
      rmesa->radeon.TclFallback |= bit;
      if (oldfallback == 0) {
         if (RADEON_DEBUG & RADEON_FALLBACKS)
            fprintf(stderr, "Radeon begin tcl fallback %s\n",
                    getFallbackString( bit ));
         transition_to_swtnl( ctx );
      }
   }
   else {
      rmesa->radeon.TclFallback &= ~bit;
      if (oldfallback == bit) {
         if (RADEON_DEBUG & RADEON_FALLBACKS)
            fprintf(stderr, "Radeon end tcl fallback %s\n",
                    getFallbackString( bit ));
         transition_to_hwtnl( ctx );
      }
   }
}