/* Ensure the images for the given cube face of a texture object are
 * resident in card memory and current, allocating heap space and
 * uploading any dirty mipmap levels as needed.
 *
 * \param rmesa  Radeon rendering context.
 * \param t      Driver texture object; may be NULL or empty, in which
 *               case there is nothing to do.
 * \param face   Cube map face index (0 for non-cube textures).
 *
 * \return 0 on success (or nothing to do), -1 if no texture memory
 *         could be allocated.
 */
int radeonUploadTexImages( radeonContextPtr rmesa, radeonTexObjPtr t, GLuint face )
{
   int numLevels;

   /* Validate before dereferencing: the previous code read t->base.*
    * for numLevels and the debug message before this NULL check, which
    * crashed on a NULL texture object.
    */
   if ( !t || t->base.totalSize == 0 )
      return 0;

   numLevels = t->base.lastLevel - t->base.firstLevel + 1;

   if ( RADEON_DEBUG & (DEBUG_TEXTURE|DEBUG_IOCTL) ) {
      fprintf( stderr, "%s( %p, %p ) sz=%d lvls=%d-%d\n", __FUNCTION__,
	       (void *)rmesa->glCtx, (void *)t->base.tObj, t->base.totalSize,
	       t->base.firstLevel, t->base.lastLevel );
   }

   LOCK_HARDWARE( rmesa );

   if ( t->base.memBlock == NULL ) {
      int heap;

      /* May evict our own or another client's textures (LRU). */
      heap = driAllocateTexture( rmesa->texture_heaps, rmesa->nr_heaps,
				 (driTextureObject *) t );
      if ( heap == -1 ) {
	 UNLOCK_HARDWARE( rmesa );
	 return -1;
      }

      /* Set the base offset of the texture image */
      t->bufAddr = rmesa->radeonScreen->texOffset[heap]
	 + t->base.memBlock->ofs;
      t->pp_txoffset = t->bufAddr;

      /* Mark this texobj as dirty on all units: */
      t->dirty_state = TEX_ALL;
   }

   /* Let the world know we've used this memory recently. */
   driUpdateTextureLRU( (driTextureObject *) t );
   UNLOCK_HARDWARE( rmesa );

   /* Upload any images that are new.  The dirty bitmask is indexed by
    * absolute mipmap level, hence the (i + firstLevel) shift.
    */
   if (t->base.dirty_images[face]) {
      int i;

      for ( i = 0 ; i < numLevels ; i++ ) {
	 if ( (t->base.dirty_images[face] & (1 << (i + t->base.firstLevel))) != 0 ) {
	    uploadSubImage( rmesa, t, i, 0, 0,
			    t->image[face][i].width,
			    t->image[face][i].height,
			    face );
	 }
      }

      t->base.dirty_images[face] = 0;
   }

   return 0;
}
/* This is called with the hardware lock held.  May have to eject our
 * own and/or other client's texture objects to make room for the
 * upload.
 *
 * \param imesa  i810 rendering context.
 * \param t      Driver texture object to make resident and upload.
 *
 * \return 0 on success, -1 if no texture heap space could be allocated.
 */
int i810UploadTexImagesLocked( i810ContextPtr imesa, i810TextureObjectPtr t )
{
   int i;
   int ofs;
   int numLevels;

   /* Do we need to eject LRU texture objects? */
   if (!t->base.memBlock) {
      int heap;

      /* Allocate heap space; this may evict other textures (LRU). */
      heap = driAllocateTexture( imesa->texture_heaps, imesa->nr_heaps,
				 (driTextureObject *) t);
      if ( heap == -1 ) {
	 return -1;
      }

      /* Record both the CPU-visible address and the card-relative
       * offset of the newly placed texture.
       */
      ofs = t->base.memBlock->ofs;
      t->BufAddr = imesa->i810Screen->tex.map + ofs;
      t->Setup[I810_TEXREG_MI3] = imesa->i810Screen->textureOffset + ofs;

      /* If this object is currently bound, its setup registers must be
       * re-emitted since the offset changed.
       */
      if (t == imesa->CurrentTexObj[0])
	 I810_STATECHANGE(imesa, I810_UPLOAD_TEX0);

      if (t == imesa->CurrentTexObj[1])
	 I810_STATECHANGE(imesa, I810_UPLOAD_TEX1);

      /*      i810UpdateTexLRU( imesa, t );*/
   }

   /* Let the world know we've used this memory recently. */
   driUpdateTextureLRU( (driTextureObject *) t );

   /* Wait for the card to age past the heap's timestamp before writing
    * over memory that may have just been freed by an eviction.
    */
   if (imesa->texture_heaps[0]->timestamp >= GET_DISPATCH_AGE(imesa))
      i810WaitAgeLocked( imesa, imesa->texture_heaps[0]->timestamp );

   /* Upload every level flagged dirty.
    * NOTE(review): the dirty bit is tested with (1<<i), i.e. relative
    * to firstLevel, unlike the radeon path which shifts by
    * (i + firstLevel) — presumably i810SetTexImages sets the bits the
    * same way; confirm against the setter.
    */
   numLevels = t->base.lastLevel - t->base.firstLevel + 1;
   for (i = 0 ; i < numLevels ; i++)
      if (t->base.dirty_images[0] & (1<<i))
	 i810UploadTexLevel( imesa, t, i );

   t->base.dirty_images[0] = 0;

   return 0;
}
/* Validate and activate the texture currently bound to the given unit:
 * reject unsupported borders, upload dirty images, and switch driver
 * state when the bound object has changed.
 *
 * \return GL_FALSE when a software fallback is required (texture
 *         border present, or no card memory available for the images);
 *         GL_TRUE otherwise.
 */
static GLboolean enable_tex_common( struct gl_context *ctx, GLuint unit )
{
   i810ContextPtr i810 = I810_CONTEXT(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   i810TextureObjectPtr driverTex = (i810TextureObjectPtr) tObj->DriverData;

   /* Hardware can't sample bordered textures -> software fallback. */
   if (tObj->Image[0][tObj->BaseLevel]->Border > 0)
      return GL_FALSE;

   /* Upload teximages (not pipelined) */
   if (driverTex->base.dirty_images[0]) {
      I810_FIREVERTICES(i810);
      i810SetTexImages( i810, tObj );
      if (!driverTex->base.memBlock)
	 return GL_FALSE;
   }

   /* Update state if this is a different texture object to last time. */
   if (i810->CurrentTexObj[unit] != driverTex) {
      I810_STATECHANGE(i810, (I810_UPLOAD_TEX0<<unit));
      i810->CurrentTexObj[unit] = driverTex;
      driverTex->base.bound |= (1U << unit);

      /* XXX: should be locked */
      driUpdateTextureLRU( (driTextureObject *) driverTex );
   }

   i810->TexEnvImageFmt[unit] = tObj->Image[0][tObj->BaseLevel]->_BaseFormat;

   return GL_TRUE;
}
/* Bring texture unit 'unit' up to date for the currently bound texture
 * object: handle the border fallback, (re)bind the driver texture
 * object, and rebuild this unit's share of the hardware register state.
 *
 * \return GL_FALSE to request a software fallback, otherwise the result
 *         of r128UpdateTextureEnv() for the unit.
 */
static GLboolean update_tex_common( GLcontext *ctx, int unit )
{
   r128ContextPtr rmesa = R128_CONTEXT(ctx);
   const int source = rmesa->tmu_source[unit];
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[source];
   const struct gl_texture_object *tObj = texUnit->_Current;
   r128TexObjPtr t = (r128TexObjPtr) tObj->DriverData;

   /* Fallback if there's a texture border */
   if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
      return GL_FALSE;
   }

   /* Update state if this is a different texture object to last
    * time.
    */
   if ( rmesa->CurrentTexObj[unit] != t ) {
      if ( rmesa->CurrentTexObj[unit] != NULL ) {
	 /* The old texture is no longer bound to this texture unit.
	  * Mark it as such.
	  */
	 rmesa->CurrentTexObj[unit]->base.bound &= ~(1UL << unit);
      }

      rmesa->CurrentTexObj[unit] = t;
      t->base.bound |= (1UL << unit);
      rmesa->dirty |= R128_UPLOAD_TEX0 << unit;
      driUpdateTextureLRU( (driTextureObject *) t ); /* XXX: should be locked! */
   }

   /* FIXME: We need to update the texture unit if any texture parameters have
    * changed, but this texture was already bound.  This could be changed to
    * work like the Radeon driver where the texture object has it's own
    * dirty state flags.
    */
   rmesa->dirty |= R128_UPLOAD_TEX0 << unit;

   /* register setup — first clear this unit's size/pitch field, then
    * re-set it below from the texture object's precomputed value.
    */
   rmesa->setup.tex_size_pitch_c &= ~(R128_TEX_SIZE_PITCH_MASK <<
				      (R128_SEC_TEX_SIZE_PITCH_SHIFT * unit));

   if ( unit == 0 ) {
      rmesa->setup.tex_cntl_c |= R128_TEXMAP_ENABLE;
      rmesa->setup.tex_size_pitch_c |= t->setup.tex_size_pitch << 0;
      rmesa->setup.scale_3d_cntl &= ~R128_TEX_CACHE_SPLIT;
      t->setup.tex_cntl &= ~R128_SEC_SELECT_SEC_ST;
   }
   else {
      rmesa->setup.tex_cntl_c |= R128_SEC_TEXMAP_ENABLE;
      rmesa->setup.tex_size_pitch_c |= t->setup.tex_size_pitch << 16;
      rmesa->setup.scale_3d_cntl |= R128_TEX_CACHE_SPLIT;
      t->setup.tex_cntl |= R128_SEC_SELECT_SEC_ST;

      /* If the second TMU is enabled, then multitexturing is happening.
       */
      if ( R128_IS_PLAIN( rmesa ) )
	 rmesa->blend_flags |= R128_BLEND_MULTITEX;
   }

   rmesa->dirty |= R128_UPLOAD_CONTEXT;

   /* FIXME: The Radeon has some cached state so that it can avoid calling
    * FIXME: UpdateTextureEnv in some cases.  Is that possible here?
    */
   return r128UpdateTextureEnv( ctx, unit );
}
/* Update the hardware state for one texture unit: upload dirty images,
 * bind the driver texture object, and rebuild the unit's register
 * values (pixel width, scale_3d_cntl, tex_cntl, tex_size_pitch).
 * Falls back to software rendering for unsupported cases (texture
 * borders; 3D/cube targets).
 */
static void mach64UpdateTextureUnit( GLcontext *ctx, int unit )
{
   mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
   int source = mmesa->tmu_source[unit];
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[source];
   const struct gl_texture_object *tObj = ctx->Texture.Unit[source]._Current;
   mach64TexObjPtr t = tObj->DriverData;

   /* Work on local copies of the registers; they are written back at
    * the end only if they actually changed, to avoid redundant dirty
    * flags.
    */
   GLuint d = mmesa->setup.dp_pix_width;
   GLuint s = mmesa->setup.scale_3d_cntl;

   assert(unit == 0 || unit == 1);  /* only two tex units */

   if ( MACH64_DEBUG & DEBUG_VERBOSE_API ) {
      fprintf( stderr, "%s( %p, %d ) enabled=0x%x 0x%x\n",
	       __FUNCTION__, ctx, unit,
	       ctx->Texture.Unit[0]._ReallyEnabled,
	       ctx->Texture.Unit[1]._ReallyEnabled);
   }

   if (texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT)) {
      assert(t);  /* should have driver tex data by now */

      /* Fallback if there's a texture border */
      if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
	 FALLBACK( mmesa, MACH64_FALLBACK_TEXTURE, GL_TRUE );
	 return;
      }

      /* Upload teximages */
      if (t->base.dirty_images[0]) {
	 mach64SetTexImages( mmesa, tObj );
	 mmesa->dirty |= (MACH64_UPLOAD_TEX0IMAGE << unit);
      }

      /* Bind to the given texture unit */
      mmesa->CurrentTexObj[unit] = t;
      t->base.bound |= (1 << unit);

      if ( t->base.memBlock )
	 driUpdateTextureLRU( (driTextureObject *) t ); /* XXX: should be locked! */

      /* register setup */
      if ( unit == 0 ) {
	 /* Primary unit: format lives in the top nibble of dp_pix_width. */
	 d &= ~MACH64_SCALE_PIX_WIDTH_MASK;
	 d |= (t->textureFormat << 28);

	 s &= ~(MACH64_TEXTURE_DISABLE |
		MACH64_TEX_CACHE_SPLIT |
		MACH64_TEX_BLEND_FCN_MASK |
		MACH64_TEX_MAP_AEN);

	 /* Minification filter / multitexture blend function. */
	 if ( mmesa->multitex ) {
	    s |= MACH64_TEX_BLEND_FCN_TRILINEAR | MACH64_TEX_CACHE_SPLIT;
	 } else if ( t->BilinearMin ) {
	    s |= MACH64_TEX_BLEND_FCN_LINEAR;
	 } else {
	    s |= MACH64_TEX_BLEND_FCN_NEAREST;
	 }

	 /* Magnification filter. */
	 if ( t->BilinearMag ) {
	    s |= MACH64_BILINEAR_TEX_EN;
	 } else {
	    s &= ~MACH64_BILINEAR_TEX_EN;
	 }

	 if ( t->hasAlpha ) {
	    s |= MACH64_TEX_MAP_AEN;
	 }

	 mmesa->setup.tex_cntl &= ~(MACH64_TEXTURE_CLAMP_S |
				    MACH64_TEXTURE_CLAMP_T |
				    MACH64_SECONDARY_STW);

	 if ( t->ClampS ) {
	    mmesa->setup.tex_cntl |= MACH64_TEXTURE_CLAMP_S;
	 }
	 if ( t->ClampT ) {
	    mmesa->setup.tex_cntl |= MACH64_TEXTURE_CLAMP_T;
	 }

	 /* Unit 0 sizes occupy the low half of tex_size_pitch. */
	 mmesa->setup.tex_size_pitch |= ((t->widthLog2 << 0) |
					 (t->maxLog2 << 4) |
					 (t->heightLog2 << 8));
      } else {
	 /* Enable texture mapping mode */
	 s &= ~MACH64_TEXTURE_DISABLE;

	 /* Secondary unit: format goes into the composite field. */
	 d &= ~MACH64_COMPOSITE_PIX_WIDTH_MASK;
	 d |= (t->textureFormat << 4);

	 mmesa->setup.tex_cntl &= ~(MACH64_COMP_ALPHA |
				    MACH64_SEC_TEX_CLAMP_S |
				    MACH64_SEC_TEX_CLAMP_T);
	 mmesa->setup.tex_cntl |= (MACH64_TEXTURE_COMPOSITE |
				   MACH64_SECONDARY_STW);

	 if ( t->BilinearMin ) {
	    mmesa->setup.tex_cntl |= MACH64_COMP_BLEND_BILINEAR;
	 } else {
	    mmesa->setup.tex_cntl &= ~MACH64_COMP_BLEND_BILINEAR;
	 }
	 if ( t->BilinearMag ) {
	    mmesa->setup.tex_cntl |= MACH64_COMP_FILTER_BILINEAR;
	 } else {
	    mmesa->setup.tex_cntl &= ~MACH64_COMP_FILTER_BILINEAR;
	 }

	 if ( t->hasAlpha ) {
	    mmesa->setup.tex_cntl |= MACH64_COMP_ALPHA;
	 }
	 if ( t->ClampS ) {
	    mmesa->setup.tex_cntl |= MACH64_SEC_TEX_CLAMP_S;
	 }
	 if ( t->ClampT ) {
	    mmesa->setup.tex_cntl |= MACH64_SEC_TEX_CLAMP_T;
	 }

	 /* Unit 1 sizes occupy the high half of tex_size_pitch. */
	 mmesa->setup.tex_size_pitch |= ((t->widthLog2 << 16) |
					 (t->maxLog2 << 20) |
					 (t->heightLog2 << 24));
      }

      /* Commit the shadowed registers only if they changed. */
      if ( mmesa->setup.scale_3d_cntl != s ) {
	 mmesa->setup.scale_3d_cntl = s;
	 mmesa->dirty |= MACH64_UPLOAD_SCALE_3D_CNTL;
      }
      if ( mmesa->setup.dp_pix_width != d ) {
	 mmesa->setup.dp_pix_width = d;
	 mmesa->dirty |= MACH64_UPLOAD_DP_PIX_WIDTH;
      }
   }
   else if (texUnit->_ReallyEnabled) {
      /* 3D or cube map texture enabled - fallback */
      FALLBACK( mmesa, MACH64_FALLBACK_TEXTURE, GL_TRUE );
   }
   else {
      /* texture unit disabled */
   }
}
/* Make the texture object's images resident in card memory and current,
 * allocating heap space, programming the texorg registers, and
 * uploading any dirty mipmap levels.
 *
 * \param mmesa  MGA rendering context.
 * \param t      Driver texture object; may be NULL or empty.
 *
 * \return 0 on success (or nothing to do), -1 if no texture memory
 *         could be allocated.
 */
int mgaUploadTexImages( mgaContextPtr mmesa, mgaTextureObjectPtr t )
{
   /* Nothing to do for an absent or empty texture. */
   if ( t == NULL || t->base.totalSize == 0 )
      return 0;

   LOCK_HARDWARE( mmesa );

   if ( t->base.memBlock == NULL ) {
      const int heap = driAllocateTexture( mmesa->texture_heaps,
					   mmesa->nr_heaps,
					   (driTextureObject *) t );

      if ( heap == -1 ) {
	 UNLOCK_HARDWARE( mmesa );
	 return -1;
      }

      {
	 const int offset = mmesa->mgaScreen->textureOffset[ heap ]
	    + t->base.memBlock->ofs;

	 if ( MGA_IS_G200(mmesa) ) {
	    /* G200: each mipmap register holds an absolute offset. */
	    t->setup.texorg  = offset;
	    t->setup.texorg1 = offset + t->offsets[1];
	    t->setup.texorg2 = offset + t->offsets[2];
	    t->setup.texorg3 = offset + t->offsets[3];
	    t->setup.texorg4 = offset + t->offsets[4];
	 } else {
	    /* Later chips: base + select bit, level 1 offset relative. */
	    t->setup.texorg  = offset | TO_texorgoffsetsel;
	    t->setup.texorg1 = t->offsets[1];
	    t->setup.texorg2 = 0;
	    t->setup.texorg3 = 0;
	    t->setup.texorg4 = 0;
	 }
      }

      mmesa->dirty |= MGA_UPLOAD_CONTEXT;
   }

   /* Touch the LRU so this block isn't the next eviction candidate. */
   driUpdateTextureLRU( (driTextureObject *) t );

   if ( MGA_DEBUG & DEBUG_VERBOSE_TEXTURE )
      fprintf(stderr, "[%s:%d] dispatch age: %d age freed memory: %d\n",
	      __FILE__, __LINE__,
	      GET_DISPATCH_AGE(mmesa), mmesa->dirtyAge);

   /* Wait for the card to age past any memory freed by eviction. */
   if ( mmesa->dirtyAge >= GET_DISPATCH_AGE(mmesa) )
      mgaWaitAgeLocked( mmesa, mmesa->dirtyAge );

   if ( t->base.dirty_images[0] ) {
      const int numLevels = t->base.lastLevel - t->base.firstLevel + 1;
      int level;

      if ( MGA_DEBUG & DEBUG_VERBOSE_TEXTURE )
	 fprintf(stderr, "[%s:%d] dirty_images[0] = 0x%04x\n",
		 __FILE__, __LINE__, t->base.dirty_images[0] );

      for ( level = 0 ; level < numLevels ; level++ ) {
	 if ( t->base.dirty_images[0] & (1U << level) )
	    mgaUploadSubImage( mmesa, t, level );
      }

      t->base.dirty_images[0] = 0;
   }

   UNLOCK_HARDWARE( mmesa );

   return 0;
}