/**
 * ctx->Driver.FlushVertices() callback.
 *
 * FLUSH_UPDATE_CURRENT: copy the fastpath's notion of "current" attributes
 * back into Mesa's ctx state and reinstall our exec vtxfmt table.
 * FLUSH_STORED_VERTICES: emit any buffered primitives via flush_prims().
 */
static void radeonVtxfmtFlushVertices( GLcontext *ctx, GLuint flags )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_VFMT)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert(rmesa->vb.installed);

   if (flags & FLUSH_UPDATE_CURRENT) {
      radeon_copy_to_current( ctx );
      if (RADEON_DEBUG & DEBUG_VFMT)
	 fprintf(stderr, "reinstall on update_current\n");
      _mesa_install_exec_vtxfmt( ctx, &rmesa->vb.vtxfmt );
      ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT;
   }

   if (flags & FLUSH_STORED_VERTICES) {
      /* Fixed: the original redeclared 'rmesa' here, shadowing the outer
       * variable, and re-derived the context for the flush_prims() call.
       */
      assert(rmesa->dma.flush == 0 ||
	     rmesa->dma.flush == flush_prims);
      if (rmesa->dma.flush == flush_prims)
	 flush_prims( rmesa );
      ctx->Driver.NeedFlush &= ~FLUSH_STORED_VERTICES;
   }
}
/* Build a specialised x86 machine-code version of glColor4ubv() for the
 * current vertex format.  DFN() copies a hand-written code template into a
 * fresh buffer and FIXUP() patches placeholder immediates (at fixed byte
 * offsets inside that template) with real pointers into the context's
 * vertex-building state.  The offsets below are tied to the exact template
 * bytes — do not change them independently of the assembly.
 */
struct dynfn *radeon_makeX86Color4ubv( GLcontext *ctx, int key )
{
   /* NOTE(review): MALLOC_STRUCT result is not NULL-checked before DFN()
    * dereferences it — matches the surrounding codegen functions. */
   struct dynfn *dfn = MALLOC_STRUCT( dynfn );
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (RADEON_DEBUG & DEBUG_CODEGEN)
      fprintf(stderr, "%s 0x%08x\n", __FUNCTION__, key );

   if (key & RADEON_CP_VC_FRMT_PKCOLOR) {
      /* Packed-ubyte color path: store the 4 bytes directly. */
      DFN ( _x86_Color4ubv_ub, rmesa->vb.dfn_cache.Color4ubv);
      FIXUP(dfn->code, 5, 0x12345678, (int)rmesa->vb.colorptr);
      return dfn;
   }
   else {
      /* Float color path: expand each ubyte through the ubyte->float
       * lookup table and store four floats at floatcolorptr[0..3]. */
      DFN ( _x86_Color4ubv_4f, rmesa->vb.dfn_cache.Color4ubv);
      FIXUP(dfn->code, 2, 0x00000000, (int)_mesa_ubyte_to_float_color_tab);
      FIXUP(dfn->code, 27, 0xdeadbeaf, (int)rmesa->vb.floatcolorptr);
      FIXUP(dfn->code, 33, 0xdeadbeaf, (int)rmesa->vb.floatcolorptr+4);
      FIXUP(dfn->code, 55, 0xdeadbeaf, (int)rmesa->vb.floatcolorptr+8);
      FIXUP(dfn->code, 61, 0xdeadbeaf, (int)rmesa->vb.floatcolorptr+12);
      return dfn;
   }
}
/* Return various strings for glGetString(). */
static const GLubyte *radeonGetString( GLcontext *ctx, GLenum name )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   static char buffer[128];

   if ( name == GL_VENDOR )
      return (GLubyte *)"Tungsten Graphics, Inc.";

   if ( name == GL_RENDERER ) {
      /* PCI cards report AGP mode 0; otherwise use the screen's AGP mode. */
      GLuint agp_mode = (rmesa->radeonScreen->card_type==RADEON_CARD_PCI) ? 0 :
	 rmesa->radeonScreen->AGPMode;
      unsigned offset = driGetRendererString( buffer, "Radeon", DRIVER_DATE,
					      agp_mode );

      /* Append whether TCL is active or disabled by a fallback. */
      sprintf( & buffer[ offset ], " %sTCL",
	       !(rmesa->TclFallback & RADEON_TCL_FALLBACK_TCL_DISABLE)
	       ? "" : "NO-" );

      return (GLubyte *)buffer;
   }

   return NULL;
}
/**
 * Upload fog coordinates into a DMA region as an AOS (array-of-structures)
 * stream, converting each input fog coord to a hardware blend factor.
 *
 * stride == 0 means a single constant fog coordinate replicated by the
 * hardware; otherwise 'count' elements are read at 'stride' byte steps.
 */
static void r200_emit_vecfog(GLcontext *ctx, struct radeon_aos *aos,
			     GLvoid *data, int stride, int count)
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   uint32_t *out;
   int i;
   int size = 1;   /* one 32-bit component per element */

   if (stride == 0) {
      radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
      count = 1;
      aos->stride = 0;
   }
   else {
      /* Fixed: must allocate room for all 'count' elements; the original
       * allocated only size * 4 bytes and then wrote 'count' dwords,
       * overflowing the DMA region. */
      radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
      aos->stride = size;
   }

   aos->components = size;
   aos->count = count;

   out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
   for (i = 0; i < count; i++) {
      out[0] = r200ComputeFogBlendFactor( ctx, *(GLfloat *)data );
      out++;
      /* Advance by bytes; avoid arithmetic on void* (GNU extension). */
      data = (char *)data + stride;
   }
}
/* ctx->Driver.GenerateMipmap() hook: build the mipmap chain for texObj
 * starting from its base level, using either the software fallback (with
 * the base image mapped) or the shared meta implementation.
 */
void radeonGenerateMipmap(GLcontext* ctx, GLenum target,
			  struct gl_texture_object *texObj)
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct radeon_bo *bo;
   GLuint face = _mesa_tex_target_to_face(target);
   radeon_texture_image *baseimage =
      get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);

   /* The backing buffer lives either directly on the image or in its
    * miptree. */
   bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

   /* If the GPU still references this buffer, flush so the CPU/meta path
    * doesn't read or write data the GPU is using. */
   if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
      radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		   "%s(%p, tex %p) Trying to generate mipmap for texture "
		   "in processing by GPU.\n",
		   __func__, ctx, texObj);
      radeon_firevertices(rmesa);
   }

   if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
      /* Software path: needs the base image mapped (read-only). */
      radeon_teximage_map(baseimage, GL_FALSE);
      radeon_generate_mipmap(ctx, target, texObj);
      radeon_teximage_unmap(baseimage);
   } else {
      _mesa_meta_GenerateMipmap(ctx, target, texObj);
   }
}
/* glReadPixels() entry point: try the accelerated blit path first and
 * fall back to Mesa's software implementation otherwise.
 */
void radeonReadPixels(struct gl_context * ctx,
		      GLint x, GLint y, GLsizei width, GLsizei height,
		      GLenum format, GLenum type,
		      const struct gl_pixelstore_attrib *pack,
		      GLvoid * pixels)
{
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);

   radeon_prepare_render(radeon);

   /* Fast path: hardware blit readback. */
   if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack,
			  pixels))
      return;

   radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
                "Falling back to sw for ReadPixels (format %s, type %s)\n",
                _mesa_lookup_enum_by_nr(format),
                _mesa_lookup_enum_by_nr(type));

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */
   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
}
/* Called before a glBegin() on the vertex fastpath.  Runs the full chain of
 * deferred validation (Mesa state, driver state, pending flushes, fastpath
 * revalidation) in a fixed order; each step may uninstall the fastpath, so
 * rmesa->vb.installed is rechecked at the end.
 *
 * Returns GL_TRUE if the fastpath handled the Begin, GL_FALSE if the caller
 * must fall back to the normal tnl path.
 */
static GLboolean radeonNotifyBegin( GLcontext *ctx, GLenum p )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_VFMT)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert(!rmesa->vb.installed);

   if (ctx->NewState)
      _mesa_update_state( ctx );

   if (rmesa->NewGLState)
      radeonValidateState( ctx );

   if (ctx->Driver.NeedFlush)
      ctx->Driver.FlushVertices( ctx, ctx->Driver.NeedFlush );

   if (rmesa->vb.recheck)
      radeonVtxfmtValidate( ctx );

   /* Any of the steps above may have decided the fastpath cannot be used. */
   if (!rmesa->vb.installed) {
      if (RADEON_DEBUG & DEBUG_VFMT)
	 fprintf(stderr, "%s -- failed\n", __FUNCTION__);
      return GL_FALSE;
   }

   radeon_Begin( p );
   return GL_TRUE;
}
/* Flag the vertex fastpath for revalidation before its next use and clear
 * any recorded fallback condition.
 */
void radeonVtxfmtInvalidate( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   rmesa->vb.fell_back = GL_FALSE;
   rmesa->vb.recheck = GL_TRUE;
}
static void radeon_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) { radeonContextPtr radeon = RADEON_CONTEXT(ctx); gl_format mesa_format; int i; for (i = -2; i < (GLint) ctx->Const.MaxColorAttachments; i++) { struct gl_renderbuffer_attachment *att; if (i == -2) { att = &fb->Attachment[BUFFER_DEPTH]; } else if (i == -1) { att = &fb->Attachment[BUFFER_STENCIL]; } else { att = &fb->Attachment[BUFFER_COLOR0 + i]; } if (att->Type == GL_TEXTURE) { mesa_format = att->Texture->Image[att->CubeMapFace][att->TextureLevel]->TexFormat; } else { /* All renderbuffer formats are renderable, but not sampable */ continue; } if (!radeon->vtbl.is_format_renderable(mesa_format)){ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED; radeon_print(RADEON_TEXTURE, RADEON_TRACE, "%s: HW doesn't support format %s as output format of attachment %d\n", __FUNCTION__, _mesa_get_format_name(mesa_format), i); return; } } }
/* Set up the hardware for a TCL (hardware transform) primitive.
 *
 * prim    - the GL primitive being rendered (used for flat-shade provoking
 *           vertex selection).
 * hw_prim - the corresponding hardware primitive type.
 */
void radeonTclPrimitive( GLcontext *ctx,
			 GLenum prim,
			 int hw_prim )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLuint se_cntl;
   GLuint newprim = hw_prim | RADEON_CP_VC_CNTL_TCL_ENABLE;

   /* Start a new hardware primitive when the type changes, or always for
    * connected primitives (non-discrete), which can't be concatenated. */
   if (newprim != rmesa->tcl.hw_primitive ||
       !discrete_prim[hw_prim&0xf]) {
      RADEON_NEWPRIM( rmesa );
      rmesa->tcl.hw_primitive = newprim;
   }

   /* Choose the flat-shading provoking vertex: GL_POLYGON flat-shades from
    * the first vertex; everything else from the last. */
   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl &= ~RADEON_FLAT_SHADE_VTX_LAST;

   if (prim == GL_POLYGON && (ctx->_TriangleCaps & DD_FLATSHADE))
      se_cntl |= RADEON_FLAT_SHADE_VTX_0;
   else
      se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}
static void radeonTexImage2D( GLcontext *ctx, GLenum target, GLint level, GLint internalFormat, GLint width, GLint height, GLint border, GLenum format, GLenum type, const GLvoid *pixels, const struct gl_pixelstore_attrib *packing, struct gl_texture_object *texObj, struct gl_texture_image *texImage ) { radeonContextPtr rmesa = RADEON_CONTEXT(ctx); radeonTexObjPtr t = (radeonTexObjPtr)texObj->DriverData; /* fprintf(stderr, "%s\n", __FUNCTION__); */ if ( t ) { radeonSwapOutTexObj( rmesa, t ); } else { t = radeonAllocTexObj( texObj ); if (!t) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage2D"); return; } texObj->DriverData = t; } /* Note, this will call radeonChooseTextureFormat */ _mesa_store_teximage2d(ctx, target, level, internalFormat, width, height, border, format, type, pixels, &ctx->Unpack, texObj, texImage); t->dirty_images |= (1 << level); }
/* ctx->Driver.TexSubImage2D() hook: update a sub-rectangle of the in-memory
 * image and mark the level dirty for re-upload.
 */
static void radeonTexSubImage2D( GLcontext *ctx, GLenum target, GLint level,
                                 GLint xoffset, GLint yoffset,
                                 GLsizei width, GLsizei height,
                                 GLenum format, GLenum type,
                                 const GLvoid *pixels,
                                 const struct gl_pixelstore_attrib *packing,
                                 struct gl_texture_object *texObj,
                                 struct gl_texture_image *texImage )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonTexObjPtr t = (radeonTexObjPtr) texObj->DriverData;

   /* A SubImage call implies a prior TexImage, so DriverData should exist;
    * the else branch below is defensive recovery. */
   assert( t ); /* this _should_ be true */
   if ( t ) {
      /* The on-card copy (if any) is stale now; drop it. */
      radeonSwapOutTexObj( rmesa, t );
   }
   else {
      t = radeonAllocTexObj(texObj);
      if (!t) {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage2D");
         return;
      }
      texObj->DriverData = t;
   }

   _mesa_store_texsubimage2d(ctx, target, level, xoffset, yoffset, width,
                             height, format, type, pixels, packing, texObj,
                             texImage);

   t->dirty_images |= (1 << level);
}
/* Build an SSE-specialised glTexCoord2f() writing to texture unit 0's
 * coordinate slot; delegates to the generic 2-float attribute generator.
 */
static struct dynfn *radeon_makeSSETexCoord2f( GLcontext *ctx, int key )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   return radeon_makeSSEAttribute2f( &rmesa->vb.dfn_cache.TexCoord2f,
				     key,
				     __FUNCTION__,
				     rmesa->vb.texcoordptr[0] );
}
/* Emit the packets that start an occlusion query on r600/r700 hardware:
 * zero the result buffer, reserve command-stream space for it, then emit a
 * ZPASS_DONE event write targeting the query buffer.
 */
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);
   struct radeon_query_object *query = radeon->query.current;
   BATCH_LOCALS(radeon);
   radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

   /* clear the buffer */
   radeon_bo_map(query->bo, GL_FALSE);
   memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
   radeon_bo_unmap(query->bo);

   /* Make sure the command stream has room to reference the buffer. */
   radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				 query->bo,
				 0, RADEON_GEM_DOMAIN_GTT);

   /* 4 dwords of packet plus 2 for the relocation. */
   BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
   R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
   R600_OUT_BATCH(ZPASS_DONE);
   R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
   R600_OUT_BATCH(0x00000000);
   R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
   END_BATCH();

   query->emitted_begin = GL_TRUE;
}
/* Switch from the software-TCL fallback back to hardware TCL: restore the
 * hardware coordinate format, material handling, and release software-path
 * resources.  Order matters: the DMA flush must run before the flush hook
 * and vertex format are cleared.
 */
static void transition_to_hwtnl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
			  RADEON_TEX1_W_ROUTING_USE_Q1);

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
      /* Hardware TCL consumes unprojected coordinates. */
      _tnl_need_projected_coords( ctx, GL_FALSE );
   }

   radeonUpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

   /* Flush any software-path DMA still pending, then detach the hook. */
   if ( rmesa->dma.flush )
      rmesa->dma.flush( rmesa );

   rmesa->dma.flush = NULL;
   rmesa->swtcl.vertex_format = 0;

   if (rmesa->swtcl.indexed_verts.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
			      __FUNCTION__ );

   if (RADEON_DEBUG & DEBUG_FALLBACKS)
      fprintf(stderr, "Radeon end tcl fallback\n");
}
/* Build an SSE-specialised glNormal3f() writing to the current normal slot;
 * delegates to the generic 3-float attribute generator.
 */
static struct dynfn *radeon_makeSSENormal3f( GLcontext *ctx, int key )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   return radeon_makeSSEAttribute3f( &rmesa->vb.dfn_cache.Normal3f,
				     key,
				     __FUNCTION__,
				     rmesa->vb.normalptr );
}
/* glReadPixels() entry point (swrast fallback variant): try the accelerated
 * blit path first, otherwise fall through to the software spans code.
 * NOTE(review): another radeonReadPixels definition exists in this source
 * using _mesa_readpixels — these presumably come from different driver
 * generations; verify only one is compiled per build.
 */
void radeonReadPixels(struct gl_context * ctx,
                      GLint x, GLint y, GLsizei width, GLsizei height,
                      GLenum format, GLenum type,
                      const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);
   radeon_prepare_render(radeon);

   /* Fast path: hardware blit readback. */
   if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack, pixels))
      return;

   /* Update Mesa state before calling down into _swrast_ReadPixels, as
    * the spans code requires the computed buffer states to be up to date,
    * but _swrast_ReadPixels only updates Mesa state after setting up
    * the spans code.
    */
   radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
                "Falling back to sw for ReadPixels (format %s, type %s)\n",
                _mesa_lookup_enum_by_nr(format), _mesa_lookup_enum_by_nr(type));

   if (ctx->NewState)
      _mesa_update_state(ctx);

   _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
}
/* Select software-rasterization render functions for the tnl module based
 * on the current triangle caps.  Only active during a TCL fallback (and not
 * during a full software fallback).
 */
void radeonChooseRenderState( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLuint caps = ctx->_TriangleCaps;
   GLuint index = 0;

   if (!rmesa->TclFallback || rmesa->Fallback)
      return;

   if (caps & DD_TRI_LIGHT_TWOSIDE)
      index |= RADEON_TWOSIDE_BIT;
   if (caps & DD_TRI_UNFILLED)
      index |= RADEON_UNFILLED_BIT;

   /* Nothing to do if the right entry points are already installed. */
   if (index == rmesa->swtcl.RenderIndex)
      return;

   tnl->Driver.Render.Points = rast_tab[index].points;
   tnl->Driver.Render.Line = rast_tab[index].line;
   tnl->Driver.Render.ClippedLine = rast_tab[index].line;
   tnl->Driver.Render.Triangle = rast_tab[index].triangle;
   tnl->Driver.Render.Quad = rast_tab[index].quad;

   if (index == 0) {
      /* Fast paths for the plain (no twoside/unfilled) case. */
      tnl->Driver.Render.PrimTabVerts = radeon_render_tab_verts;
      tnl->Driver.Render.PrimTabElts = radeon_render_tab_elts;
      tnl->Driver.Render.ClippedPolygon = radeon_fast_clipped_poly;
   }
   else {
      tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
      tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
      tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
   }

   rmesa->swtcl.RenderIndex = index;
}
/* ctx->Driver.CheckQuery() hook: poll whether an occlusion query result is
 * ready.  With a kernel memory manager and GEM busy support we can poll the
 * buffer non-blockingly; otherwise we must block in radeonWaitQuery().
 */
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);

#ifdef DRM_RADEON_GEM_BUSY
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		struct radeon_query_object *query = (struct radeon_query_object *)q;
		uint32_t domain;

		/* Need to perform a flush, as per ARB_occlusion_query spec */
		if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
			ctx->Driver.Flush(ctx);
		}

		/* Buffer no longer busy => GPU has written the result. */
		if (radeon_bo_is_busy(query->bo, &domain) == 0) {
			radeonQueryGetResult(ctx, q);
			query->Base.Ready = GL_TRUE;
		}
	} else {
		radeonWaitQuery(ctx, q);
	}
#else
	radeonWaitQuery(ctx, q);
#endif
}
/* One-time initialisation of the software-TCL path: install the tnl render
 * hooks and set up the clip-space vertex buffer and swtcl bookkeeping.
 */
void radeonInitSwtcl( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   static int firsttime = 1;

   /* The rasterization function table is shared by all contexts; build it
    * once. */
   if (firsttime) {
      init_rast_tab();
      firsttime = 0;
   }

   /* Driver-specific render stage hooks. */
   tnl->Driver.Render.Start = radeonRenderStart;
   tnl->Driver.Render.Finish = radeonRenderFinish;
   tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
   tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;

   /* Generic tnl helpers for vertex building and copying. */
   tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
   tnl->Driver.Render.CopyPV = _tnl_copy_pv;
   tnl->Driver.Render.Interp = _tnl_interp;

   _tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
		       RADEON_MAX_TNL_VERTEX_SIZE);

   rmesa->swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
   rmesa->swtcl.render_primitive = GL_TRIANGLES;
   rmesa->swtcl.RenderIndex = ~0;
   rmesa->swtcl.hw_primitive = 0;
}
/* tnl PrimitiveNotify hook: record the GL primitive and program the
 * hardware rasterization primitive — except for unfilled triangles, where
 * the unfilled fallback chooses the raster primitive itself.
 */
static void radeonRenderPrimitive( GLcontext *ctx, GLenum prim )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLboolean unfilled_tri = (prim >= GL_TRIANGLES &&
			     (ctx->_TriangleCaps & DD_TRI_UNFILLED));

   rmesa->swtcl.render_primitive = prim;

   if (!unfilled_tri)
      radeonRasterPrimitive( ctx, reduced_hw_prim[prim] );
}
/* Create driver texture objects for the default (name 0) textures of both
 * texture units and park them on the swapped list.  CurrentUnit is
 * temporarily redirected so radeonBindTexture() acts on the right unit,
 * then restored.
 */
static void radeonInitTextureObjects( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_object *texObj;
   GLuint tmp = ctx->Texture.CurrentUnit;

   ctx->Texture.CurrentUnit = 0;

   texObj = ctx->Texture.Unit[0].Current1D;
   radeonBindTexture( ctx, GL_TEXTURE_1D, texObj );
   move_to_tail( &rmesa->texture.swapped,
		 (radeonTexObjPtr)texObj->DriverData );

   texObj = ctx->Texture.Unit[0].Current2D;
   radeonBindTexture( ctx, GL_TEXTURE_2D, texObj );
   move_to_tail( &rmesa->texture.swapped,
		 (radeonTexObjPtr)texObj->DriverData );

   ctx->Texture.CurrentUnit = 1;

   texObj = ctx->Texture.Unit[1].Current1D;
   radeonBindTexture( ctx, GL_TEXTURE_1D, texObj );
   move_to_tail( &rmesa->texture.swapped,
		 (radeonTexObjPtr)texObj->DriverData );

   texObj = ctx->Texture.Unit[1].Current2D;
   radeonBindTexture( ctx, GL_TEXTURE_2D, texObj );
   move_to_tail( &rmesa->texture.swapped,
		 (radeonTexObjPtr)texObj->DriverData );

   /* Restore the unit the application had selected. */
   ctx->Texture.CurrentUnit = tmp;
}
/* Switch from hardware TCL to the software-TCL fallback: reset the software
 * vertex machinery, hand material/lighting validation to the generic tnl
 * path, release hardware vertex arrays, and force last-vertex flat shading
 * (the software path always provokes from the last vertex).
 */
static void transition_to_swtnl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_cntl;

   RADEON_NEWPRIM( rmesa );
   rmesa->swtcl.vertex_format = 0;

   radeonChooseVertexState( ctx );
   radeonChooseRenderState( ctx );

   _mesa_validate_all_lighting_tables( ctx );

   tnl->Driver.NotifyMaterialChange =
      _mesa_validate_all_lighting_tables;

   radeonReleaseArrays( ctx, ~0 );

   se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
   se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

   if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
   }
}
/* Fastpath glMultiTexCoord2fARB(): write (s, t) into the staging slot of
 * the addressed unit.  Only two units exist, so the target is masked to
 * unit 0 or 1.
 */
static void radeon_MultiTexCoord2fARB( GLenum target, GLfloat s, GLfloat t )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLfloat *tc = rmesa->vb.texcoordptr[target & 1];

   tc[0] = s;
   tc[1] = t;
}
/* Tear down the software-TCL path, releasing the indexed-vertex DMA region
 * if one is still held.
 */
void radeonDestroySwtcl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (rmesa->swtcl.indexed_verts.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
			      __FUNCTION__ );
}
/* Fastpath glMultiTexCoord2fvARB(): vector form of the above — write
 * v[0..1] into the staging slot of the addressed unit (masked to 0 or 1).
 */
static void radeon_MultiTexCoord2fvARB( GLenum target, const GLfloat *v )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLfloat *tc = rmesa->vb.texcoordptr[target & 1];

   tc[0] = v[0];
   tc[1] = v[1];
}
/* Fastpath glTexCoord2f(): write (s, t) into texture unit 0's staging
 * slot.
 */
static void radeon_TexCoord2f( GLfloat s, GLfloat t )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLfloat *tc = rmesa->vb.texcoordptr[0];

   tc[0] = s;
   tc[1] = t;
}
/* Fastpath glTexCoord2fv(): vector form — write v[0..1] into texture unit
 * 0's staging slot.
 */
static void radeon_TexCoord2fv( const GLfloat *v )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLfloat *tc = rmesa->vb.texcoordptr[0];

   tc[0] = v[0];
   tc[1] = v[1];
}
/* Bind an EGLImage as the storage of a 2D texture image
 * (GL_OES_EGL_image).  Looks up the __DRIimage, copies its metadata onto
 * the gl_texture_image, then builds a miptree and swaps its buffer object
 * for the image's.
 */
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
				    struct gl_texture_object *texObj,
				    struct gl_texture_image *texImage,
				    GLeglImageOES image_handle)
{
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);
   radeonTexObj *t = radeon_tex_obj(texObj);
   radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
   __DRIscreen *screen;
   __DRIimage *image;

   screen = radeon->dri.screen;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
					      screen->loaderPrivate);
   if (image == NULL)
      return;

   radeonFreeTextureImageBuffer(ctx, texImage);

   /* Adopt the EGLImage's dimensions and format on the Mesa image. */
   texImage->Width = image->width;
   texImage->Height = image->height;
   texImage->Depth = 1;
   texImage->_BaseFormat = GL_RGBA;
   texImage->TexFormat = image->format;
   radeonImage->base.RowStride = image->pitch;
   texImage->InternalFormat = image->internal_format;

   if(t->mt)
   {
      radeon_miptree_unreference(&t->mt);
      t->mt = NULL;
   }

   /* NOTE: The following is *very* ugly and will probably break. But
      I don't know how to deal with it, without creating a whole new
      function like radeon_miptree_from_bo() so I'm going with the
      easy but error-prone way. */

   radeon_try_alloc_miptree(radeon, t);

   radeon_miptree_reference(t->mt, &radeonImage->mt);

   if (t->mt == NULL)
   {
      radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		   "%s Failed to allocate miptree.\n", __func__);
      return;
   }

   /* Particularly ugly: this is guaranteed to break, if image->bo is
      not of the required size for a miptree. */
   /* Replace the freshly-allocated backing store with the EGLImage's
    * buffer object (ref'd so both owners hold a reference). */
   radeon_bo_unref(t->mt->bo);
   radeon_bo_ref(image->bo);
   t->mt->bo = image->bo;

   if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base))
      fprintf(stderr, "miptree doesn't match image\n");
}
/* Build a specialised x86 machine-code version of glVertex3fv() for the
 * current vertex size.  Vertex sizes 6 and 8 get fully-unrolled templates;
 * everything else uses a generic loop template.  DFN() copies the template
 * into a fresh buffer; FIXUP() patches placeholder immediates (at fixed
 * byte offsets tied to the exact template bytes) with real pointers into
 * the context's vertex-building state.  Do not change the offsets
 * independently of the assembly.
 */
struct dynfn *radeon_makeX86Vertex3fv( GLcontext *ctx, int key )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct dynfn *dfn = MALLOC_STRUCT( dynfn );

   if (RADEON_DEBUG & DEBUG_CODEGEN)
      fprintf(stderr, "%s 0x%08x %d\n", __FUNCTION__,
	      key, rmesa->vb.vertex_size );

   switch (rmesa->vb.vertex_size) {
   case 6: {
      /* Unrolled template for 6-dword vertices (xyz + 3 stored dwords). */
      DFN ( _x86_Vertex3fv_6, rmesa->vb.dfn_cache.Vertex3fv );
      FIXUP(dfn->code, 1, 0x00000000, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 27, 0x0000001c, (int)&rmesa->vb.vertex[3]);
      FIXUP(dfn->code, 33, 0x00000020, (int)&rmesa->vb.vertex[4]);
      FIXUP(dfn->code, 45, 0x00000024, (int)&rmesa->vb.vertex[5]);
      FIXUP(dfn->code, 56, 0x00000000, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 61, 0x00000004, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 67, 0x00000004, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 76, 0x00000008, (int)&rmesa->vb.notify);
      break;
   }
   case 8: {
      /* Unrolled template for 8-dword vertices. */
      DFN ( _x86_Vertex3fv_8, rmesa->vb.dfn_cache.Vertex3fv );
      FIXUP(dfn->code, 1, 0x00000000, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 27, 0x0000001c, (int)&rmesa->vb.vertex[3]);
      FIXUP(dfn->code, 33, 0x00000020, (int)&rmesa->vb.vertex[4]);
      FIXUP(dfn->code, 45, 0x0000001c, (int)&rmesa->vb.vertex[5]);
      FIXUP(dfn->code, 51, 0x00000020, (int)&rmesa->vb.vertex[6]);
      FIXUP(dfn->code, 63, 0x00000024, (int)&rmesa->vb.vertex[7]);
      FIXUP(dfn->code, 74, 0x00000000, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 79, 0x00000004, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 85, 0x00000004, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 94, 0x00000008, (int)&rmesa->vb.notify);
      break;
   }
   default: {
      /* Generic loop template: copies vertex_size-3 trailing dwords. */
      DFN ( _x86_Vertex3fv, rmesa->vb.dfn_cache.Vertex3fv );
      FIXUP(dfn->code, 8, 0x01010101, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 32, 0x00000006, rmesa->vb.vertex_size-3);
      FIXUP(dfn->code, 37, 0x00000058, (int)&rmesa->vb.vertex[3]);
      FIXUP(dfn->code, 45, 0x01010101, (int)&rmesa->vb.dmaptr);
      FIXUP(dfn->code, 50, 0x02020202, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 58, 0x02020202, (int)&rmesa->vb.counter);
      FIXUP(dfn->code, 67, 0x0, (int)&rmesa->vb.notify);
      break;
   }
   }

   return dfn;
}