/* Leave the software-TCL fallback path and hand vertex processing back
 * to the hardware TNL pipeline: restore the coordinate-format register,
 * re-hook material notifications, and drop any swtcl DMA state.
 */
static void transition_to_hwtnl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   const GLuint coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
                             RADEON_TEX1_W_ROUTING_USE_Q1);

   /* Re-program SE_COORDFMT only if it drifted while swtcl was active. */
   if ( rmesa->hw.set.cmd[SET_SE_COORDFMT] != coord_fmt ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = coord_fmt;
      _tnl_need_projected_coords( ctx, GL_FALSE );
   }

   radeonUpdateMaterial( ctx );
   tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

   /* Flush any vertices the swtcl path still has buffered in DMA. */
   if ( rmesa->dma.flush != NULL )
      rmesa->dma.flush( rmesa );
   rmesa->dma.flush = NULL;

   rmesa->swtcl.vertex_format = 0;

   if ( rmesa->swtcl.indexed_verts.buf != NULL )
      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
                              __FUNCTION__ );

   if ( RADEON_DEBUG & DEBUG_FALLBACKS )
      fprintf( stderr, "Radeon end tcl fallback\n" );
}
/* Free the software-TCL resources held by the context.  Currently the
 * only swtcl-owned allocation is the indexed-vertex DMA region.
 */
void radeonDestroySwtcl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   /* Nothing to release if no indexed-vertex buffer was ever allocated. */
   if ( rmesa->swtcl.indexed_verts.buf == NULL )
      return;

   radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
                           __FUNCTION__ );
}
/* Flush the accumulated vtxfmt primitives to the hardware: snapshot the
 * current DMA region as a single array-of-structs component, merge
 * adjacent compatible primitives, then emit each one.
 */
static void flush_prims( radeonContextPtr rmesa )
{
   int i,j;
   struct radeon_dma_region tmp = rmesa->dma.current;

   /* Take an extra reference on the buffer: the copy in 'tmp' must stay
    * valid until the release at the end of this function, even if
    * rmesa->dma.current is advanced or recycled meanwhile.
    */
   tmp.buf->refcount++;
   tmp.aos_size = rmesa->vb.vertex_size;
   tmp.aos_stride = rmesa->vb.vertex_size;
   tmp.aos_start = GET_START(&tmp);

   /* Advance the current DMA region past the vertices written so far
    * (initial_counter - counter = number of vertices emitted; vertex_size
    * is in dwords, hence the *4 to get bytes).
    */
   rmesa->dma.current.ptr = rmesa->dma.current.start +=
      (rmesa->vb.initial_counter - rmesa->vb.counter) *
      rmesa->vb.vertex_size * 4;

   rmesa->tcl.vertex_format = rmesa->vb.vertex_format;
   rmesa->tcl.aos_components[0] = &tmp;
   rmesa->tcl.nr_aos_components = 1;
   rmesa->dma.flush = NULL;

   /* Optimize the primitive list: compact in place, merging run i into
    * run j when they are the same discrete primitive type and i starts
    * exactly where j ends.
    */
   if (rmesa->vb.nrprims > 1) {
      for (j = 0, i = 1 ; i < rmesa->vb.nrprims; i++) {
	 /* Low nibble of .prim is the hardware primitive type. */
	 int pj = rmesa->vb.primlist[j].prim & 0xf;
	 int pi = rmesa->vb.primlist[i].prim & 0xf;

	 if (pj == pi && discreet_gl_prim[pj] &&
	     rmesa->vb.primlist[i].start == rmesa->vb.primlist[j].end) {
	    /* Mergeable: extend run j to cover run i. */
	    rmesa->vb.primlist[j].end = rmesa->vb.primlist[i].end;
	 }
	 else {
	    /* Not mergeable: keep run i as the next surviving entry. */
	    j++;
	    if (j != i)
	       rmesa->vb.primlist[j] = rmesa->vb.primlist[i];
	 }
      }
      rmesa->vb.nrprims = j+1;
   }

   /* Emit every (possibly merged) primitive run. */
   for (i = 0 ; i < rmesa->vb.nrprims; i++) {
      if (RADEON_DEBUG & DEBUG_PRIMS)
	 fprintf(stderr, "vtxfmt prim %d: %s %d..%d\n", i,
		 _mesa_lookup_enum_by_nr( rmesa->vb.primlist[i].prim &
					  PRIM_MODE_MASK ),
		 rmesa->vb.primlist[i].start,
		 rmesa->vb.primlist[i].end);

      radeonEmitPrimitive( rmesa->glCtx,
			   rmesa->vb.primlist[i].start,
			   rmesa->vb.primlist[i].end,
			   rmesa->vb.primlist[i].prim );
   }

   rmesa->vb.nrprims = 0;
   /* Drop the extra reference taken above. */
   radeonReleaseDmaRegion( rmesa, &tmp, __FUNCTION__ );
}
/* Destroy the Mesa and driver specific context data.
 * Tears down the software rasterizer/TNL modules, releases DMA and
 * texture-heap resources, then frees the Mesa context and the driver
 * context struct itself.  Destruction order matters: Mesa modules are
 * destroyed before the DMA/texture state they may still reference.
 */
void radeonDestroyContext( __DRIcontextPrivate *driContextPriv )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = (radeonContextPtr) driContextPriv->driverPrivate;
   radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;

   /* check if we're deleting the currently bound context */
   if (rmesa == current) {
      /* Flush pending vertices before unbinding so nothing is lost. */
      RADEON_FIREVERTICES( rmesa );
      _mesa_make_current(NULL, NULL, NULL);
   }

   /* Free radeon context resources */
   assert(rmesa); /* should never be null */
   if ( rmesa ) {
      GLboolean release_texture_heaps;

      /* Shared->RefCount == 1 means this context is the last user of the
       * share group, so the private texture heaps can be torn down too.
       */
      release_texture_heaps = (rmesa->glCtx->Shared->RefCount == 1);
      _swsetup_DestroyContext( rmesa->glCtx );
      _tnl_DestroyContext( rmesa->glCtx );
      _vbo_DestroyContext( rmesa->glCtx );
      _swrast_DestroyContext( rmesa->glCtx );

      radeonDestroySwtcl( rmesa->glCtx );
      radeonReleaseArrays( rmesa->glCtx, ~0 );

      /* Release the in-flight DMA buffer and make sure the command
       * buffer is flushed before the context goes away.
       */
      if (rmesa->dma.current.buf) {
	 radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
	 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
      }

      _mesa_vector4f_free( &rmesa->tcl.ObjClean );

      if (rmesa->state.scissor.pClipRects) {
	 FREE(rmesa->state.scissor.pClipRects);
	 rmesa->state.scissor.pClipRects = NULL;
      }

      if ( release_texture_heaps ) {
	 /* This share group is about to go away, free our private
	  * texture object data.
	  */
	 int i;

	 for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
	    driDestroyTextureHeap( rmesa->texture_heaps[ i ] );
	    rmesa->texture_heaps[ i ] = NULL;
	 }

	 /* All swapped-out textures should have been destroyed by now. */
	 assert( is_empty_list( & rmesa->swapped ) );
      }

      /* free the Mesa context */
      rmesa->glCtx->DriverCtx = NULL;
      _mesa_destroy_context( rmesa->glCtx );

      /* free the option cache */
      driDestroyOptionCache (&rmesa->optionCache);

      FREE( rmesa );
   }
}
/* Emit any changed arrays to new agp memory, re-emit a packet to
 * update the arrays.
 *
 * Builds the array-of-structs component list in rmesa->tcl and the
 * matching CP vertex-format word for the inputs that are active.
 * Vectors are only (re-)emitted when their DMA buffer was released
 * (buf == NULL); otherwise the cached copy is reused.
 */
void radeonEmitArrays( GLcontext *ctx, GLuint inputs )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
   struct vertex_buffer *VB = &TNL_CONTEXT( ctx )->vb;
   struct radeon_dma_region **component = rmesa->tcl.aos_components;
   GLuint nr = 0;           /* number of AOS components built */
   GLuint vfmt = 0;         /* CP vertex format flags */
   GLuint count = VB->Count;
   GLuint vtx;

   if (RADEON_DEBUG & DEBUG_VERTS)
      _tnl_print_vert_flags( __FUNCTION__, inputs );

   /* Position is always emitted. */
   if (1) {
      if (!rmesa->tcl.obj.buf)
	 emit_vector( ctx,
		      &rmesa->tcl.obj,
		      (char *)VB->ObjPtr->data,
		      VB->ObjPtr->size,
		      VB->ObjPtr->stride,
		      count);

      /* Cases intentionally fall through: a size-4 position needs
       * W0, Z and XY; size 3 needs Z and XY; etc.
       */
      switch( VB->ObjPtr->size ) {
      case 4: vfmt |= RADEON_CP_VC_FRMT_W0;   /* fallthrough */
      case 3: vfmt |= RADEON_CP_VC_FRMT_Z;    /* fallthrough */
      case 2: vfmt |= RADEON_CP_VC_FRMT_XY;   /* fallthrough */
      default:
	 break;  /* BUGFIX: a label must precede a statement (C11 6.8.1);
		  * the bare "default:" before "}" did not compile. */
      }
      component[nr++] = &rmesa->tcl.obj;
   }

   if (inputs & VERT_NORM) {
      if (!rmesa->tcl.norm.buf)
	 emit_vector( ctx,
		      &(rmesa->tcl.norm),
		      (char *)VB->NormalPtr->data,
		      3,
		      VB->NormalPtr->stride,
		      count);

      vfmt |= RADEON_CP_VC_FRMT_N0;
      component[nr++] = &rmesa->tcl.norm;
   }

   if (inputs & VERT_RGBA) {
      if (VB->ColorPtr[0]->Type == GL_UNSIGNED_BYTE) {
	 if (!rmesa->tcl.rgba.buf)
	    emit_ubyte_rgba( ctx,
			     &rmesa->tcl.rgba,
			     (char *)VB->ColorPtr[0]->Ptr,
			     VB->ColorPtr[0]->Size,
			     VB->ColorPtr[0]->StrideB,
			     count);

	 vfmt |= RADEON_CP_VC_FRMT_PKCOLOR;
      }
      else {
	 int emitsize;

	 /* A 4-component float color with non-constant alpha (stride
	  * != 0) or alpha != 1.0 needs the separate FPALPHA channel;
	  * otherwise 3 floats suffice.
	  */
	 if (VB->ColorPtr[0]->Size == 4 &&
	     (VB->ColorPtr[0]->StrideB != 0 ||
	      ((GLfloat *)VB->ColorPtr[0]->Ptr)[3] != 1.0)) {
	    vfmt |= RADEON_CP_VC_FRMT_FPCOLOR | RADEON_CP_VC_FRMT_FPALPHA;
	    emitsize = 4;
	 }
	 else {
	    vfmt |= RADEON_CP_VC_FRMT_FPCOLOR;
	    emitsize = 3;
	 }

	 if (!rmesa->tcl.rgba.buf)
	    emit_vector( ctx,
			 &(rmesa->tcl.rgba),
			 (char *)VB->ColorPtr[0]->Ptr,
			 emitsize,
			 VB->ColorPtr[0]->StrideB,
			 count);
      }

      component[nr++] = &rmesa->tcl.rgba;
   }

   if (inputs & VERT_SPEC_RGB) {
      if (!rmesa->tcl.spec.buf) {
	 /* Hardware wants packed ubyte specular; convert float input. */
	 if (VB->SecondaryColorPtr[0]->Type != GL_UNSIGNED_BYTE)
	    radeon_import_float_spec_colors( ctx );

	 emit_ubyte_rgba( ctx,
			  &rmesa->tcl.spec,
			  (char *)VB->SecondaryColorPtr[0]->Ptr,
			  3,
			  VB->SecondaryColorPtr[0]->StrideB,
			  count);
      }

      vfmt |= RADEON_CP_VC_FRMT_PKSPEC;
      component[nr++] = &rmesa->tcl.spec;
   }

   /* Start from the current TCL output format with the Q (4th texcoord)
    * bits cleared; re-set them below if 4-component texcoords are used.
    */
   vtx = (rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &
	  ~(RADEON_TCL_VTX_Q0|RADEON_TCL_VTX_Q1));

   if (inputs & VERT_TEX0) {
      if (!rmesa->tcl.tex[0].buf)
	 emit_tex_vector( ctx,
			  &(rmesa->tcl.tex[0]),
			  (char *)VB->TexCoordPtr[0]->data,
			  VB->TexCoordPtr[0]->size,
			  VB->TexCoordPtr[0]->stride,
			  count );

      switch( VB->TexCoordPtr[0]->size ) {
      case 4:
	 vtx |= RADEON_TCL_VTX_Q0;
	 vfmt |= RADEON_CP_VC_FRMT_Q0;
	 /* fallthrough */
      default:
	 vfmt |= RADEON_CP_VC_FRMT_ST0;
      }
      component[nr++] = &rmesa->tcl.tex[0];
   }

   if (inputs & VERT_TEX1) {
      if (!rmesa->tcl.tex[1].buf)
	 emit_tex_vector( ctx,
			  &(rmesa->tcl.tex[1]),
			  (char *)VB->TexCoordPtr[1]->data,
			  VB->TexCoordPtr[1]->size,
			  VB->TexCoordPtr[1]->stride,
			  count );

      switch( VB->TexCoordPtr[1]->size ) {
      case 4:
	 vtx |= RADEON_TCL_VTX_Q1;
	 vfmt |= RADEON_CP_VC_FRMT_Q1;
	 /* fallthrough */
      default:
	 vfmt |= RADEON_CP_VC_FRMT_ST1;
      }
      component[nr++] = &rmesa->tcl.tex[1];
   }

   if (vtx != rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT]) {
      RADEON_STATECHANGE( rmesa, tcl );
      rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] = vtx;
   }

   rmesa->tcl.nr_aos_components = nr;
   rmesa->tcl.vertex_format = vfmt;
}

/* Release the cached DMA regions for whichever vertex arrays changed
 * (bitmask in newinputs), forcing radeonEmitArrays to re-emit them.
 */
void radeonReleaseArrays( GLcontext *ctx, GLuint newinputs )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_VERTS)
      _tnl_print_vert_flags( __FUNCTION__, newinputs );

   if (newinputs & VERT_OBJ)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.obj, __FUNCTION__ );

   if (newinputs & VERT_NORM)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.norm, __FUNCTION__ );

   if (newinputs & VERT_RGBA)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.rgba, __FUNCTION__ );

   if (newinputs & VERT_SPEC_RGB)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.spec, __FUNCTION__ );

   if (newinputs & VERT_TEX0)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.tex[0], __FUNCTION__ );

   if (newinputs & VERT_TEX1)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.tex[1], __FUNCTION__ );
}
/* Upload a GL_TEXTURE_RECTANGLE (sub)image to the card by staging the
 * texels through DMA buffers and blitting them into the texture's
 * framebuffer location, a slab of lines at a time.
 *
 * BUGFIX: every "&region" in this function had been corrupted to the
 * mojibake "(R)ion" (an HTML-entity-mangled '&reg'); restored the
 * address-of expressions so the code compiles again.
 */
static void radeonUploadRectSubImage( radeonContextPtr rmesa,
				      radeonTexObjPtr t,
				      struct gl_texture_image *texImage,
				      GLint x, GLint y,
				      GLint width, GLint height )
{
   const struct gl_texture_format *texFormat = texImage->TexFormat;
   int blit_format, dstPitch, done;

   /* Pick the blit destination format from the texel size. */
   switch ( texFormat->TexelBytes ) {
   case 1:
      blit_format = RADEON_GMC_DST_8BPP_CI;
      break;
   case 2:
      blit_format = RADEON_GMC_DST_16BPP;
      break;
   case 4:
      blit_format = RADEON_GMC_DST_32BPP;
      break;
   default:
      fprintf( stderr,
	       "radeonUploadRectSubImage: unknown blit_format (texelbytes=%d)\n",
	       texFormat->TexelBytes);
      return;
   }

   t->image[0][0].data = texImage->Data;

   /* Currently don't need to cope with small pitches. */
   width = texImage->Width;
   height = texImage->Height;
   dstPitch = t->pp_txpitch + 32;

   { /* FIXME: prefer GART-texturing if possible */
      /* Data not in GART memory, or bad pitch.  Stage through DMA
       * buffers, 'lines' rows per iteration, bounded by buffer size.
       */
      for (done = 0; done < height ; ) {
	 struct radeon_dma_region region;
	 int lines = MIN2( height - done, RADEON_BUFFER_SIZE / dstPitch );
	 int src_pitch;
	 char *tex;

	 src_pitch = texImage->RowStride * texFormat->TexelBytes;
	 tex = (char *)texImage->Data + done * src_pitch;

	 memset(&region, 0, sizeof(region));
	 radeonAllocDmaRegion( rmesa, &region, lines * dstPitch, 1024 );

	 /* Copy texdata to dma:
	  */
	 if (0)
	    fprintf(stderr, "%s: src_pitch %d dst_pitch %d\n",
		    __FUNCTION__, src_pitch, dstPitch);

	 if (src_pitch == dstPitch) {
	    /* Pitches match: one contiguous copy. */
	    memcpy( region.address + region.start, tex, lines * src_pitch );
	 }
	 else {
	    /* Pitches differ: copy row by row into the wider pitch. */
	    char *buf = region.address + region.start;
	    int i;
	    for (i = 0 ; i < lines ; i++) {
	       memcpy( buf, tex, src_pitch );
	       buf += dstPitch;
	       tex += src_pitch;
	    }
	 }

	 /* Serialize with 3D before the blit touches the destination. */
	 radeonEmitWait( rmesa, RADEON_WAIT_3D );

	 /* Blit to framebuffer
	  */
	 radeonEmitBlit( rmesa,
			 blit_format,
			 dstPitch, GET_START( &region ),
			 dstPitch, t->bufAddr,
			 0, 0,
			 0, done,
			 width, lines );

	 /* Wait for the 2D engine before recycling the DMA region. */
	 radeonEmitWait( rmesa, RADEON_WAIT_2D );
	 radeonReleaseDmaRegion( rmesa, &region, __FUNCTION__ );

	 done += lines;
      }
   }
}