/* Upload a texcoord array into a freshly allocated DMA region and
 * record the resulting AOS (array-of-structures) layout in rvb.
 *
 * Texcoords are emitted as 2 floats (s, t), except 4-component
 * arrays, which are emitted as 3 floats (s, t, q).
 */
static void emit_tex_vector( GLcontext *ctx, struct radeon_dma_region *rvb,
			     char *data, int size, int stride, int count )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   int emitsize = (size == 4) ? 3 : 2;

   if (RADEON_DEBUG & DEBUG_VERTS)
      fprintf(stderr, "%s %d/%d\n", __FUNCTION__, count, size);

   assert (!rvb->buf);

   if (stride == 0) {
      /* Constant attribute: emit a single element with hw stride 0. */
      radeonAllocDmaRegion( rmesa, rvb, 4 * emitsize, 4 );
      count = 1;
      rvb->aos_start = GET_START(rvb);
      rvb->aos_stride = 0;
      rvb->aos_size = emitsize;
   }
   else {
      radeonAllocDmaRegion( rmesa, rvb, 4 * emitsize * count, 4 );
      rvb->aos_start = GET_START(rvb);
      rvb->aos_stride = emitsize;
      rvb->aos_size = emitsize;
   }

   /* Emit the data */
   switch (size) {
   case 1:
      emit_s0_vec( ctx, rvb, data, stride, count );
      break;
   case 2:
   case 3:
      /* both upload exactly two floats per element */
      emit_vec8( ctx, rvb, data, stride, count );
      break;
   case 4:
      emit_stq_vec( ctx, rvb, data, stride, count );
      break;
   default:
      assert(0);
      exit(1);
      break;
   }
}
/* Copy a vertex attribute array into a newly allocated DMA region and
 * record its AOS layout (start/stride/size) for the hardware.
 */
static void emit_vector( GLcontext *ctx, struct r200_dma_region *rvb,
			 char *data, int size, int stride, int count )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);

   if (R200_DEBUG & DEBUG_VERTS)
      fprintf(stderr, "%s count %d size %d stride %d\n",
	      __FUNCTION__, count, size, stride);

   assert (!rvb->buf);

   if (stride == 0) {
      /* Constant attribute: emit one element, tell the hw stride is 0. */
      r200AllocDmaRegion( rmesa, rvb, size * 4, 4 );
      count = 1;
      rvb->aos_stride = 0;
   }
   else {
      r200AllocDmaRegion( rmesa, rvb, size * count * 4, 4 );	/* alignment? */
      rvb->aos_stride = size;
   }
   rvb->aos_start = GET_START(rvb);
   rvb->aos_size = size;

   /* Emit the data */
   switch (size) {
   case 1:  emit_vec4( ctx, rvb, data, stride, count );  break;
   case 2:  emit_vec8( ctx, rvb, data, stride, count );  break;
   case 3:  emit_vec12( ctx, rvb, data, stride, count ); break;
   case 4:  emit_vec16( ctx, rvb, data, stride, count ); break;
   default:
      assert(0);
      exit(1);
      break;
   }
}
/* Upload index (element) data for an indexed draw.
 *
 * If the client array already lives entirely in GART memory it is
 * referenced in place (zero-copy); otherwise the indices are copied
 * into a freshly allocated DMA region.  elt_size is 2 or 4 bytes.
 */
static void r300EmitElts(GLcontext * ctx, void *elts, unsigned long n_elts,
			 int elt_size)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_dma_region *rvb = &rmesa->state.elt_dma;
	void *out;

	assert(elt_size == 2 || elt_size == 4);

	if (r300IsGartMemory(rmesa, elts, n_elts * elt_size)) {
		/* Whole array is inside GART: point the hardware at it
		 * directly instead of copying.
		 */
		rvb->address = rmesa->radeon.radeonScreen->gartTextures.map;
		rvb->start = ((char *)elts) - rvb->address;
		rvb->aos_offset =
		    rmesa->radeon.radeonScreen->gart_texture_offset +
		    rvb->start;
		return;
	} else if (r300IsGartMemory(rmesa, elts, 1)) {
		/* The array starts inside GART memory but runs past its
		 * end: it can neither be referenced in place nor safely
		 * copied.  (Original message claimed the opposite --
		 * "not within GART memory" -- which is exactly the case
		 * this branch does NOT cover.)
		 */
		WARN_ONCE("Element array only partially within GART memory!\n");
		_mesa_exit(-1);
	}

	/* Ordinary client memory: copy the indices into a DMA buffer. */
	r300AllocDmaRegion(rmesa, rvb, n_elts * elt_size, elt_size);
	rvb->aos_offset = GET_START(rvb);

	out = rvb->address + rvb->start;
	memcpy(out, elts, n_elts * elt_size);
}
/* Upload an RGB/RGBA color array as packed ubyte RGBA (one dword per
 * element) into a new DMA region and record its AOS layout.
 */
static void emit_ubyte_rgba( GLcontext *ctx, struct radeon_dma_region *rvb,
			     char *data, int size, int stride, int count )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (RADEON_DEBUG & DEBUG_VERTS)
      fprintf(stderr, "%s %d/%d\n", __FUNCTION__, count, size);

   assert (!rvb->buf);

   if (stride == 0) {
      /* Constant color: a single packed dword, hw stride 0. */
      radeonAllocDmaRegion( rmesa, rvb, 4, 4 );
      count = 1;
      rvb->aos_stride = 0;
   }
   else {
      radeonAllocDmaRegion( rmesa, rvb, 4 * count, 4 );	/* alignment? */
      rvb->aos_stride = 1;
   }
   rvb->aos_start = GET_START(rvb);
   rvb->aos_size = 1;

   /* Emit the data */
   if (size == 3) {
      emit_ubyte_rgba3( ctx, rvb, data, stride, count );
   }
   else if (size == 4) {
      emit_ubyte_rgba4( ctx, rvb, data, stride, count );
   }
   else {
      assert(0);
      exit(1);
   }
}
/* Flush the buffered vtxfmt vertices: hand the current DMA region to
 * the TCL path as a single AOS component, merge adjacent compatible
 * primitives, and emit each resulting primitive run.
 */
static void flush_prims( radeonContextPtr rmesa )
{
   int i,j;
   struct radeon_dma_region tmp = rmesa->dma.current;

   /* Snapshot the current region and take an extra reference so it
    * survives until the release at the end of this function, even if
    * dma.current is recycled meanwhile.
    */
   tmp.buf->refcount++;
   tmp.aos_size = rmesa->vb.vertex_size;
   tmp.aos_stride = rmesa->vb.vertex_size;
   tmp.aos_start = GET_START(&tmp);

   /* Advance dma.current past the vertices consumed so far
    * (initial_counter - counter vertices, vertex_size dwords each).
    */
   rmesa->dma.current.ptr = rmesa->dma.current.start +=
      (rmesa->vb.initial_counter - rmesa->vb.counter) *
      rmesa->vb.vertex_size * 4;

   rmesa->tcl.vertex_format = rmesa->vb.vertex_format;
   rmesa->tcl.aos_components[0] = &tmp;
   rmesa->tcl.nr_aos_components = 1;
   rmesa->dma.flush = NULL;

   /* Optimize the primitive list: merge runs of the same discrete
    * primitive type whose vertex ranges are contiguous, compacting
    * the list in place.
    */
   if (rmesa->vb.nrprims > 1) {
      for (j = 0, i = 1 ; i < rmesa->vb.nrprims; i++) {
	 int pj = rmesa->vb.primlist[j].prim & 0xf;
	 int pi = rmesa->vb.primlist[i].prim & 0xf;

	 if (pj == pi && discreet_gl_prim[pj] &&
	     rmesa->vb.primlist[i].start == rmesa->vb.primlist[j].end) {
	    /* Same mergeable type, contiguous: extend the previous run. */
	    rmesa->vb.primlist[j].end = rmesa->vb.primlist[i].end;
	 }
	 else {
	    j++;
	    if (j != i) rmesa->vb.primlist[j] = rmesa->vb.primlist[i];
	 }
      }
      rmesa->vb.nrprims = j+1;
   }

   /* Emit each (possibly merged) primitive run. */
   for (i = 0 ; i < rmesa->vb.nrprims; i++) {
      if (RADEON_DEBUG & DEBUG_PRIMS)
	 fprintf(stderr, "vtxfmt prim %d: %s %d..%d\n", i,
		 _mesa_lookup_enum_by_nr(
		    rmesa->vb.primlist[i].prim & PRIM_MODE_MASK ),
		 rmesa->vb.primlist[i].start,
		 rmesa->vb.primlist[i].end);

      radeonEmitPrimitive( rmesa->glCtx,
			   rmesa->vb.primlist[i].start,
			   rmesa->vb.primlist[i].end,
			   rmesa->vb.primlist[i].prim );
   }

   rmesa->vb.nrprims = 0;

   /* Drop the reference taken above. */
   radeonReleaseDmaRegion( rmesa, &tmp, __FUNCTION__ );
}
/* Upload per-vertex fog coordinates, converting each one to a fog
 * blend factor on its way into the DMA buffer.
 */
static void emit_vecfog( GLcontext *ctx, struct r200_dma_region *rvb,
			 char *data, int stride, int count )
{
   int i;
   GLfloat *out;
   r200ContextPtr rmesa = R200_CONTEXT(ctx);

   if (R200_DEBUG & DEBUG_VERTS)
      fprintf(stderr, "%s count %d stride %d\n",
	      __FUNCTION__, count, stride);

   assert (!rvb->buf);

   if (stride == 0) {
      /* Constant fog coord: one float, hw stride 0. */
      r200AllocDmaRegion( rmesa, rvb, 4, 4 );
      count = 1;
      rvb->aos_stride = 0;
   }
   else {
      r200AllocDmaRegion( rmesa, rvb, count * 4, 4 );	/* alignment? */
      rvb->aos_stride = 1;
   }
   rvb->aos_start = GET_START(rvb);
   rvb->aos_size = 1;

   /* Convert and emit the data */
   out = (GLfloat *)(rvb->address + rvb->start);
   for (i = 0; i < count; i++, data += stride) {
      *out++ = r200ComputeFogBlendFactor( ctx, *(GLfloat *)data );
   }
}
/* Upload a rectangular texture image to its destination via the 2D
 * blitter.  Three paths: GART client texturing (reference in place),
 * blit straight from GART client memory, or chunked copy through DMA
 * buffers followed by blits.
 *
 * Fix: the address-of operator on `region` had been mojibake-corrupted
 * to the '®' character ("&reg" HTML-entity damage) in four call sites;
 * restored to `&region` so the function compiles.
 */
static void r200UploadRectSubImage( r200ContextPtr rmesa,
				    r200TexObjPtr t,
				    struct gl_texture_image *texImage,
				    GLint x, GLint y,
				    GLint width, GLint height )
{
   const struct gl_texture_format *texFormat = texImage->TexFormat;
   int blit_format, dstPitch, done;

   switch ( texFormat->TexelBytes ) {
   case 1:
      blit_format = R200_CP_COLOR_FORMAT_CI8;
      break;
   case 2:
      blit_format = R200_CP_COLOR_FORMAT_RGB565;
      break;
   case 4:
      blit_format = R200_CP_COLOR_FORMAT_ARGB8888;
      break;
   default:
      /* unsupported texel size: silently skip the upload */
      return;
   }

   t->image[0][0].data = texImage->Data;

   /* Currently don't need to cope with small pitches.
    */
   width = texImage->Width;
   height = texImage->Height;
   dstPitch = t->pp_txpitch + 32;

   if (rmesa->prefer_gart_client_texturing && texImage->IsClientData) {
      /* In this case, could also use GART texturing.  This is
       * currently disabled, but has been tested & works.
       */
      if ( !t->image_override )
	 t->pp_txoffset = r200GartOffsetFromVirtual( rmesa, texImage->Data );
      t->pp_txpitch = texImage->RowStride * texFormat->TexelBytes - 32;

      if (R200_DEBUG & DEBUG_TEXTURE)
	 fprintf(stderr,
		 "Using GART texturing for rectangular client texture\n");

      /* Release FB memory allocated for this image:
       */
      /* FIXME This may not be correct as driSwapOutTextureObject sets
       * FIXME dirty_images.  It may be fine, though.
       */
      if ( t->base.memBlock ) {
	 driSwapOutTextureObject( (driTextureObject *) t );
      }
   }
   else if (texImage->IsClientData) {
      /* Data already in GART memory, with usable pitch.
       */
      GLuint srcPitch;
      srcPitch = texImage->RowStride * texFormat->TexelBytes;
      r200EmitBlit( rmesa,
		    blit_format,
		    srcPitch, r200GartOffsetFromVirtual( rmesa, texImage->Data ),
		    dstPitch, t->bufAddr,
		    0, 0,
		    0, 0,
		    width, height );
   }
   else {
      /* Data not in GART memory, or bad pitch: copy through DMA
       * buffers, at most RADEON_BUFFER_SIZE/dstPitch lines per chunk.
       */
      for (done = 0; done < height ; ) {
	 struct r200_dma_region region;
	 int lines = MIN2( height - done, RADEON_BUFFER_SIZE / dstPitch );
	 int src_pitch;
	 char *tex;

	 src_pitch = texImage->RowStride * texFormat->TexelBytes;

	 tex = (char *)texImage->Data + done * src_pitch;

	 memset(&region, 0, sizeof(region));
	 r200AllocDmaRegion( rmesa, &region, lines * dstPitch, 1024 );

	 /* Copy texdata to dma:
	  */
	 if (0)
	    fprintf(stderr, "%s: src_pitch %d dst_pitch %d\n",
		    __FUNCTION__, src_pitch, dstPitch);

	 if (src_pitch == dstPitch) {
	    memcpy( region.address + region.start, tex, lines * src_pitch );
	 }
	 else {
	    /* pitches differ: copy row by row */
	    char *buf = region.address + region.start;
	    int i;
	    for (i = 0 ; i < lines ; i++) {
	       memcpy( buf, tex, src_pitch );
	       buf += dstPitch;
	       tex += src_pitch;
	    }
	 }

	 r200EmitWait( rmesa, RADEON_WAIT_3D );

	 /* Blit to framebuffer
	  */
	 r200EmitBlit( rmesa,
		       blit_format,
		       dstPitch, GET_START( &region ),
		       dstPitch | (t->tile_bits >> 16),
		       t->bufAddr,
		       0, 0,
		       0, done,
		       width, lines );

	 r200EmitWait( rmesa, RADEON_WAIT_2D );

	 r200ReleaseDmaRegion( rmesa, &region, __FUNCTION__ );

	 done += lines;
      }
   }
}
/* Upload a rectangular texture image to its destination via the 2D
 * blitter, copying through DMA buffers in chunks of at most
 * RADEON_BUFFER_SIZE/dstPitch lines.
 *
 * Fix: the address-of operator on `region` had been mojibake-corrupted
 * to the '®' character ("&reg" HTML-entity damage) in four call sites;
 * restored to `&region` so the function compiles.
 */
static void radeonUploadRectSubImage( radeonContextPtr rmesa,
				      radeonTexObjPtr t,
				      struct gl_texture_image *texImage,
				      GLint x, GLint y,
				      GLint width, GLint height )
{
   const struct gl_texture_format *texFormat = texImage->TexFormat;
   int blit_format, dstPitch, done;

   switch ( texFormat->TexelBytes ) {
   case 1:
      blit_format = RADEON_GMC_DST_8BPP_CI;
      break;
   case 2:
      blit_format = RADEON_GMC_DST_16BPP;
      break;
   case 4:
      blit_format = RADEON_GMC_DST_32BPP;
      break;
   default:
      fprintf( stderr,
	       "radeonUploadRectSubImage: unknown blit_format (texelbytes=%d)\n",
	       texFormat->TexelBytes);
      return;
   }

   t->image[0][0].data = texImage->Data;

   /* Currently don't need to cope with small pitches.
    */
   width = texImage->Width;
   height = texImage->Height;
   dstPitch = t->pp_txpitch + 32;

   { /* FIXME: prefer GART-texturing if possible */
      /* Data not in GART memory, or bad pitch.
       */
      for (done = 0; done < height ; ) {
	 struct radeon_dma_region region;
	 int lines = MIN2( height - done, RADEON_BUFFER_SIZE / dstPitch );
	 int src_pitch;
	 char *tex;

	 src_pitch = texImage->RowStride * texFormat->TexelBytes;

	 tex = (char *)texImage->Data + done * src_pitch;

	 memset(&region, 0, sizeof(region));
	 radeonAllocDmaRegion( rmesa, &region, lines * dstPitch, 1024 );

	 /* Copy texdata to dma:
	  */
	 if (0)
	    fprintf(stderr, "%s: src_pitch %d dst_pitch %d\n",
		    __FUNCTION__, src_pitch, dstPitch);

	 if (src_pitch == dstPitch) {
	    memcpy( region.address + region.start, tex, lines * src_pitch );
	 }
	 else {
	    /* pitches differ: copy row by row */
	    char *buf = region.address + region.start;
	    int i;
	    for (i = 0 ; i < lines ; i++) {
	       memcpy( buf, tex, src_pitch );
	       buf += dstPitch;
	       tex += src_pitch;
	    }
	 }

	 radeonEmitWait( rmesa, RADEON_WAIT_3D );

	 /* Blit to framebuffer
	  */
	 radeonEmitBlit( rmesa,
			 blit_format,
			 dstPitch, GET_START( &region ),
			 dstPitch, t->bufAddr,
			 0, 0,
			 0, done,
			 width, lines );

	 radeonEmitWait( rmesa, RADEON_WAIT_2D );

	 radeonReleaseDmaRegion( rmesa, &region, __FUNCTION__ );

	 done += lines;
      }
   }
}