Example #1
static void radeonTexEnv( GLcontext *ctx, GLenum target,
			  GLenum pname, const GLfloat *param )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLuint unit = ctx->Texture.CurrentUnit;
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];

   if ( RADEON_DEBUG & DEBUG_STATE ) {
      fprintf( stderr, "%s( %s )\n",
	       __FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );
   }

   switch ( pname ) {
   case GL_TEXTURE_ENV_COLOR: {
      GLubyte c[4];
      GLuint envColor;
      UNCLAMPED_FLOAT_TO_RGBA_CHAN( c, texUnit->EnvColor );
      envColor = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
      if ( rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] != envColor ) {
	 RADEON_STATECHANGE( rmesa, tex[unit] );
	 rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] = envColor;
      }
      break;
   }

   case GL_TEXTURE_LOD_BIAS_EXT: {
      GLfloat bias, min;
      GLuint b;

      /* The Radeon's LOD bias is a signed 2's complement value with a
       * range of -1.0 <= bias < 4.0.  We break this into two linear
       * functions, one mapping [-1.0,0.0] to [-128,0] and one mapping
       * [0.0,4.0] to [0,127].
       */
      min = driQueryOptionb (&rmesa->optionCache, "no_neg_lod_bias") ?
	  0.0 : -1.0;
      bias = CLAMP( *param, min, 4.0 );
      if ( bias == 0 ) {
	 b = 0;
      } else if ( bias > 0 ) {
	 b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 4.0 )) << RADEON_LOD_BIAS_SHIFT;
      } else {
	 b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 1.0 )) << RADEON_LOD_BIAS_SHIFT;
      }
      if ( (rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] & RADEON_LOD_BIAS_MASK) != b ) {
	 RADEON_STATECHANGE( rmesa, tex[unit] );
	 rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] &= ~RADEON_LOD_BIAS_MASK;
	 rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] |= (b & RADEON_LOD_BIAS_MASK);
      }
      break;
   }

   default:
      return;
   }
}
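
The LOD-bias case above relies on the two-piece linear mapping described in its comment. As a minimal standalone sketch (plain C, not the driver's SCALED_FLOAT_TO_BYTE macro, whose exact rounding may differ), the clamped bias could be converted to the signed 8-bit field roughly like this:

#include <stdio.h>

/* Hypothetical helper for illustration only: maps a LOD bias clamped to
 * [-1.0, 4.0] onto the signed 8-bit range, using the two linear ramps
 * [-1,0] -> [-128,0] and [0,4] -> [0,127] from the comment above.  The real
 * driver additionally shifts the result by RADEON_LOD_BIAS_SHIFT.
 */
static int lod_bias_to_field( float bias )
{
   if ( bias < -1.0f ) bias = -1.0f;
   if ( bias >  4.0f ) bias =  4.0f;
   if ( bias >= 0.0f )
      return (int)( bias * (127.0f / 4.0f) + 0.5f );   /* [0,4]  -> [0,127]  */
   return (int)( bias * 128.0f - 0.5f );               /* [-1,0] -> [-128,0] */
}

int main( void )
{
   const float samples[] = { -1.0f, -0.5f, 0.0f, 1.0f, 4.0f };
   unsigned i;
   for ( i = 0; i < sizeof(samples) / sizeof(samples[0]); i++ )
      printf( "bias %5.2f -> %d\n", samples[i], lod_bias_to_field( samples[i] ) );
   return 0;
}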
Example #2
/* Turn on/off page flipping according to the flags in the sarea:
 */
static void
radeonUpdatePageFlipping( radeonContextPtr rmesa )
{
   int use_back;

   if (rmesa->dri.drmMinor < 3)
      return;

   rmesa->doPageFlip = rmesa->sarea->pfAllowPageFlip;

   use_back = (rmesa->glCtx->Color.DriverDrawBuffer == GL_BACK_LEFT);
   use_back ^= (rmesa->sarea->pfCurrentPage == 1);

   if ( RADEON_DEBUG & DEBUG_VERBOSE )
      fprintf(stderr, "%s allow %d current %d\n", __FUNCTION__, 
	      rmesa->doPageFlip,
	      rmesa->sarea->pfCurrentPage );

   if ( use_back ) {
	 rmesa->state.color.drawOffset = rmesa->radeonScreen->backOffset;
	 rmesa->state.color.drawPitch  = rmesa->radeonScreen->backPitch;
   } else {
	 rmesa->state.color.drawOffset = rmesa->radeonScreen->frontOffset;
	 rmesa->state.color.drawPitch  = rmesa->radeonScreen->frontPitch;
   }

   RADEON_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
}
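
The use_back XOR above combines two independent facts: which logical buffer the context draws to, and which physical page is currently being scanned out. A hedged sketch of that selection (hypothetical helper, not a driver function):

#include <stdbool.h>

/* Illustration only: when pfCurrentPage == 1 the front/back storage roles
 * are swapped, so drawing to the logical back buffer must land in the other
 * physical page -- which is exactly what the XOR expresses.
 */
static bool use_back_storage( bool drawing_back_left, int pf_current_page )
{
   return drawing_back_left ^ ( pf_current_page == 1 );
}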
Example #3
static void transition_to_hwtnl( struct gl_context *ctx )
{
    r100ContextPtr rmesa = R100_CONTEXT(ctx);
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];

    se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                      RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
                      RADEON_VTX_W0_IS_NOT_1_OVER_W0);
    se_coord_fmt |= RADEON_VTX_W0_IS_NOT_1_OVER_W0;

    if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
        RADEON_STATECHANGE( rmesa, set );
        rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
        _tnl_need_projected_coords( ctx, GL_FALSE );
    }

    radeonUpdateMaterial( ctx );

    tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

    if ( rmesa->radeon.dma.flush )
        rmesa->radeon.dma.flush( rmesa->radeon.glCtx );

    rmesa->radeon.dma.flush = NULL;
    rmesa->swtcl.vertex_format = 0;

    //   if (rmesa->swtcl.indexed_verts.buf)
    //      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
    //			      __FUNCTION__ );

    if (RADEON_DEBUG & RADEON_FALLBACKS)
        fprintf(stderr, "Radeon end tcl fallback\n");
}
Example #4
static void transition_to_swtnl( struct gl_context *ctx )
{
    r100ContextPtr rmesa = R100_CONTEXT(ctx);
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    GLuint se_cntl;

    RADEON_NEWPRIM( rmesa );
    rmesa->swtcl.vertex_format = 0;

    radeonChooseVertexState( ctx );
    radeonChooseRenderState( ctx );

    _mesa_validate_all_lighting_tables( ctx );

    tnl->Driver.NotifyMaterialChange =
        _mesa_validate_all_lighting_tables;

    radeonReleaseArrays( ctx, ~0 );

    se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
    se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

    if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
        RADEON_STATECHANGE( rmesa, set );
        rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
    }
}
Example #5
void radeonTclPrimitive( struct gl_context *ctx,
                         GLenum prim,
                         int hw_prim )
{
    r100ContextPtr rmesa = R100_CONTEXT(ctx);
    GLuint se_cntl;
    GLuint newprim = hw_prim | RADEON_CP_VC_CNTL_TCL_ENABLE;

    radeon_prepare_render(&rmesa->radeon);
    if (rmesa->radeon.NewGLState)
        radeonValidateState( ctx );

    if (newprim != rmesa->tcl.hw_primitive ||
            !discrete_prim[hw_prim&0xf]) {
        RADEON_NEWPRIM( rmesa );
        rmesa->tcl.hw_primitive = newprim;
    }

    se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
    se_cntl &= ~RADEON_FLAT_SHADE_VTX_LAST;

    if (prim == GL_POLYGON && (ctx->_TriangleCaps & DD_FLATSHADE))
        se_cntl |= RADEON_FLAT_SHADE_VTX_0;
    else
        se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

    if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
        RADEON_STATECHANGE( rmesa, set );
        rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
    }
}
Example #6
static void transition_to_hwtnl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint se_coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
			  RADEON_TEX1_W_ROUTING_USE_Q1);

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
      _tnl_need_projected_coords( ctx, GL_FALSE );
   }

   radeonUpdateMaterial( ctx );

   tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;

   if ( rmesa->dma.flush )			
      rmesa->dma.flush( rmesa );	

   rmesa->dma.flush = NULL;
   rmesa->swtcl.vertex_format = 0;
   
   if (rmesa->swtcl.indexed_verts.buf) 
      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts, 
			      __FUNCTION__ );

   if (RADEON_DEBUG & DEBUG_FALLBACKS) 
      fprintf(stderr, "Radeon end tcl fallback\n");
}
Example #7
/**
 * Set vertex state for SW TCL.  The primary purpose of this function is to
 * determine in advance whether the hardware can / should do the projection
 * divide or whether Mesa should do it.
 */
void radeonChooseVertexState( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);
   GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;
   
   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_W0_IS_NOT_1_OVER_W0);

   /* We must ensure that we don't do _tnl_need_projected_coords while in a
    * rasterization fallback.  As this function will be called again when we
    * leave a rasterization fallback, we can just skip it for now.
    */
   if (rmesa->radeon.Fallback != 0)
      return;

   /* HW perspective divide is a win, but tiny vertex formats are a
    * bigger one.
    */

   if ((0 == (tnl->render_inputs_bitset & 
        (BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX)
         | BITFIELD64_BIT(_TNL_ATTRIB_COLOR1))))
       || twosided
       || unfilled) {
      rmesa->swtcl.needproj = GL_TRUE;
      se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
		      RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
   }
   else {
      rmesa->swtcl.needproj = GL_FALSE;
      se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   }

   _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
   }
}
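
Distilled from the branch above, the choice of whether Mesa must supply already-projected coordinates can be read as a single predicate. This is an illustrative sketch with hypothetical names, not driver code:

#include <stdbool.h>

/* Mesa-side projection is chosen when the vertex carries no texture
 * coordinates and no secondary color (a tiny vertex format wins over the
 * hardware divide), or when two-sided lighting or unfilled polygons force it.
 */
static bool mesa_should_project( bool any_texcoords, bool secondary_color,
                                 bool twosided, bool unfilled )
{
   return ( !any_texcoords && !secondary_color ) || twosided || unfilled;
}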
Example #8
static void r100_get_lock(radeonContextPtr radeon)
{
   r100ContextPtr rmesa = (r100ContextPtr)radeon;
   drm_radeon_sarea_t *sarea = radeon->sarea;

   RADEON_STATECHANGE(rmesa, ctx);
   if (rmesa->radeon.sarea->tiling_enabled) {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] |=
	 RADEON_COLOR_TILE_ENABLE;
   } else {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] &=
	 ~RADEON_COLOR_TILE_ENABLE;
   }
   
   if (sarea->ctx_owner != rmesa->radeon.dri.hwContext) {
      sarea->ctx_owner = rmesa->radeon.dri.hwContext;
   }
}
Example #9
/**
 * Set vertex state for SW TCL.  The primary purpose of this function is to
 * determine in advance whether the hardware can / should do the projection
 * divide or whether Mesa should do it.
 */
void radeonChooseVertexState( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
   
   se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
		     RADEON_VTX_W0_IS_NOT_1_OVER_W0);

   /* We must ensure that we don't do _tnl_need_projected_coords while in a
    * rasterization fallback.  As this function will be called again when we
    * leave a rasterization fallback, we can just skip it for now.
    */
   if (rmesa->Fallback != 0)
      return;

   /* HW perspective divide is a win, but tiny vertex formats are a
    * bigger one.
    */

   if ((!RENDERINPUTS_TEST_RANGE( tnl->render_inputs_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX ) &&
       !RENDERINPUTS_TEST( tnl->render_inputs_bitset, _TNL_ATTRIB_COLOR1 ))
       || (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
      rmesa->swtcl.needproj = GL_TRUE;
      se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
		      RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
   }
   else {
      rmesa->swtcl.needproj = GL_FALSE;
      se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
   }

   _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
   }
}
Example #10
/**
 * \brief Set texture environment parameters.
 *
 * \param ctx GL context.
 * \param target texture environment.
 * \param pname texture parameter. Accepted value is GL_TEXTURE_ENV_COLOR.
 * \param param parameter value.
 *
 * Updates the current unit's RADEON_TEX_PP_TFACTOR register and informs of the
 * state change.
 */
static void radeonTexEnv( GLcontext *ctx, GLenum target,
			  GLenum pname, const GLfloat *param )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLuint unit = ctx->Texture.CurrentUnit;
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];

   switch ( pname ) {
   case GL_TEXTURE_ENV_COLOR: {
      GLubyte c[4];
      GLuint envColor;
      UNCLAMPED_FLOAT_TO_RGBA_CHAN( c, texUnit->EnvColor );
      envColor = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
      if ( rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] != envColor ) {
	 RADEON_STATECHANGE( rmesa, tex[unit] );
	 rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] = envColor;
      }
      break;
   }

   default:
      return;
   }
}
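
For the GL_TEXTURE_ENV_COLOR case, the environment color ends up as one 32-bit word in PP_TFACTOR. A self-contained sketch of the packing, assuming (as the cpp == 4 argument to radeonPackColor suggests) an ARGB8888 layout; treat the exact channel order as an assumption:

#include <stdint.h>

/* Assumed ARGB8888 packing for a 32-bit framebuffer; the real driver goes
 * through radeonPackColor(), whose layout depends on bytes per pixel.
 */
static uint32_t pack_argb8888( uint8_t r, uint8_t g, uint8_t b, uint8_t a )
{
   return ( (uint32_t)a << 24 ) | ( (uint32_t)r << 16 ) |
          ( (uint32_t)g << 8 )  |   (uint32_t)b;
}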
Example #11
static void radeonResetLineStipple( struct gl_context *ctx )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   RADEON_STATECHANGE( rmesa, lin );
}
Example #12
void radeonUpdateTextureState( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[0];

   if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) {
      struct gl_texture_object *tObj = texUnit->_Current;
      radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;

      /* Upload teximages (not pipelined)
       */
      if ( t->dirty_images ) {
	 RADEON_FIREVERTICES( rmesa );
	 radeonSetTexImages( rmesa, tObj );
      }

      /* Update state if this is a different texture object from last
       * time.
       */
      if ( rmesa->state.texture.unit[0].texobj != t ) {
	 rmesa->state.texture.unit[0].texobj = t;
	 t->dirty_state |= 1<<0;
	 move_to_head( &rmesa->texture.objects[0], t );
      }

      if (t->dirty_state) {
	 GLuint *cmd = RADEON_DB_STATE( tex[0] );

	 cmd[TEX_PP_TXFILTER] &= ~TEXOBJ_TXFILTER_MASK;
	 cmd[TEX_PP_TXFORMAT] &= ~TEXOBJ_TXFORMAT_MASK;
	 cmd[TEX_PP_TXFILTER] |= t->pp_txfilter & TEXOBJ_TXFILTER_MASK;
	 cmd[TEX_PP_TXFORMAT] |= t->pp_txformat & TEXOBJ_TXFORMAT_MASK;
	 cmd[TEX_PP_TXOFFSET] = t->pp_txoffset;
	 cmd[TEX_PP_BORDER_COLOR] = t->pp_border_color;
	 
	 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.tex[0] );
	 t->dirty_state = 0;
      }

      /* Newly enabled?
       */
      if (!(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & RADEON_TEX_0_ENABLE)) {
	 RADEON_STATECHANGE( rmesa, ctx );
	 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= (RADEON_TEX_0_ENABLE | 
					    RADEON_TEX_BLEND_0_ENABLE);

	 RADEON_STATECHANGE( rmesa, tcl );
	 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_ST0;
      }

      radeonUpdateTextureEnv( ctx, 0 );
   }
   else if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<0)) {
      /* Texture unit disabled */
      rmesa->state.texture.unit[0].texobj = 0;
      RADEON_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= 
	 ~((RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE) << 0);

      RADEON_STATECHANGE( rmesa, tcl );
      rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~(RADEON_TCL_VTX_ST0 |
						RADEON_TCL_VTX_Q0);
   }
}
Example #13
/**
 * \brief Update the texture environment.
 *
 * \param ctx GL context
 * \param unit texture unit to update.
 *
 * Sets the state of the RADEON_TEX_PP_TXCBLEND and RADEON_TEX_PP_TXABLEND
 * registers using the ::radeon_color_combine and ::radeon_alpha_combine tables,
 * and informs of the state change.
 */
static void radeonUpdateTextureEnv( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   const struct gl_texture_object *tObj = texUnit->_Current;
   const GLenum format = tObj->Image[0][tObj->BaseLevel]->Format;
   GLuint color_combine = radeon_color_combine[unit][RADEON_DISABLE];
   GLuint alpha_combine = radeon_alpha_combine[unit][RADEON_DISABLE];


   /* Set the texture environment state.  Isn't this nice and clean?
    * The Radeon will automagically set the texture alpha to 0xff when
    * the texture format does not include an alpha component.  This
    * reduces the amount of special-casing we have to do, alpha-only
    * textures being a notable exception.
    */
   switch ( texUnit->EnvMode ) {
   case GL_REPLACE:
      switch ( format ) {
      case GL_RGBA:
      case GL_INTENSITY:
	 color_combine = radeon_color_combine[unit][RADEON_REPLACE];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_REPLACE];
	 break;
      case GL_RGB:
	 color_combine = radeon_color_combine[unit][RADEON_REPLACE];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_DISABLE];
         break;
      default:
	 break;
      }
      break;

   case GL_MODULATE:
      switch ( format ) {
      case GL_RGBA:
      case GL_INTENSITY:
	 color_combine = radeon_color_combine[unit][RADEON_MODULATE];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_MODULATE];
	 break;
      case GL_RGB:
	 color_combine = radeon_color_combine[unit][RADEON_MODULATE];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_DISABLE];
	 break;
      default:
	 break;
      }
      break;

   case GL_DECAL:
      switch ( format ) {
      case GL_RGBA:
      case GL_RGB:
	 color_combine = radeon_color_combine[unit][RADEON_DECAL];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_DISABLE];
	 break;
      case GL_INTENSITY:
	 color_combine = radeon_color_combine[unit][RADEON_DISABLE];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_DISABLE];
	 break;
      default:
	 break;
      }
      break;

   case GL_BLEND:
      switch ( format ) {
      case GL_RGBA:
      case GL_RGB:
	 color_combine = radeon_color_combine[unit][RADEON_BLEND];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_MODULATE];
	 break;
      case GL_INTENSITY:
	 color_combine = radeon_color_combine[unit][RADEON_BLEND];
	 alpha_combine = radeon_alpha_combine[unit][RADEON_BLEND];
	 break;
      default:
	 break;
      }
      break;

   default:
      break;
   }

   if ( rmesa->hw.tex[unit].cmd[TEX_PP_TXCBLEND] != color_combine ||
	rmesa->hw.tex[unit].cmd[TEX_PP_TXABLEND] != alpha_combine ) {
      RADEON_STATECHANGE( rmesa, tex[unit] );
      rmesa->hw.tex[unit].cmd[TEX_PP_TXCBLEND] = color_combine;
      rmesa->hw.tex[unit].cmd[TEX_PP_TXABLEND] = alpha_combine;
   }
}
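
The switch above reduces to choosing an index pair into the per-unit radeon_color_combine / radeon_alpha_combine tables from the environment mode and the texture base format. A reduced, hypothetical sketch of the GL_MODULATE row (the enum and helper below are illustrative, not the driver's tables):

enum combine_op { COMBINE_DISABLE, COMBINE_REPLACE, COMBINE_MODULATE,
                  COMBINE_DECAL, COMBINE_BLEND };

/* GL_MODULATE as handled above: color is always modulated, while alpha is
 * modulated only when the base format carries alpha (GL_RGBA or GL_INTENSITY);
 * for GL_RGB the hardware's implicit 0xff alpha is left untouched.
 */
static void modulate_ops( int base_format_has_alpha,
                          enum combine_op *color, enum combine_op *alpha )
{
   *color = COMBINE_MODULATE;
   *alpha = base_format_has_alpha ? COMBINE_MODULATE : COMBINE_DISABLE;
}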
Example #14
static void radeonResetLineStipple( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   RADEON_STATECHANGE( rmesa, lin );
}
Example #15
/* Emit any changed arrays to new AGP memory and re-emit a packet to
 * update the arrays.
 */
void radeonEmitArrays( GLcontext *ctx, GLuint inputs )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
   struct vertex_buffer *VB = &TNL_CONTEXT( ctx )->vb;
   struct radeon_dma_region **component = rmesa->tcl.aos_components;
   GLuint nr = 0;
   GLuint vfmt = 0;
   GLuint count = VB->Count;
   GLuint vtx;
   
   if (RADEON_DEBUG & DEBUG_VERTS) 
      _tnl_print_vert_flags( __FUNCTION__, inputs );

   if (1) {
      if (!rmesa->tcl.obj.buf) 
	 emit_vector( ctx, 
		      &rmesa->tcl.obj, 
		      (char *)VB->ObjPtr->data,
		      VB->ObjPtr->size,
		      VB->ObjPtr->stride,
		      count);

      /* Intentional fall-through: larger position sizes also set the
       * smaller-size format bits.
       */
      switch( VB->ObjPtr->size ) {
      case 4: vfmt |= RADEON_CP_VC_FRMT_W0;
      case 3: vfmt |= RADEON_CP_VC_FRMT_Z;
      case 2: vfmt |= RADEON_CP_VC_FRMT_XY;
      default:
	 break;
      }
      component[nr++] = &rmesa->tcl.obj;
   }
   

   if (inputs & VERT_NORM) {
      if (!rmesa->tcl.norm.buf)
	 emit_vector( ctx, 
		      &(rmesa->tcl.norm), 
		      (char *)VB->NormalPtr->data,
		      3,
		      VB->NormalPtr->stride,
		      count);

      vfmt |= RADEON_CP_VC_FRMT_N0;
      component[nr++] = &rmesa->tcl.norm;
   }

   if (inputs & VERT_RGBA) {
      if (VB->ColorPtr[0]->Type == GL_UNSIGNED_BYTE) {
	 if (!rmesa->tcl.rgba.buf)
	    emit_ubyte_rgba( ctx, 
			     &rmesa->tcl.rgba, 
			     (char *)VB->ColorPtr[0]->Ptr,
			     VB->ColorPtr[0]->Size,
			     VB->ColorPtr[0]->StrideB,
			     count);

	 vfmt |= RADEON_CP_VC_FRMT_PKCOLOR; 
      }
      else {
	 int emitsize;

	 if (VB->ColorPtr[0]->Size == 4 &&
	     (VB->ColorPtr[0]->StrideB != 0 ||
	      ((GLfloat *)VB->ColorPtr[0]->Ptr)[3] != 1.0)) { 
	    vfmt |= RADEON_CP_VC_FRMT_FPCOLOR | RADEON_CP_VC_FRMT_FPALPHA;
	    emitsize = 4;
	 }
	 else { 
	    vfmt |= RADEON_CP_VC_FRMT_FPCOLOR;
	    emitsize = 3;
	 }


	 if (!rmesa->tcl.rgba.buf)
	    emit_vector( ctx, 
			 &(rmesa->tcl.rgba), 
			 (char *)VB->ColorPtr[0]->Ptr,
			 emitsize,
			 VB->ColorPtr[0]->StrideB,
			 count);
      }

      component[nr++] = &rmesa->tcl.rgba;
   }


   if (inputs & VERT_SPEC_RGB) {
      if (!rmesa->tcl.spec.buf) {
	 if (VB->SecondaryColorPtr[0]->Type != GL_UNSIGNED_BYTE)
	    radeon_import_float_spec_colors( ctx );

	 emit_ubyte_rgba( ctx, 
			  &rmesa->tcl.spec, 
			  (char *)VB->SecondaryColorPtr[0]->Ptr,
			  3,
			  VB->SecondaryColorPtr[0]->StrideB,
			  count);
      }

      vfmt |= RADEON_CP_VC_FRMT_PKSPEC; 
      component[nr++] = &rmesa->tcl.spec;
   }

   vtx = (rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &
	  ~(RADEON_TCL_VTX_Q0|RADEON_TCL_VTX_Q1));
      
   if (inputs & VERT_TEX0) {
      if (!rmesa->tcl.tex[0].buf)
	 emit_tex_vector( ctx, 
			  &(rmesa->tcl.tex[0]), 
			  (char *)VB->TexCoordPtr[0]->data,
			  VB->TexCoordPtr[0]->size,
			  VB->TexCoordPtr[0]->stride,
			  count );

      switch( VB->TexCoordPtr[0]->size ) {
      case 4:
	 vtx |= RADEON_TCL_VTX_Q0; 
	 vfmt |= RADEON_CP_VC_FRMT_Q0;
      default: 
	 vfmt |= RADEON_CP_VC_FRMT_ST0;
      }
      component[nr++] = &rmesa->tcl.tex[0];
   }

   if (inputs & VERT_TEX1) {
      if (!rmesa->tcl.tex[1].buf)
	 emit_tex_vector( ctx, 
			  &(rmesa->tcl.tex[1]), 
			  (char *)VB->TexCoordPtr[1]->data,
			  VB->TexCoordPtr[1]->size,
			  VB->TexCoordPtr[1]->stride,
			  count );
	 
      switch( VB->TexCoordPtr[1]->size ) {
      case 4: 
	 vtx |= RADEON_TCL_VTX_Q1;
	 vfmt |= RADEON_CP_VC_FRMT_Q1;
      default: 
	 vfmt |= RADEON_CP_VC_FRMT_ST1;
      }
      component[nr++] = &rmesa->tcl.tex[1];
   }

   if (vtx != rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT]) {
      RADEON_STATECHANGE( rmesa, tcl );
      rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] = vtx;
   }

   rmesa->tcl.nr_aos_components = nr;
   rmesa->tcl.vertex_format = vfmt;
}
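
radeonEmitArrays builds the CP vertex-format word by OR-ing one bit group per emitted array, with the position-size switch intentionally falling through so larger sizes also set the smaller-size bits. Below is a trimmed illustration of that accumulation; the FMT_* values are hypothetical placeholders, not the real RADEON_CP_VC_FRMT_* constants:

#include <stdint.h>

#define FMT_XY       (1u << 0)   /* placeholder bit values for illustration */
#define FMT_Z        (1u << 1)
#define FMT_W0       (1u << 2)
#define FMT_N0       (1u << 3)
#define FMT_PKCOLOR  (1u << 4)

/* Mirrors the accumulation pattern above in simplified form. */
static uint32_t build_vertex_format( int pos_size, int has_normal, int has_color )
{
   uint32_t vfmt = 0;

   switch ( pos_size ) {
   case 4: vfmt |= FMT_W0;        /* fall through */
   case 3: vfmt |= FMT_Z;         /* fall through */
   case 2: vfmt |= FMT_XY;        /* fall through */
   default: break;
   }
   if ( has_normal ) vfmt |= FMT_N0;
   if ( has_color )  vfmt |= FMT_PKCOLOR;
   return vfmt;
}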


void radeonReleaseArrays( GLcontext *ctx, GLuint newinputs )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_VERTS) 
      _tnl_print_vert_flags( __FUNCTION__, newinputs );

   if (newinputs & VERT_OBJ) 
     radeonReleaseDmaRegion( rmesa, &rmesa->tcl.obj, __FUNCTION__ );

   if (newinputs & VERT_NORM) 
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.norm, __FUNCTION__ );

   if (newinputs & VERT_RGBA) 
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.rgba, __FUNCTION__ );

   if (newinputs & VERT_SPEC_RGB) 
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.spec, __FUNCTION__ );

   if (newinputs & VERT_TEX0)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.tex[0], __FUNCTION__ );

   if (newinputs & VERT_TEX1)
      radeonReleaseDmaRegion( rmesa, &rmesa->tcl.tex[1], __FUNCTION__ );
}