Example 1
static void transition_to_swtnl( struct gl_context *ctx )
{
    r100ContextPtr rmesa = R100_CONTEXT(ctx);
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    GLuint se_cntl;

    RADEON_NEWPRIM( rmesa );
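    /* Force the swtcl vertex format to be recomputed. */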
    rmesa->swtcl.vertex_format = 0;

    radeonChooseVertexState( ctx );
    radeonChooseRenderState( ctx );

    _mesa_validate_all_lighting_tables( ctx );

    tnl->Driver.NotifyMaterialChange =
        _mesa_validate_all_lighting_tables;

    radeonReleaseArrays( ctx, ~0 );

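    /* Flat shading must take its color from the last (provoking) vertex,
     * as OpenGL specifies; only dirty the state atom if the value changes.
     */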
    se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
    se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;

    if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
        RADEON_STATECHANGE( rmesa, set );
        rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
    }
}
Example 2
static void transition_to_swtnl( struct gl_context *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   R200_NEWPRIM( rmesa );

   r200ChooseVertexState( ctx );
   r200ChooseRenderState( ctx );

   _tnl_validate_shine_tables( ctx ); 

   tnl->Driver.NotifyMaterialChange = 
      _tnl_validate_shine_tables;

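   /* Release all vertex array resources held by the previous (TCL) path. */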
   radeonReleaseArrays( ctx, ~0 );

   /* Still using the D3D-based hardware rasterizer from the radeon; we
    * need to put the card into D3D mode to make it work:
    */
   R200_STATECHANGE( rmesa, vap );
   rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] &= ~(R200_VAP_TCL_ENABLE|R200_VAP_PROG_VTX_SHADER_ENABLE);
}
Example 3
/* TCL render.
 */
static GLboolean r200_run_tcl_render( struct gl_context *ctx,
				      struct tnl_pipeline_stage *stage )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint i;
   GLubyte *vimap_rev;
   /* use hw fixed order for simplicity, pos 0, weight 1, normal 2, fog 3,
    * color0 - color3 4-7, texcoord0 - texcoord5 8-13, pos 1 14. Must not use
    * more than 12 of those at the same time.
    */
   GLubyte map_rev_fixed[15] = {255, 255, 255, 255, 255, 255, 255, 255,
                                255, 255, 255, 255, 255, 255, 255};


   /* TODO: separate this from the swtnl pipeline 
    */
   if (rmesa->radeon.TclFallback)
      return GL_TRUE;	/* fallback to software t&l */

   radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s\n", __func__);

   if (VB->Count == 0)
      return GL_FALSE;

   /* Validate state:
    */
   if (rmesa->radeon.NewGLState)
      if (!r200ValidateState( ctx ))
         return GL_TRUE; /* fallback to sw t&l */

   if (!_mesa_arb_vertex_program_enabled(ctx)) {
      /* NOTE: inputs != tnl->render_inputs - these are the untransformed
       * inputs.
       */
      map_rev_fixed[0] = VERT_ATTRIB_POS;
      /* Technically there is no reason we always need VA_COLOR0; in theory
       * we could disable it depending on lighting, color material, texturing...
       */
      map_rev_fixed[4] = VERT_ATTRIB_COLOR0;

      if (ctx->Light.Enabled) {
	 map_rev_fixed[2] = VERT_ATTRIB_NORMAL;
      }

      /* This also enables VA_COLOR1 when using the separate specular
       * lighting model, which is unnecessary.
       * FIXME: OTOH, we're missing the case where an ATI_fragment_shader
       * accesses the secondary color (if lighting is disabled). The chip
       * seems misconfigured for that elsewhere though (tcl output, might
       * lock up).
       */
      if (_mesa_need_secondary_color(ctx)) {
	 map_rev_fixed[5] = VERT_ATTRIB_COLOR1;
      }

      if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
	 map_rev_fixed[3] = VERT_ATTRIB_FOG;
      }

      for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
	 if (ctx->Texture.Unit[i]._Current) {
	    if (rmesa->TexGenNeedNormals[i]) {
	       map_rev_fixed[2] = VERT_ATTRIB_NORMAL;
	    }
	    map_rev_fixed[8 + i] = VERT_ATTRIB_TEX0 + i;
	 }
      }
      vimap_rev = &map_rev_fixed[0];
   }
   else {
      /* vtx_tcl_output_vtxfmt_0/1 need to match the configuration of the
       * "fragment part", since using a vertex interpolator later which is
       * not in out_vtxfmt0/1 will lock up. It seems to be OK for the vertex
       * program to write to an output that is not enabled, however, so just
       * don't mess with it. We only need to change compsel.
       */
      GLuint out_compsel = 0;
      const GLbitfield64 vp_out =
	 rmesa->curr_vp_hw->mesa_program.info.outputs_written;

      vimap_rev = &rmesa->curr_vp_hw->inputmap_rev[0];
      assert(vp_out & BITFIELD64_BIT(VARYING_SLOT_POS));
      out_compsel = R200_OUTPUT_XYZW;
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_COL0)) {
	 out_compsel |= R200_OUTPUT_COLOR_0;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_COL1)) {
	 out_compsel |= R200_OUTPUT_COLOR_1;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_FOGC)) {
         out_compsel |= R200_OUTPUT_DISCRETE_FOG;
      }
      if (vp_out & BITFIELD64_BIT(VARYING_SLOT_PSIZ)) {
	 out_compsel |= R200_OUTPUT_PT_SIZE;
      }
      for (i = VARYING_SLOT_TEX0; i < VARYING_SLOT_TEX6; i++) {
	 if (vp_out & BITFIELD64_BIT(i)) {
	    out_compsel |= R200_OUTPUT_TEX_0 << (i - VARYING_SLOT_TEX0);
	 }
      }
      if (rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] != out_compsel) {
	 R200_STATECHANGE( rmesa, vtx );
	 rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL] = out_compsel;
      }
   }

   /* Do the actual work:
    */
   radeonReleaseArrays( ctx, ~0 /* stage->changed_inputs */ );
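   /* Predicted end of the command buffer after this batch; checked below to
    * warn if the r200EnsureEmitSize() estimate turned out to be too small.
    */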
   GLuint emit_end = r200EnsureEmitSize( ctx, vimap_rev )
     + rmesa->radeon.cmdbuf.cs->cdw;
   r200EmitArrays( ctx, vimap_rev );

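   /* Emit one hardware primitive per TNL primitive, taking the indexed
    * (element) path when an element list is present.
    */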
   for (i = 0 ; i < VB->PrimitiveCount ; i++)
   {
      GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
	 continue;

      if (VB->Elts)
	 r200EmitEltPrimitive( ctx, start, start+length, prim );
      else
	 r200EmitPrimitive( ctx, start, start+length, prim );
   }
   if ( emit_end < rmesa->radeon.cmdbuf.cs->cdw )
      WARN_ONCE("Rendering was %d commands larger than the predicted size."
                " We might overflow the command buffer.\n",
                rmesa->radeon.cmdbuf.cs->cdw - emit_end);

   return GL_FALSE;		/* finished the pipe */
}
Example 4
/* TCL render.
 */
static GLboolean radeon_run_tcl_render( struct gl_context *ctx,
                                        struct tnl_pipeline_stage *stage )
{
    r100ContextPtr rmesa = R100_CONTEXT(ctx);
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *VB = &tnl->vb;
    GLuint inputs = VERT_BIT_POS | VERT_BIT_COLOR0;
    GLuint i;

    /* TODO: separate this from the swtnl pipeline
     */
    if (rmesa->radeon.TclFallback)
        return GL_TRUE;	/* fallback to software t&l */

    if (VB->Count == 0)
        return GL_FALSE;

    /* NOTE: inputs != tnl->render_inputs - these are the untransformed
     * inputs.
     */
    if (ctx->Light.Enabled) {
        inputs |= VERT_BIT_NORMAL;
    }

    if (ctx->_TriangleCaps & DD_SEPARATE_SPECULAR) {
        inputs |= VERT_BIT_COLOR1;
    }

    if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
        inputs |= VERT_BIT_FOG;
    }

    for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            /* TODO: probably should not emit texture coords when texgen is enabled */
            if (rmesa->TexGenNeedNormals[i]) {
                inputs |= VERT_BIT_NORMAL;
            }
            inputs |= VERT_BIT_TEX(i);
        }
    }

    radeonReleaseArrays( ctx, ~0 );
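    /* Predicted end of the command buffer after this batch; checked below to
     * warn if the radeonEnsureEmitSize() estimate turned out to be too small.
     */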
    GLuint emit_end = radeonEnsureEmitSize( ctx, inputs )
                      + rmesa->radeon.cmdbuf.cs->cdw;
    radeonEmitArrays( ctx, inputs );

    rmesa->tcl.Elts = VB->Elts;

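    /* Emit one hardware primitive per TNL primitive, taking the indexed
     * (element) path when an element list is present.
     */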
    for (i = 0 ; i < VB->PrimitiveCount ; i++)
    {
        GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
        GLuint start = VB->Primitive[i].start;
        GLuint length = VB->Primitive[i].count;

        if (!length)
            continue;

        if (rmesa->tcl.Elts)
            radeonEmitEltPrimitive( ctx, start, start+length, prim );
        else
            radeonEmitPrimitive( ctx, start, start+length, prim );
    }

    if (emit_end < rmesa->radeon.cmdbuf.cs->cdw)
        WARN_ONCE("Rendering was %d commands larger than the predicted size."
                  " We might overflow the command buffer.\n",
                  rmesa->radeon.cmdbuf.cs->cdw - emit_end);

    return GL_FALSE;		/* finished the pipe */
}
Example 5
/* TCL render.
 */
static GLboolean radeon_run_tcl_render( GLcontext *ctx,
					struct tnl_pipeline_stage *stage )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint inputs = VERT_BIT_POS | VERT_BIT_COLOR0;
   GLuint i;

   /* TODO: separate this from the swtnl pipeline 
    */
   if (rmesa->TclFallback)
      return GL_TRUE;	/* fallback to software t&l */

   if (VB->Count == 0)
      return GL_FALSE;

   /* NOTE: inputs != tnl->render_inputs - these are the untransformed
    * inputs.
    */
   if (ctx->Light.Enabled) {
      inputs |= VERT_BIT_NORMAL;
   }

   if (ctx->_TriangleCaps & DD_SEPARATE_SPECULAR) {
      inputs |= VERT_BIT_COLOR1;
   }

   if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
      inputs |= VERT_BIT_FOG;
   }

   for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         /* TODO: probably should not emit texture coords when texgen is enabled */
	 if (rmesa->TexGenNeedNormals[i]) {
	    inputs |= VERT_BIT_NORMAL;
	 }
	 inputs |= VERT_BIT_TEX(i);
      }
   }

   radeonReleaseArrays( ctx, ~0 );
   radeonEmitArrays( ctx, inputs );

   rmesa->tcl.Elts = VB->Elts;

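   /* Emit one hardware primitive per TNL primitive, taking the indexed
    * (element) path when an element list is present.
    */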
   for (i = 0 ; i < VB->PrimitiveCount ; i++)
   {
      GLuint prim = VB->Primitive[i].mode;
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
	 continue;

      if (rmesa->tcl.Elts)
	 radeonEmitEltPrimitive( ctx, start, start+length, prim );
      else
	 radeonEmitPrimitive( ctx, start, start+length, prim );
   }

   return GL_FALSE;		/* finished the pipe */
}
Example 6
/* Destroy the Mesa and driver specific context data.
 */
void radeonDestroyContext( __DRIcontextPrivate *driContextPriv )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = (radeonContextPtr) driContextPriv->driverPrivate;
   radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;

   /* check if we're deleting the currently bound context */
   if (rmesa == current) {
      RADEON_FIREVERTICES( rmesa );
      _mesa_make_current(NULL, NULL, NULL);
   }

   /* Free radeon context resources */
   assert(rmesa); /* should never be null */
   if ( rmesa ) {
      GLboolean   release_texture_heaps;


      release_texture_heaps = (rmesa->glCtx->Shared->RefCount == 1);
      _swsetup_DestroyContext( rmesa->glCtx );
      _tnl_DestroyContext( rmesa->glCtx );
      _vbo_DestroyContext( rmesa->glCtx );
      _swrast_DestroyContext( rmesa->glCtx );

      radeonDestroySwtcl( rmesa->glCtx );
      radeonReleaseArrays( rmesa->glCtx, ~0 );
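      /* Give back the current DMA region and flush any commands still
       * queued for the hardware.
       */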
      if (rmesa->dma.current.buf) {
	 radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
	 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
      }

      _mesa_vector4f_free( &rmesa->tcl.ObjClean );

      if (rmesa->state.scissor.pClipRects) {
	 FREE(rmesa->state.scissor.pClipRects);
	 rmesa->state.scissor.pClipRects = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         int i;

         for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
	    driDestroyTextureHeap( rmesa->texture_heaps[ i ] );
	    rmesa->texture_heaps[ i ] = NULL;
         }

	 assert( is_empty_list( & rmesa->swapped ) );
      }

      /* free the Mesa context */
      rmesa->glCtx->DriverCtx = NULL;
      _mesa_destroy_context( rmesa->glCtx );

      /* free the option cache */
      driDestroyOptionCache (&rmesa->optionCache);

      FREE( rmesa );
   }
}