Example #1
/**
 * Called the first time stage->run is called.  In effect, don't
 * allocate data until the first time the stage is run.
 */
static GLboolean init_vp( GLcontext *ctx,
			  struct tnl_pipeline_stage *stage )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &(tnl->vb);
   struct vp_stage_data *store;
   const GLuint size = VB->Size;
   GLuint i;

   stage->privatePtr = MALLOC(sizeof(*store));
   store = VP_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   /* Allocate arrays of vertex output values */
   for (i = 0; i < 15; i++) {
      _mesa_vector4f_alloc( &store->attribs[i], 0, size, 32 );
      store->attribs[i].size = 4;
   }

   /* a few other misc allocations */
   _mesa_vector4f_alloc( &store->ndcCoords, 0, size, 32 );
   store->clipmask = (GLubyte *) ALIGN_MALLOC(sizeof(GLubyte)*size, 32 );

   return GL_TRUE;
}
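Example #1 only shows the allocation half of the stage's lifetime. Below is a minimal sketch of the matching teardown such a stage would register as its destroy hook (the name dtr_vp is hypothetical; it assumes _mesa_vector4f_free, ALIGN_FREE and FREE are the inverses of the allocators used above):

static void dtr_vp( struct tnl_pipeline_stage *stage )
{
   struct vp_stage_data *store = VP_STAGE_DATA(stage);
   GLuint i;

   if (store) {
      /* Release everything init_vp allocated, in reverse order. */
      for (i = 0; i < 15; i++)
         _mesa_vector4f_free( &store->attribs[i] );
      _mesa_vector4f_free( &store->ndcCoords );
      ALIGN_FREE( store->clipmask );
      FREE( store );
      stage->privatePtr = NULL;
   }
}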
Example #2
static GLboolean init_vertex_stage( struct gl_context *ctx,
				    struct tnl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct vertex_stage_data *store;
   GLuint size = VB->Size;

   stage->privatePtr = calloc(1, sizeof(*store));
   store = VERTEX_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->eye, 0, size, 32 );
   _mesa_vector4f_alloc( &store->clip, 0, size, 32 );
   _mesa_vector4f_alloc( &store->proj, 0, size, 32 );

   store->clipmask = _mesa_align_malloc(sizeof(GLubyte)*size, 32 );

   if (!store->clipmask ||
       !store->eye.data ||
       !store->clip.data ||
       !store->proj.data)
      return GL_FALSE;

   return GL_TRUE;
}
Example #3
static GLboolean
alloc_point_data(struct gl_context *ctx, struct tnl_pipeline_stage *stage)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct point_stage_data *store;
   stage->privatePtr = malloc(sizeof(*store));
   store = POINT_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->PointSize, 0, VB->Size, 32 );
   return GL_TRUE;
}
Example #4
/**
 * Allocate stage's private data (storage for transformed normals).
 */
static GLboolean
alloc_normal_data(GLcontext *ctx, struct tnl_pipeline_stage *stage)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct normal_stage_data *store;

   stage->privatePtr = _mesa_malloc(sizeof(*store));
   store = NORMAL_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->normal, 0, tnl->vb.Size, 32 );
   return GL_TRUE;
}
Example #5
/* Called the first time stage->run() is invoked.
 */
static GLboolean alloc_texnorm_data( GLcontext *ctx,
				     struct tnl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct texnorm_stage_data *store;
   GLuint i;

   stage->privatePtr = CALLOC(sizeof(*store));
   store = TEXNORM_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++)
      _mesa_vector4f_alloc( &store->texcoord[i], 0, VB->Size, 32 );
   
   return GL_TRUE;
}
Example #6
/* Called the first time stage->run() is invoked.
 */
static GLboolean
alloc_fog_data(struct gl_context *ctx, struct tnl_pipeline_stage *stage)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct fog_stage_data *store;
   stage->privatePtr = malloc(sizeof(*store));
   store = FOG_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->fogcoord, 0, tnl->vb.Size, 32 );

   /* One-time setup of the shared fog tables; `inited` and
    * init_static_data() are file-scope statics defined elsewhere
    * in this file.
    */
   if (!inited)
      init_static_data();

   return GL_TRUE;
}
Example #7
static GLboolean alloc_point_data( GLcontext *ctx,
				   struct gl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct point_stage_data *store;
   stage->privatePtr = MALLOC(sizeof(*store));
   store = POINT_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->PointSize, 0, VB->Size, 32 );

   /* Now run the stage.
    */
   stage->run = run_point_stage;
   return stage->run( ctx, stage );
}
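Note the trick in Example #7: the allocation function is installed as the stage's initial run hook, and on its first invocation it replaces stage->run with run_point_stage and tail-calls it. Subsequent frames therefore dispatch straight to run_point_stage and never pay for the allocation check again.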
Example #8
/* Called the first time stage->run() is invoked.
 */
static GLboolean alloc_texgen_data( struct gl_context *ctx,
				    struct tnl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct texgen_stage_data *store;

   stage->privatePtr = CALLOC(sizeof(*store));
   store = TEXGEN_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   _mesa_vector4f_alloc( &store->texcoord, 0, VB->Size, 32 );

   store->tmp_f = (GLfloat (*)[3]) MALLOC(VB->Size * sizeof(GLfloat) * 3);
   store->tmp_m = (GLfloat *) MALLOC(VB->Size * sizeof(GLfloat));

   return GL_TRUE;
}
Example #9
/**
 * Called the first time stage->run is called.  In effect, don't
 * allocate data until the first time the stage is run.
 */
static GLboolean
init_vp(struct gl_context *ctx, struct tnl_pipeline_stage *stage)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &(tnl->vb);
   struct vp_stage_data *store;
   const GLuint size = VB->Size;

   stage->privatePtr = calloc(1, sizeof(*store));
   store = VP_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   /* a few other misc allocations */
   _mesa_vector4f_alloc( &store->ndcCoords, 0, size, 32 );
   store->clipmask = _mesa_align_malloc(sizeof(GLubyte)*size, 32 );

   return GL_TRUE;
}
Example #10
/* Called the first time stage->run() is invoked.
 */
static GLboolean alloc_texgen_data( struct gl_context *ctx,
				    struct tnl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct texgen_stage_data *store;
   GLuint i;

   stage->privatePtr = calloc(1, sizeof(*store));
   store = TEXGEN_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   for (i = 0 ; i < ctx->Const.MaxTextureCoordUnits ; i++)
      _mesa_vector4f_alloc( &store->texcoord[i], 0, VB->Size, 32 );

   store->tmp_f = malloc(VB->Size * sizeof(GLfloat) * 3);
   store->tmp_m = malloc(VB->Size * sizeof(GLfloat));

   return GL_TRUE;
}
Example #11
/* Called the first time stage->run is called.  In effect, don't
 * allocate data until the first time the stage is run.
 */
static GLboolean init_lighting( GLcontext *ctx,
				struct tnl_pipeline_stage *stage )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct light_stage_data *store;
   GLuint size = tnl->vb.Size;

   stage->privatePtr = MALLOC(sizeof(*store));
   store = LIGHT_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   /* Do one-time init of the lighting tables.
    */
   init_lighting_tables();

   _mesa_vector4f_alloc( &store->Input, 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitColor[0], 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitColor[1], 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitSecondary[0], 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitSecondary[1], 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitIndex[0], 0, size, 32 );
   _mesa_vector4f_alloc( &store->LitIndex[1], 0, size, 32 );

   store->LitColor[0].size = 4;
   store->LitColor[1].size = 4;
   store->LitSecondary[0].size = 3;
   store->LitSecondary[1].size = 3;

   store->LitIndex[0].size = 1;
   store->LitIndex[0].stride = sizeof(GLfloat);
   store->LitIndex[1].size = 1;
   store->LitIndex[1].stride = sizeof(GLfloat);

   return GL_TRUE;
}
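Several examples above set size and stride on a vector immediately after allocating it. As a reminder of what those fields control, here is a minimal sketch of walking a GLvector4f by hand, assuming the layout from Mesa's math/m_vector.h (start points at the first element, count is the element count, stride is in bytes, and size is the number of meaningful components); process_vertex is a hypothetical consumer:

extern void process_vertex( const GLfloat *v, GLuint size ); /* hypothetical */

static void walk_vector( const GLvector4f *v )
{
   const GLubyte *ptr = (const GLubyte *) v->start;
   GLuint i;

   for (i = 0; i < v->count; i++) {
      /* Only the first v->size components of each element are meaningful. */
      process_vertex( (const GLfloat *) ptr, v->size );
      ptr += v->stride;
   }
}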
Example #12
/* Create the device specific context.
 */
GLboolean
r100CreateContext( gl_api api,
		   const struct gl_config *glVisual,
		   __DRIcontext *driContextPriv,
		   unsigned major_version,
		   unsigned minor_version,
		   uint32_t flags,
		   unsigned *error,
		   void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->driverPrivate);
   struct dd_function_table functions;
   r100ContextPtr rmesa;
   struct gl_context *ctx;
   int i;
   int tcl_mode, fthrottle_mode;

   switch (api) {
   case API_OPENGL_COMPAT:
      if (major_version > 1 || minor_version > 3) {
         *error = __DRI_CTX_ERROR_BAD_VERSION;
         return GL_FALSE;
      }
      break;
   case API_OPENGLES:
      break;
   default:
      *error = __DRI_CTX_ERROR_BAD_API;
      return GL_FALSE;
   }

   /* Flag filtering is handled in dri2CreateContextAttribs.
    */
   (void) flags;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = calloc(1, sizeof(*rmesa));
   if ( !rmesa ) {
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.radeonScreen = screen;
   r100_init_vtbl(&rmesa->radeon);

   /* init exp fog table data */
   radeonInitStaticFogData();
   
   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->radeon.optionCache, &screen->optionCache,
			screen->driScreen->myNum, "radeon");
   rmesa->radeon.initialMaxAnisotropy = driQueryOptionf(&rmesa->radeon.optionCache,
                                                 "def_max_anisotropy");

   if ( driQueryOptionb( &rmesa->radeon.optionCache, "hyperz" ) ) {
      if ( sPriv->drm_version.minor < 13 )
	 fprintf( stderr, "DRM version 1.%d too old to support HyperZ, "
			  "disabling.\n", sPriv->drm_version.minor );
      else
	 rmesa->using_hyperz = GL_TRUE;
   }

   if ( sPriv->drm_version.minor >= 15 )
      rmesa->texmicrotile = GL_TRUE;

   /* Init default driver functions, then plug in our Radeon-specific functions
    * (the texture functions are especially important); see the sketch of this
    * override pattern after this example.
    */
   _mesa_init_driver_functions( &functions );
   radeonInitTextureFuncs( &rmesa->radeon, &functions );
   radeonInitQueryObjFunctions(&functions);

   if (!radeonInitContext(&rmesa->radeon, &functions,
			  glVisual, driContextPriv,
			  sharedContextPrivate)) {
     free(rmesa);
     *error = __DRI_CTX_ERROR_NO_MEMORY;
     return GL_FALSE;
   }

   rmesa->radeon.swtcl.RenderIndex = ~0;
   rmesa->radeon.hw.all_dirty = GL_TRUE;

   ctx = &rmesa->radeon.glCtx;
   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have all of them in
    * texturable memory at once. Depending on the allow_large_textures driconf
    * setting, larger textures may be allowed.
    */

   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
						 "texture_units");
   ctx->Const.FragmentProgram.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxTextureUnits;

   ctx->Const.StripTextureBorder = GL_TRUE;

   i = driQueryOptioni( &rmesa->radeon.optionCache, "allow_large_textures");

   /* FIXME: When no memory manager is available we should set this 
    * to some reasonable value based on texture memory pool size */
   ctx->Const.MaxTextureLevels = 12;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;
   ctx->Const.MaxTextureRectSize = 2048;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid
    * fallbacks in radeon_tcl.c, i.e. guarantee that all vertices can
    * fit in a single dma buffer for indexed rendering of quad strips,
    * etc.
    */
   ctx->Const.MaxArrayLockSize = 
      MIN2( ctx->Const.MaxArrayLockSize, 
 	    RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE ); 

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;
   ctx->Const.MaxColorAttachments = 1;
   ctx->Const.MaxRenderbufferSize = 2048;

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].PreferDP4 = true;

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
/*    _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );


   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   ctx->Extensions.ARB_texture_border_clamp = true;
   ctx->Extensions.ARB_texture_env_combine = true;
   ctx->Extensions.ARB_texture_env_crossbar = true;
   ctx->Extensions.ARB_texture_env_dot3 = true;
   ctx->Extensions.EXT_fog_coord = true;
   ctx->Extensions.EXT_packed_depth_stencil = true;
   ctx->Extensions.EXT_secondary_color = true;
   ctx->Extensions.EXT_texture_env_dot3 = true;
   ctx->Extensions.EXT_texture_filter_anisotropic = true;
   ctx->Extensions.EXT_texture_mirror_clamp = true;
   ctx->Extensions.ATI_texture_env_combine3 = true;
   ctx->Extensions.ATI_texture_mirror_once = true;
   ctx->Extensions.MESA_ycbcr_texture = true;
   ctx->Extensions.NV_blend_square = true;
   ctx->Extensions.OES_EGL_image = true;
   ctx->Extensions.EXT_framebuffer_object = true;
   ctx->Extensions.ARB_texture_cube_map = true;

   if (rmesa->radeon.glCtx.Mesa_DXTn) {
      ctx->Extensions.EXT_texture_compression_s3tc = true;
      ctx->Extensions.ANGLE_texture_compression_dxt = true;
   }
   else if (driQueryOptionb (&rmesa->radeon.optionCache, "force_s3tc_enable")) {
      ctx->Extensions.EXT_texture_compression_s3tc = true;
      ctx->Extensions.ANGLE_texture_compression_dxt = true;
   }

   ctx->Extensions.NV_texture_rectangle = true;
   ctx->Extensions.ARB_occlusion_query = true;

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeon_fbo_init(&rmesa->radeon);
   radeonInitSpanFuncs( ctx );
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0, 
			 ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->radeon.optionCache, "fthrottle_mode");
   rmesa->radeon.iw.irq_seq = -1;
   rmesa->radeon.irqsEmitted = 0;
   rmesa->radeon.do_irqs = (rmesa->radeon.radeonScreen->irq != 0 &&
			    fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->radeon.do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);


#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
				       debug_control );
#endif

   tcl_mode = driQueryOptioni(&rmesa->radeon.optionCache, "tcl_mode");
   if (driQueryOptionb(&rmesa->radeon.optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   } else if (tcl_mode == DRI_CONF_TCL_SW ||
	      !(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
	 rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
	 fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
/*       _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }

   _mesa_compute_version(ctx);

   /* Exec table initialization requires the version to be computed */
   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   *error = __DRI_CTX_ERROR_SUCCESS;
   return GL_TRUE;
}
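The create-context examples all follow the same table-override pattern: fill a dd_function_table with Mesa's software defaults, then overwrite just the entries the driver implements in hardware. A minimal sketch of that pattern, assuming radeonFlush as one such driver hook (any member the driver supports can be replaced the same way):

   struct dd_function_table functions;

   _mesa_init_driver_functions( &functions );   /* software defaults */
   functions.Flush = radeonFlush;               /* driver-specific override */
   /* ...more overrides, then hand &functions to context creation... */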
Example #13
/* Create the device specific context.
 */
GLboolean
r100CreateContext( gl_api api,
		   const struct gl_config *glVisual,
		   __DRIcontext *driContextPriv,
		   const struct __DriverContextConfig *ctx_config,
		   unsigned *error,
		   void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->driverPrivate);
   struct dd_function_table functions;
   r100ContextPtr rmesa;
   struct gl_context *ctx;
   int i;
   int tcl_mode, fthrottle_mode;

   if (ctx_config->flags & ~(__DRI_CTX_FLAG_DEBUG | __DRI_CTX_FLAG_NO_ERROR)) {
      *error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   if (ctx_config->attribute_mask) {
      *error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
      return false;
   }

   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = calloc(1, sizeof(*rmesa));
   if ( !rmesa ) {
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.radeonScreen = screen;
   r100_init_vtbl(&rmesa->radeon);

   /* init exp fog table data */
   radeonInitStaticFogData();
   
   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->radeon.optionCache, &screen->optionCache,
			screen->driScreen->myNum, "radeon", NULL);
   rmesa->radeon.initialMaxAnisotropy = driQueryOptionf(&rmesa->radeon.optionCache,
                                                 "def_max_anisotropy");

   if (driQueryOptionb(&rmesa->radeon.optionCache, "hyperz"))
      rmesa->using_hyperz = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   _tnl_init_driver_draw_function( &functions );
   radeonInitTextureFuncs( &rmesa->radeon, &functions );
   radeonInitQueryObjFunctions(&functions);

   if (!radeonInitContext(&rmesa->radeon, api, &functions,
			  glVisual, driContextPriv,
			  sharedContextPrivate)) {
     free(rmesa);
     *error = __DRI_CTX_ERROR_NO_MEMORY;
     return GL_FALSE;
   }

   rmesa->radeon.swtcl.RenderIndex = ~0;
   rmesa->radeon.hw.all_dirty = GL_TRUE;

   ctx = &rmesa->radeon.glCtx;

   driContextSetFlags(ctx, ctx_config->flags);

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
						 "texture_units");
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxTextureUnits;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* FIXME: When no memory manager is available we should set this 
    * to some reasonable value based on texture memory pool size */
   ctx->Const.MaxTextureLevels = 12;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;
   ctx->Const.MaxTextureRectSize = 2048;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid
    * fallbacks in radeon_tcl.c, i.e. guarantee that all vertices can
    * fit in a single dma buffer for indexed rendering of quad strips,
    * etc.
    */
   ctx->Const.MaxArrayLockSize = 
      MIN2( ctx->Const.MaxArrayLockSize, 
 	    RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE ); 

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;
   ctx->Const.MaxColorAttachments = 1;
   ctx->Const.MaxRenderbufferSize = 2048;

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
/*    _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );


   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   ctx->Extensions.ARB_occlusion_query = true;
   ctx->Extensions.ARB_texture_border_clamp = true;
   ctx->Extensions.ARB_texture_cube_map = true;
   ctx->Extensions.ARB_texture_env_combine = true;
   ctx->Extensions.ARB_texture_env_crossbar = true;
   ctx->Extensions.ARB_texture_env_dot3 = true;
   ctx->Extensions.ARB_texture_filter_anisotropic = true;
   ctx->Extensions.ARB_texture_mirror_clamp_to_edge = true;
   ctx->Extensions.ATI_texture_env_combine3 = true;
   ctx->Extensions.ATI_texture_mirror_once = true;
   ctx->Extensions.EXT_texture_env_dot3 = true;
   ctx->Extensions.EXT_texture_filter_anisotropic = true;
   ctx->Extensions.EXT_texture_mirror_clamp = true;
   ctx->Extensions.MESA_ycbcr_texture = true;
   ctx->Extensions.NV_texture_rectangle = true;
   ctx->Extensions.OES_EGL_image = true;

   ctx->Extensions.EXT_texture_compression_s3tc = true;
   ctx->Extensions.ANGLE_texture_compression_dxt = true;

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeon_fbo_init(&rmesa->radeon);
   radeonInitSpanFuncs( ctx );
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0, 
			 ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->radeon.optionCache, "fthrottle_mode");
   rmesa->radeon.iw.irq_seq = -1;
   rmesa->radeon.irqsEmitted = 0;
   rmesa->radeon.do_irqs = (rmesa->radeon.radeonScreen->irq != 0 &&
			    fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->radeon.do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   tcl_mode = driQueryOptioni(&rmesa->radeon.optionCache, "tcl_mode");
   if (getenv("RADEON_NO_RAST")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   } else if (tcl_mode == DRI_CONF_TCL_SW ||
	      !(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
	 rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
	 fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
/*       _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   /* Exec table initialization requires the version to be computed */
   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   *error = __DRI_CTX_ERROR_SUCCESS;
   return GL_TRUE;
}
Example #14
/* Create the device specific context.
 */
GLboolean
radeonCreateContext( const __GLcontextModes *glVisual,
                     __DRIcontextPrivate *driContextPriv,
                     void *sharedContextPrivate)
{
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->private);
   struct dd_function_table functions;
   radeonContextPtr rmesa;
   GLcontext *ctx, *shareCtx;
   int i;
   int tcl_mode, fthrottle_mode;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = (radeonContextPtr) CALLOC( sizeof(*rmesa) );
   if ( !rmesa )
      return GL_FALSE;

   /* init exp fog table data */
   radeonInitStaticFogData();
   
   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->optionCache, &screen->optionCache,
			screen->driScreen->myNum, "radeon");
   rmesa->initialMaxAnisotropy = driQueryOptionf(&rmesa->optionCache,
                                                 "def_max_anisotropy");

   if ( driQueryOptionb( &rmesa->optionCache, "hyperz" ) ) {
      if ( sPriv->drm_version.minor < 13 )
	 fprintf( stderr, "DRM version 1.%d too old to support HyperZ, "
			  "disabling.\n", sPriv->drm_version.minor );
      else
	 rmesa->using_hyperz = GL_TRUE;
   }

   if ( sPriv->drm_version.minor >= 15 )
      rmesa->texmicrotile = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   radeonInitDriverFuncs( &functions );
   radeonInitTextureFuncs( &functions );

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((radeonContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;
   rmesa->glCtx = _mesa_create_context(glVisual, shareCtx,
                                       &functions, (void *) rmesa);
   if (!rmesa->glCtx) {
      FREE(rmesa);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = rmesa;

   /* Init radeon context data */
   rmesa->dri.context = driContextPriv;
   rmesa->dri.screen = sPriv;
   rmesa->dri.drawable = NULL;
   rmesa->dri.readable = NULL;
   rmesa->dri.hwContext = driContextPriv->hHWContext;
   rmesa->dri.hwLock = &sPriv->pSAREA->lock;
   rmesa->dri.fd = sPriv->fd;
   rmesa->dri.drmMinor = sPriv->drm_version.minor;

   rmesa->radeonScreen = screen;
   rmesa->sarea = (drm_radeon_sarea_t *)((GLubyte *)sPriv->pSAREA +
				       screen->sarea_priv_offset);


   rmesa->dma.buf0_address = rmesa->radeonScreen->buffers->list[0].address;

   (void) memset( rmesa->texture_heaps, 0, sizeof( rmesa->texture_heaps ) );
   make_empty_list( & rmesa->swapped );

   rmesa->nr_heaps = screen->numTexHeaps;
   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      rmesa->texture_heaps[i] = driCreateTextureHeap( i, rmesa,
	    screen->texSize[i],
	    12,
	    RADEON_NR_TEX_REGIONS,
	    (drmTextureRegionPtr)rmesa->sarea->tex_list[i],
	    & rmesa->sarea->tex_age[i],
	    & rmesa->swapped,
	    sizeof( radeonTexObj ),
	    (destroy_texture_object_t *) radeonDestroyTexObj );

      driSetTextureSwapCounterLocation( rmesa->texture_heaps[i],
					& rmesa->c_textureSwaps );
   }
   rmesa->texture_depth = driQueryOptioni (&rmesa->optionCache,
					   "texture_depth");
   if (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
      rmesa->texture_depth = ( screen->cpp == 4 ) ?
	 DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;

   rmesa->swtcl.RenderIndex = ~0;
   rmesa->hw.all_dirty = GL_TRUE;

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have all of them in
    * texturable memory at once. Depending on the allow_large_textures driconf
    * setting, larger textures may be allowed.
    */

   ctx = rmesa->glCtx;
   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->optionCache,
						 "texture_units");
   ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;

   i = driQueryOptioni( &rmesa->optionCache, "allow_large_textures");

   driCalculateMaxTextureLevels( rmesa->texture_heaps,
				 rmesa->nr_heaps,
				 & ctx->Const,
				 4,
				 11, /* max 2D texture size is 2048x2048 */
				 8,  /* 256^3 */
				 9,  /* \todo: max cube texture size seems to be 512x512(x6) */
				 11, /* max rect texture size is 2048x2048. */
				 12,
				 GL_FALSE,
				 i );


   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid
    * fallbacks in radeon_tcl.c, i.e. guarantee that all vertices can
    * fit in a single dma buffer for indexed rendering of quad strips,
    * etc.
    */
   ctx->Const.MaxArrayLockSize = 
      MIN2( ctx->Const.MaxArrayLockSize, 
 	    RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE ); 

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;

   _mesa_set_mvp_with_dp4( ctx, GL_TRUE );

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
/*    _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );


   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   driInitExtensions( ctx, card_extensions, GL_TRUE );
   if (rmesa->radeonScreen->drmSupportsCubeMapsR100)
      _mesa_enable_extension( ctx, "GL_ARB_texture_cube_map" );
   if (rmesa->glCtx->Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&rmesa->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   if (rmesa->dri.drmMinor >= 9)
      _mesa_enable_extension( ctx, "GL_NV_texture_rectangle");

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitSpanFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0, 
			 ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->optionCache, "fthrottle_mode");
   rmesa->iw.irq_seq = -1;
   rmesa->irqsEmitted = 0;
   rmesa->do_irqs = (rmesa->radeonScreen->irq != 0 &&
		     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   (*sPriv->systemTime->getUST)( & rmesa->swap_ust );


#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
				       debug_control );
#endif

   tcl_mode = driQueryOptioni(&rmesa->optionCache, "tcl_mode");
   if (driQueryOptionb(&rmesa->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   } else if (tcl_mode == DRI_CONF_TCL_SW ||
	      !(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
	 rmesa->radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
	 fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(rmesa->glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
/*       _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }
   return GL_TRUE;
}
Example #15
/**
 * This function executes vertex programs
 */
static GLboolean
run_vp( struct gl_context *ctx, struct tnl_pipeline_stage *stage )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vp_stage_data *store = VP_STAGE_DATA(stage);
   struct vertex_buffer *VB = &tnl->vb;
   struct gl_vertex_program *program = ctx->VertexProgram._Current;
   struct gl_program_machine *machine = &store->machine;
   GLuint outputs[VARYING_SLOT_MAX], numOutputs;
   GLuint i, j;

   if (!program)
      return GL_TRUE;

   /* ARB program or vertex shader */
   _mesa_load_state_parameters(ctx, program->Base.Parameters);

   /* make list of outputs to save some time below */
   numOutputs = 0;
   for (i = 0; i < VARYING_SLOT_MAX; i++) {
      if (program->Base.OutputsWritten & BITFIELD64_BIT(i)) {
         outputs[numOutputs++] = i;
      }
   }

   /* Allocate result vectors.  We delay this until now to avoid allocating
    * memory that would never be used if we don't run the software tnl pipeline.
    */
   if (!store->results[0].storage) {
      for (i = 0; i < VARYING_SLOT_MAX; i++) {
         assert(!store->results[i].storage);
         _mesa_vector4f_alloc( &store->results[i], 0, VB->Size, 32 );
         store->results[i].size = 4;
      }
   }

   map_textures(ctx, program);

   for (i = 0; i < VB->Count; i++) {
      GLuint attr;

      init_machine(ctx, machine, tnl->CurInstance);

#if 0
      printf("Input  %d: %f, %f, %f, %f\n", i,
             VB->AttribPtr[0]->data[i][0],
             VB->AttribPtr[0]->data[i][1],
             VB->AttribPtr[0]->data[i][2],
             VB->AttribPtr[0]->data[i][3]);
      printf("   color: %f, %f, %f, %f\n",
             VB->AttribPtr[3]->data[i][0],
             VB->AttribPtr[3]->data[i][1],
             VB->AttribPtr[3]->data[i][2],
             VB->AttribPtr[3]->data[i][3]);
      printf("  normal: %f, %f, %f, %f\n",
             VB->AttribPtr[2]->data[i][0],
             VB->AttribPtr[2]->data[i][1],
             VB->AttribPtr[2]->data[i][2],
             VB->AttribPtr[2]->data[i][3]);
#endif

      /* the vertex array case */
      for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
	 if (program->Base.InputsRead & BITFIELD64_BIT(attr)) {
	    const GLubyte *ptr = (const GLubyte*) VB->AttribPtr[attr]->data;
	    const GLuint size = VB->AttribPtr[attr]->size;
	    const GLuint stride = VB->AttribPtr[attr]->stride;
	    const GLfloat *data = (GLfloat *) (ptr + stride * i);
#ifdef NAN_CHECK
            check_float(data[0]);
            check_float(data[1]);
            check_float(data[2]);
            check_float(data[3]);
#endif
	    COPY_CLEAN_4V(machine->VertAttribs[attr], size, data);
	 }
      }

      /* execute the program */
      _mesa_execute_program(ctx, &program->Base, machine);

      /* copy the output registers into the VB->attribs arrays */
      for (j = 0; j < numOutputs; j++) {
         const GLuint attr = outputs[j];
#ifdef NAN_CHECK
         check_float(machine->Outputs[attr][0]);
         check_float(machine->Outputs[attr][1]);
         check_float(machine->Outputs[attr][2]);
         check_float(machine->Outputs[attr][3]);
#endif
         COPY_4V(store->results[attr].data[i], machine->Outputs[attr]);
      }

      /* FOGC is a special case.  Fragment shader expects (f,0,0,1) */
      if (program->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_FOGC)) {
         store->results[VARYING_SLOT_FOGC].data[i][1] = 0.0;
         store->results[VARYING_SLOT_FOGC].data[i][2] = 0.0;
         store->results[VARYING_SLOT_FOGC].data[i][3] = 1.0;
      }
#ifdef NAN_CHECK
      ASSERT(machine->Outputs[0][3] != 0.0F);
#endif
#if 0
      printf("HPOS: %f %f %f %f\n",
             machine->Outputs[0][0], 
             machine->Outputs[0][1], 
             machine->Outputs[0][2], 
             machine->Outputs[0][3]);
#endif
   }

   unmap_textures(ctx, program);

   if (program->IsPositionInvariant) {
      /* We need the exact same transform as in the fixed function path here
       * to guarantee invariance; depending on compiler optimization flags,
       * results could differ otherwise.
       */
      VB->ClipPtr = TransformRaw( &store->results[0],
				  &ctx->_ModelProjectMatrix,
				  VB->AttribPtr[0] );

      /* Drivers expect this to be clean to element 4...
       */
      switch (VB->ClipPtr->size) {
      case 1:
	 /* impossible */
      case 2:
	 _mesa_vector4f_clean_elem( VB->ClipPtr, VB->Count, 2 );
	 /* fall-through */
      case 3:
	 _mesa_vector4f_clean_elem( VB->ClipPtr, VB->Count, 3 );
	 /* fall-through */
      case 4:
	 break;
      }
   }
   else {
      /* Setup the VB pointers so that the next pipeline stages get
       * their data from the right place (the program output arrays).
       */
      VB->ClipPtr = &store->results[VARYING_SLOT_POS];
      VB->ClipPtr->size = 4;
      VB->ClipPtr->count = VB->Count;
   }

   VB->AttribPtr[VERT_ATTRIB_COLOR0] = &store->results[VARYING_SLOT_COL0];
   VB->AttribPtr[VERT_ATTRIB_COLOR1] = &store->results[VARYING_SLOT_COL1];
   VB->AttribPtr[VERT_ATTRIB_FOG] = &store->results[VARYING_SLOT_FOGC];
   VB->AttribPtr[_TNL_ATTRIB_POINTSIZE] = &store->results[VARYING_SLOT_PSIZ];
   VB->BackfaceColorPtr = &store->results[VARYING_SLOT_BFC0];
   VB->BackfaceSecondaryColorPtr = &store->results[VARYING_SLOT_BFC1];

   for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
      VB->AttribPtr[_TNL_ATTRIB_TEX0 + i]
         = &store->results[VARYING_SLOT_TEX0 + i];
   }

   for (i = 0; i < ctx->Const.MaxVarying; i++) {
      if (program->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_VAR0 + i)) {
         /* Note: varying results get put into the generic attributes */
	 VB->AttribPtr[VERT_ATTRIB_GENERIC0+i]
            = &store->results[VARYING_SLOT_VAR0 + i];
      }
   }


   /* Perform NDC and cliptest operations:
    */
   return do_ndc_cliptest(ctx, store);
}
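Finally, the init/run pairs shown throughout these examples are wired into the T&L pipeline through a stage descriptor. A minimal sketch, assuming the field order used by Mesa's tnl pipeline headers (name, private data, create, destroy, validate, run); dtr_vp is the hypothetical destructor sketched after Example #1:

const struct tnl_pipeline_stage _tnl_vertex_program_stage =
{
   "vertex-program",    /* name */
   NULL,                /* private_data */
   init_vp,             /* create: runs once, allocates per-stage storage */
   dtr_vp,              /* destroy: frees what init_vp allocated */
   NULL,                /* validate */
   run_vp               /* run: executes the vertex program per vertex */
};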