/* Create the device specific context. */
GLboolean r100CreateContext( gl_api api,
                             const struct gl_config *glVisual,
                             __DRIcontext *driContextPriv,
                             unsigned major_version,
                             unsigned minor_version,
                             uint32_t flags,
                             unsigned *error,
                             void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->driverPrivate);
   struct dd_function_table functions;
   r100ContextPtr rmesa;
   struct gl_context *ctx;
   int i;
   int tcl_mode, fthrottle_mode;

   switch (api) {
   case API_OPENGL_COMPAT:
      if (major_version > 1 || minor_version > 3) {
         *error = __DRI_CTX_ERROR_BAD_VERSION;
         return GL_FALSE;
      }
      break;
   case API_OPENGLES:
      break;
   default:
      *error = __DRI_CTX_ERROR_BAD_API;
      return GL_FALSE;
   }

   /* Flag filtering is handled in dri2CreateContextAttribs. */
   (void) flags;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = calloc(1, sizeof(*rmesa));
   if ( !rmesa ) {
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.radeonScreen = screen;
   r100_init_vtbl(&rmesa->radeon);

   /* init exp fog table data */
   radeonInitStaticFogData();

   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->radeon.optionCache, &screen->optionCache,
                        screen->driScreen->myNum, "radeon");
   rmesa->radeon.initialMaxAnisotropy = driQueryOptionf(&rmesa->radeon.optionCache,
                                                        "def_max_anisotropy");

   if ( driQueryOptionb( &rmesa->radeon.optionCache, "hyperz" ) ) {
      if ( sPriv->drm_version.minor < 13 )
         fprintf( stderr, "DRM version 1.%d too old to support HyperZ, "
                  "disabling.\n", sPriv->drm_version.minor );
      else
         rmesa->using_hyperz = GL_TRUE;
   }

   if ( sPriv->drm_version.minor >= 15 )
      rmesa->texmicrotile = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   radeonInitTextureFuncs( &rmesa->radeon, &functions );
   radeonInitQueryObjFunctions(&functions);

   if (!radeonInitContext(&rmesa->radeon, &functions,
                          glVisual, driContextPriv,
                          sharedContextPrivate)) {
      free(rmesa);
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.swtcl.RenderIndex = ~0;
   rmesa->radeon.hw.all_dirty = GL_TRUE;

   ctx = &rmesa->radeon.glCtx;

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have all of them in
    * texturable memory at once.  Depending on the allow_large_textures
    * driconf setting allow larger textures.
    */
   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
                                                 "texture_units");
   ctx->Const.FragmentProgram.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxTextureUnits;

   ctx->Const.StripTextureBorder = GL_TRUE;

   i = driQueryOptioni( &rmesa->radeon.optionCache, "allow_large_textures");

   /* FIXME: When no memory manager is available we should set this
    * to some reasonable value based on texture memory pool size.
    */
   ctx->Const.MaxTextureLevels = 12;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;
   ctx->Const.MaxTextureRectSize = 2048;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid fallbacks in
    * radeon_tcl.c, i.e. guarantee that all vertices can fit in a single dma
    * buffer for indexed rendering of quad strips, etc.
    */
   ctx->Const.MaxArrayLockSize =
      MIN2( ctx->Const.MaxArrayLockSize,
            RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;
   ctx->Const.MaxColorAttachments = 1;
   ctx->Const.MaxRenderbufferSize = 2048;

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].PreferDP4 = true;

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
   /* _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );

   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   ctx->Extensions.ARB_texture_border_clamp = true;
   ctx->Extensions.ARB_texture_env_combine = true;
   ctx->Extensions.ARB_texture_env_crossbar = true;
   ctx->Extensions.ARB_texture_env_dot3 = true;
   ctx->Extensions.EXT_fog_coord = true;
   ctx->Extensions.EXT_packed_depth_stencil = true;
   ctx->Extensions.EXT_secondary_color = true;
   ctx->Extensions.EXT_texture_env_dot3 = true;
   ctx->Extensions.EXT_texture_filter_anisotropic = true;
   ctx->Extensions.EXT_texture_mirror_clamp = true;
   ctx->Extensions.ATI_texture_env_combine3 = true;
   ctx->Extensions.ATI_texture_mirror_once = true;
   ctx->Extensions.MESA_ycbcr_texture = true;
   ctx->Extensions.NV_blend_square = true;
   ctx->Extensions.OES_EGL_image = true;
   ctx->Extensions.EXT_framebuffer_object = true;
   ctx->Extensions.ARB_texture_cube_map = true;

   if (rmesa->radeon.glCtx.Mesa_DXTn) {
      ctx->Extensions.EXT_texture_compression_s3tc = true;
      ctx->Extensions.ANGLE_texture_compression_dxt = true;
   }
   else if (driQueryOptionb (&rmesa->radeon.optionCache, "force_s3tc_enable")) {
      ctx->Extensions.EXT_texture_compression_s3tc = true;
      ctx->Extensions.ANGLE_texture_compression_dxt = true;
   }

   ctx->Extensions.NV_texture_rectangle = true;
   ctx->Extensions.ARB_occlusion_query = true;

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeon_fbo_init(&rmesa->radeon);
   radeonInitSpanFuncs( ctx );
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0,
                         ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->radeon.optionCache, "fthrottle_mode");
   rmesa->radeon.iw.irq_seq = -1;
   rmesa->radeon.irqsEmitted = 0;
   rmesa->radeon.do_irqs = (rmesa->radeon.radeonScreen->irq != 0 &&
                            fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->radeon.do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
                                       debug_control );
#endif

   tcl_mode = driQueryOptioni(&rmesa->radeon.optionCache, "tcl_mode");
   if (driQueryOptionb(&rmesa->radeon.optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   }
   else if (tcl_mode == DRI_CONF_TCL_SW ||
            !(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
         rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
         fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
      /* _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }

   _mesa_compute_version(ctx);

   /* Exec table initialization requires the version to be computed */
   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   *error = __DRI_CTX_ERROR_SUCCESS;
   return GL_TRUE;
}
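/*
 * Illustrative sketch, not driver code: the API switch at the top of
 * r100CreateContext above accepts compatibility-profile requests only up to
 * OpenGL 1.3, passes OpenGL ES requests through without a version check, and
 * rejects any other API.  The hypothetical helper below restates the version
 * gate in isolation; the function names and self-test are assumptions added
 * for clarity and are not part of Mesa.
 */
#include <assert.h>
#include <stdbool.h>

static bool r100_compat_version_ok(unsigned major, unsigned minor)
{
   /* Same predicate as above: anything newer than GL 1.3 is rejected with
    * __DRI_CTX_ERROR_BAD_VERSION. */
   return !(major > 1 || minor > 3);
}

static void r100_compat_version_selftest(void)
{
   assert(r100_compat_version_ok(1, 3));    /* GL 1.3 is accepted */
   assert(!r100_compat_version_ok(1, 4));   /* GL 1.4 is rejected */
   assert(!r100_compat_version_ok(2, 0));   /* GL 2.0 is rejected */
}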
/* Create the device specific context. */
GLboolean radeonCreateContext( const __GLcontextModes *glVisual,
                               __DRIcontextPrivate *driContextPriv,
                               void *sharedContextPrivate)
{
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->private);
   struct dd_function_table functions;
   radeonContextPtr rmesa;
   GLcontext *ctx, *shareCtx;
   int i;
   int tcl_mode, fthrottle_mode;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = (radeonContextPtr) CALLOC( sizeof(*rmesa) );
   if ( !rmesa )
      return GL_FALSE;

   /* init exp fog table data */
   radeonInitStaticFogData();

   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->optionCache, &screen->optionCache,
                        screen->driScreen->myNum, "radeon");
   rmesa->initialMaxAnisotropy = driQueryOptionf(&rmesa->optionCache,
                                                 "def_max_anisotropy");

   if ( driQueryOptionb( &rmesa->optionCache, "hyperz" ) ) {
      if ( sPriv->drm_version.minor < 13 )
         fprintf( stderr, "DRM version 1.%d too old to support HyperZ, "
                  "disabling.\n", sPriv->drm_version.minor );
      else
         rmesa->using_hyperz = GL_TRUE;
   }

   if ( sPriv->drm_version.minor >= 15 )
      rmesa->texmicrotile = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   radeonInitDriverFuncs( &functions );
   radeonInitTextureFuncs( &functions );

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((radeonContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;
   rmesa->glCtx = _mesa_create_context(glVisual, shareCtx,
                                       &functions, (void *) rmesa);
   if (!rmesa->glCtx) {
      FREE(rmesa);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = rmesa;

   /* Init radeon context data */
   rmesa->dri.context = driContextPriv;
   rmesa->dri.screen = sPriv;
   rmesa->dri.drawable = NULL;
   rmesa->dri.readable = NULL;
   rmesa->dri.hwContext = driContextPriv->hHWContext;
   rmesa->dri.hwLock = &sPriv->pSAREA->lock;
   rmesa->dri.fd = sPriv->fd;
   rmesa->dri.drmMinor = sPriv->drm_version.minor;

   rmesa->radeonScreen = screen;
   rmesa->sarea = (drm_radeon_sarea_t *)((GLubyte *)sPriv->pSAREA +
                                         screen->sarea_priv_offset);

   rmesa->dma.buf0_address = rmesa->radeonScreen->buffers->list[0].address;

   (void) memset( rmesa->texture_heaps, 0, sizeof( rmesa->texture_heaps ) );
   make_empty_list( & rmesa->swapped );

   rmesa->nr_heaps = screen->numTexHeaps;
   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      rmesa->texture_heaps[i] = driCreateTextureHeap( i, rmesa,
            screen->texSize[i],
            12,
            RADEON_NR_TEX_REGIONS,
            (drmTextureRegionPtr)rmesa->sarea->tex_list[i],
            & rmesa->sarea->tex_age[i],
            & rmesa->swapped,
            sizeof( radeonTexObj ),
            (destroy_texture_object_t *) radeonDestroyTexObj );

      driSetTextureSwapCounterLocation( rmesa->texture_heaps[i],
                                        & rmesa->c_textureSwaps );
   }
   rmesa->texture_depth = driQueryOptioni (&rmesa->optionCache,
                                           "texture_depth");
   if (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
      rmesa->texture_depth = ( screen->cpp == 4 ) ?
         DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;

   rmesa->swtcl.RenderIndex = ~0;
   rmesa->hw.all_dirty = GL_TRUE;

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have all of them in
    * texturable memory at once.  Depending on the allow_large_textures
    * driconf setting allow larger textures.
    */
   ctx = rmesa->glCtx;
   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->optionCache,
                                                 "texture_units");
   ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;

   i = driQueryOptioni( &rmesa->optionCache, "allow_large_textures");

   driCalculateMaxTextureLevels( rmesa->texture_heaps,
                                 rmesa->nr_heaps,
                                 & ctx->Const,
                                 4,
                                 11, /* max 2D texture size is 2048x2048 */
                                 8,  /* 256^3 */
                                 9,  /* \todo: max cube texture size seems to be 512x512(x6) */
                                 11, /* max rect texture size is 2048x2048. */
                                 12,
                                 GL_FALSE,
                                 i );

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid fallbacks in
    * radeon_tcl.c, i.e. guarantee that all vertices can fit in a single dma
    * buffer for indexed rendering of quad strips, etc.
    */
   ctx->Const.MaxArrayLockSize =
      MIN2( ctx->Const.MaxArrayLockSize,
            RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;

   _mesa_set_mvp_with_dp4( ctx, GL_TRUE );

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
   /* _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );

   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   driInitExtensions( ctx, card_extensions, GL_TRUE );
   if (rmesa->radeonScreen->drmSupportsCubeMapsR100)
      _mesa_enable_extension( ctx, "GL_ARB_texture_cube_map" );
   if (rmesa->glCtx->Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&rmesa->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   if (rmesa->dri.drmMinor >= 9)
      _mesa_enable_extension( ctx, "GL_NV_texture_rectangle");

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitSpanFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0,
                         ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->optionCache, "fthrottle_mode");
   rmesa->iw.irq_seq = -1;
   rmesa->irqsEmitted = 0;
   rmesa->do_irqs = (rmesa->radeonScreen->irq != 0 &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   (*sPriv->systemTime->getUST)( & rmesa->swap_ust );

#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
                                       debug_control );
#endif

   tcl_mode = driQueryOptioni(&rmesa->optionCache, "tcl_mode");
   if (driQueryOptionb(&rmesa->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   }
   else if (tcl_mode == DRI_CONF_TCL_SW ||
            !(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
         rmesa->radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
         fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(rmesa->glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
      /* _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }
   return GL_TRUE;
}
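/*
 * Illustrative sketch, not driver code: the texture_depth handling in the
 * DRI1 variant above resolves the "match the framebuffer" driconf value to a
 * concrete 16- or 32-bit texture depth from the screen's bytes per pixel.
 * The enum and helper below are hypothetical stand-ins for the
 * DRI_CONF_TEXTURE_DEPTH_* tokens, written only to make that default explicit.
 */
enum example_texture_depth {
   EXAMPLE_TEXTURE_DEPTH_FB,   /* "use whatever depth the framebuffer uses" */
   EXAMPLE_TEXTURE_DEPTH_16,
   EXAMPLE_TEXTURE_DEPTH_32
};

static enum example_texture_depth
example_resolve_texture_depth(enum example_texture_depth requested, int screen_cpp)
{
   if (requested != EXAMPLE_TEXTURE_DEPTH_FB)
      return requested;

   /* 4 bytes per pixel -> 32-bit textures, otherwise fall back to 16-bit,
    * mirroring the (screen->cpp == 4) test above. */
   return (screen_cpp == 4) ? EXAMPLE_TEXTURE_DEPTH_32
                            : EXAMPLE_TEXTURE_DEPTH_16;
}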
/* Create the device specific context. */
GLboolean r100CreateContext( gl_api api,
                             const struct gl_config *glVisual,
                             __DRIcontext *driContextPriv,
                             const struct __DriverContextConfig *ctx_config,
                             unsigned *error,
                             void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->driverPrivate);
   struct dd_function_table functions;
   r100ContextPtr rmesa;
   struct gl_context *ctx;
   int i;
   int tcl_mode, fthrottle_mode;

   if (ctx_config->flags & ~(__DRI_CTX_FLAG_DEBUG | __DRI_CTX_FLAG_NO_ERROR)) {
      *error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   if (ctx_config->attribute_mask) {
      *error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
      return false;
   }

   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = calloc(1, sizeof(*rmesa));
   if ( !rmesa ) {
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.radeonScreen = screen;
   r100_init_vtbl(&rmesa->radeon);

   /* init exp fog table data */
   radeonInitStaticFogData();

   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->radeon.optionCache, &screen->optionCache,
                        screen->driScreen->myNum, "radeon", NULL);
   rmesa->radeon.initialMaxAnisotropy = driQueryOptionf(&rmesa->radeon.optionCache,
                                                        "def_max_anisotropy");

   if (driQueryOptionb(&rmesa->radeon.optionCache, "hyperz"))
      rmesa->using_hyperz = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   _tnl_init_driver_draw_function( &functions );
   radeonInitTextureFuncs( &rmesa->radeon, &functions );
   radeonInitQueryObjFunctions(&functions);

   if (!radeonInitContext(&rmesa->radeon, api, &functions,
                          glVisual, driContextPriv,
                          sharedContextPrivate)) {
      free(rmesa);
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return GL_FALSE;
   }

   rmesa->radeon.swtcl.RenderIndex = ~0;
   rmesa->radeon.hw.all_dirty = GL_TRUE;

   ctx = &rmesa->radeon.glCtx;

   driContextSetFlags(ctx, ctx_config->flags);

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
                                                 "texture_units");
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxTextureUnits;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* FIXME: When no memory manager is available we should set this
    * to some reasonable value based on texture memory pool size.
    */
   ctx->Const.MaxTextureLevels = 12;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;
   ctx->Const.MaxTextureRectSize = 2048;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid fallbacks in
    * radeon_tcl.c, i.e. guarantee that all vertices can fit in a single dma
    * buffer for indexed rendering of quad strips, etc.
    */
   ctx->Const.MaxArrayLockSize =
      MIN2( ctx->Const.MaxArrayLockSize,
            RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );

   rmesa->boxes = 0;

   ctx->Const.MaxDrawBuffers = 1;
   ctx->Const.MaxColorAttachments = 1;
   ctx->Const.MaxRenderbufferSize = 2048;

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
   /* _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );

   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   ctx->Extensions.ARB_occlusion_query = true;
   ctx->Extensions.ARB_texture_border_clamp = true;
   ctx->Extensions.ARB_texture_cube_map = true;
   ctx->Extensions.ARB_texture_env_combine = true;
   ctx->Extensions.ARB_texture_env_crossbar = true;
   ctx->Extensions.ARB_texture_env_dot3 = true;
   ctx->Extensions.ARB_texture_filter_anisotropic = true;
   ctx->Extensions.ARB_texture_mirror_clamp_to_edge = true;
   ctx->Extensions.ATI_texture_env_combine3 = true;
   ctx->Extensions.ATI_texture_mirror_once = true;
   ctx->Extensions.EXT_texture_env_dot3 = true;
   ctx->Extensions.EXT_texture_filter_anisotropic = true;
   ctx->Extensions.EXT_texture_mirror_clamp = true;
   ctx->Extensions.MESA_ycbcr_texture = true;
   ctx->Extensions.NV_texture_rectangle = true;
   ctx->Extensions.OES_EGL_image = true;

   ctx->Extensions.EXT_texture_compression_s3tc = true;
   ctx->Extensions.ANGLE_texture_compression_dxt = true;

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeon_fbo_init(&rmesa->radeon);
   radeonInitSpanFuncs( ctx );
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0,
                         ctx->Const.MaxArrayLockSize, 32 );

   fthrottle_mode = driQueryOptioni(&rmesa->radeon.optionCache, "fthrottle_mode");
   rmesa->radeon.iw.irq_seq = -1;
   rmesa->radeon.irqsEmitted = 0;
   rmesa->radeon.do_irqs = (rmesa->radeon.radeonScreen->irq != 0 &&
                            fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->radeon.do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   tcl_mode = driQueryOptioni(&rmesa->radeon.optionCache, "tcl_mode");
   if (getenv("RADEON_NO_RAST")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   }
   else if (tcl_mode == DRI_CONF_TCL_SW ||
            !(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
         rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
         fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
      /* _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   /* Exec table initialization requires the version to be computed */
   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   *error = __DRI_CTX_ERROR_SUCCESS;
   return GL_TRUE;
}
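/*
 * Illustrative sketch, not driver code: the newest variant above rejects any
 * context flag other than DEBUG and NO_ERROR by masking the request against
 * the supported set.  The macros below are hypothetical stand-ins for the
 * real __DRI_CTX_FLAG_* bits (whose actual values live in the DRI headers);
 * only the masking pattern is the point here.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_CTX_FLAG_DEBUG    (1u << 0)   /* stands in for __DRI_CTX_FLAG_DEBUG */
#define EXAMPLE_CTX_FLAG_NO_ERROR (1u << 1)   /* stands in for __DRI_CTX_FLAG_NO_ERROR */

static bool example_r100_flags_supported(uint32_t flags)
{
   const uint32_t allowed = EXAMPLE_CTX_FLAG_DEBUG | EXAMPLE_CTX_FLAG_NO_ERROR;

   /* Any bit outside the allowed set corresponds to the
    * __DRI_CTX_ERROR_UNKNOWN_FLAG path in r100CreateContext above. */
   return (flags & ~allowed) == 0;
}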
/* Create the device specific context. */
GLboolean radeonCreateContext( const __GLcontextModes *glVisual,
                               __DRIcontextPrivate *driContextPriv,
                               void *sharedContextPrivate)
{
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->private);
   radeonContextPtr rmesa;
   GLcontext *ctx, *shareCtx;
   int i;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = (radeonContextPtr) CALLOC( sizeof(*rmesa) );
   if ( !rmesa )
      return GL_FALSE;

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((radeonContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;
   rmesa->glCtx = _mesa_create_context(glVisual, shareCtx,
                                       (void *) rmesa, GL_TRUE);
   if (!rmesa->glCtx) {
      FREE(rmesa);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = rmesa;

   /* Init radeon context data */
   rmesa->dri.context = driContextPriv;
   rmesa->dri.screen = sPriv;
   rmesa->dri.drawable = NULL;   /* Set by XMesaMakeCurrent */
   rmesa->dri.hwContext = driContextPriv->hHWContext;
   rmesa->dri.hwLock = &sPriv->pSAREA->lock;
   rmesa->dri.fd = sPriv->fd;
   rmesa->dri.drmMinor = sPriv->drmMinor;

   rmesa->radeonScreen = screen;
   rmesa->sarea = (RADEONSAREAPrivPtr)((GLubyte *)sPriv->pSAREA +
                                       screen->sarea_priv_offset);

   rmesa->dma.buf0_address = rmesa->radeonScreen->buffers->list[0].address;

   (void) memset( rmesa->texture_heaps, 0, sizeof( rmesa->texture_heaps ) );
   make_empty_list( & rmesa->swapped );

   rmesa->nr_heaps = screen->numTexHeaps;
   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      rmesa->texture_heaps[i] = driCreateTextureHeap( i, rmesa,
            screen->texSize[i],
            12,
            RADEON_NR_TEX_REGIONS,
            rmesa->sarea->texList[i],
            & rmesa->sarea->texAge[i],
            & rmesa->swapped,
            sizeof( radeonTexObj ),
            (destroy_texture_object_t *) radeonDestroyTexObj );

      driSetTextureSwapCounterLocation( rmesa->texture_heaps[i],
                                        & rmesa->c_textureSwaps );
   }

   rmesa->swtcl.RenderIndex = ~0;
   rmesa->lost_context = 1;

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have them both in
    * texturable memory at once.
    */
   ctx = rmesa->glCtx;
   ctx->Const.MaxTextureUnits = 2;

   driCalculateMaxTextureLevels( rmesa->texture_heaps,
                                 rmesa->nr_heaps,
                                 & ctx->Const,
                                 4,
                                 11, /* max 2D texture size is 2048x2048 */
                                 0,  /* 3D textures unsupported. */
                                 0,  /* cube textures unsupported. */
                                 11, /* max rect texture size is 2048x2048. */
                                 12,
                                 GL_FALSE );

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid fallbacks in
    * radeon_tcl.c, i.e. guarantee that all vertices can fit in a single dma
    * buffer for indexed rendering of quad strips, etc.
    */
   ctx->Const.MaxArrayLockSize =
      MIN2( ctx->Const.MaxArrayLockSize,
            RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );

   rmesa->boxes = (getenv("LIBGL_PERFORMANCE_BOXES") != NULL);

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _ac_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );
   ctx->Driver.FlushVertices = radeonFlushVertices;

   /* Try and keep materials and vertices separate:
    */
   _tnl_isolate_materials( ctx, GL_TRUE );

   /* _mesa_allow_light_in_model( ctx, GL_FALSE ); */

   /* Configure swrast to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );

   _math_matrix_ctr( &rmesa->TexGenMatrix[0] );
   _math_matrix_ctr( &rmesa->TexGenMatrix[1] );
   _math_matrix_ctr( &rmesa->tmpmat );
   _math_matrix_set_identity( &rmesa->TexGenMatrix[0] );
   _math_matrix_set_identity( &rmesa->TexGenMatrix[1] );
   _math_matrix_set_identity( &rmesa->tmpmat );

   driInitExtensions( ctx, card_extensions, GL_TRUE );
   if (rmesa->dri.drmMinor >= 9)
      _mesa_enable_extension( ctx, "GL_NV_texture_rectangle");

   radeonInitDriverFuncs( ctx );
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitSpanFuncs( ctx );
   radeonInitTextureFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   rmesa->iw.irq_seq = -1;
   rmesa->irqsEmitted = 0;
   rmesa->do_irqs = (rmesa->radeonScreen->irq && !getenv("RADEON_NO_IRQS"));
   rmesa->do_usleeps = !getenv("RADEON_NO_USLEEPS");

   rmesa->vblank_flags = (rmesa->radeonScreen->irq != 0)
      ? driGetDefaultVBlankFlags() : VBLANK_FLAG_NO_IRQ;

   rmesa->get_ust = (PFNGLXGETUSTPROC)
      glXGetProcAddress( (const GLubyte *) "__glXGetUST" );
   if ( rmesa->get_ust == NULL ) {
      rmesa->get_ust = get_ust_nop;
   }
   (*rmesa->get_ust)( & rmesa->swap_ust );

#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
                                       debug_control );
#endif

   if (getenv("RADEON_NO_RAST")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   }
   else if (getenv("RADEON_TCL_FORCE_ENABLE")) {
      fprintf(stderr, "Enabling TCL support... this will probably crash\n");
      fprintf(stderr, " your card if it isn't capable of TCL!\n");
      rmesa->radeonScreen->chipset |= RADEON_CHIPSET_TCL;
   }
   else if (getenv("RADEON_TCL_FORCE_DISABLE") ||
            !(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)) {
      rmesa->radeonScreen->chipset &= ~RADEON_CHIPSET_TCL;
      fprintf(stderr, "disabling TCL support\n");
      TCL_FALLBACK(rmesa->glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) {
      if (!getenv("RADEON_NO_VTXFMT"))
         radeonVtxfmtInit( ctx );

      _tnl_need_dlist_norm_lengths( ctx, GL_FALSE );
   }
   return GL_TRUE;
}
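/*
 * Illustrative note, not driver code: several of the magic numbers passed to
 * driCalculateMaxTextureLevels above are log2 texture sizes (per the inline
 * comments), e.g. 11 for a 2048x2048 2D texture.  A texture of size 2^n has
 * n + 1 mipmap levels down to 1x1, which is how the newer variants arrive at
 * ctx->Const.MaxTextureLevels = 12.  The helper below is a hypothetical
 * restatement of that size/level relation, added only for clarity.
 */
static int example_levels_for_log2_size(int log2_max_size)
{
   /* 2048 = 2^11 -> levels 2048, 1024, ..., 1 -> 12 levels in total. */
   return log2_max_size + 1;
}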