/**
 * Matrix constructor.
 *
 * \param m matrix.
 *
 * Allocate 16-byte-aligned storage for the matrix and its inverse and
 * initialize both to the identity.  Allocation failures leave the
 * corresponding pointer NULL; type/flags are set unconditionally.
 */
void
_math_matrix_ctr( GLmatrix *m )
{
   GLfloat *fwd = _mesa_align_malloc( 16 * sizeof(GLfloat), 16 );
   GLfloat *inv = _mesa_align_malloc( 16 * sizeof(GLfloat), 16 );

   if (fwd)
      memcpy( fwd, Identity, sizeof(Identity) );
   if (inv)
      memcpy( inv, Identity, sizeof(Identity) );

   m->m = fwd;
   m->inv = inv;
   m->type = MATRIX_IDENTITY;
   m->flags = 0;
}
static GLboolean init_vertex_stage( struct gl_context *ctx, struct tnl_pipeline_stage *stage ) { struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; struct vertex_stage_data *store; GLuint size = VB->Size; stage->privatePtr = calloc(1, sizeof(*store)); store = VERTEX_STAGE_DATA(stage); if (!store) return GL_FALSE; _mesa_vector4f_alloc( &store->eye, 0, size, 32 ); _mesa_vector4f_alloc( &store->clip, 0, size, 32 ); _mesa_vector4f_alloc( &store->proj, 0, size, 32 ); store->clipmask = _mesa_align_malloc(sizeof(GLubyte)*size, 32 ); if (!store->clipmask || !store->eye.data || !store->clip.data || !store->proj.data) return GL_FALSE; return GL_TRUE; }
/**
 * Create a new program parameter list with pre-allocated storage for
 * \p size parameters.
 *
 * \param size  number of parameter slots to allocate (0 allocates none).
 * \return the new list, or NULL on allocation failure (all partial
 *         allocations are released before returning NULL).
 */
struct gl_program_parameter_list *
_mesa_new_parameter_list_sized(unsigned size)
{
   struct gl_program_parameter_list *p = _mesa_new_parameter_list();

   if ((p != NULL) && (size != 0)) {
      p->Size = size;

      /* Use calloc(size, elem) rather than calloc(1, size * elem) so the
       * library checks the nmemb*size product for overflow. */
      p->Parameters = (struct gl_program_parameter *)
         calloc(size, sizeof(struct gl_program_parameter));

      p->ParameterValues = (gl_constant_value (*)[4])
         _mesa_align_malloc(size * 4 * sizeof(gl_constant_value), 16);

      if ((p->Parameters == NULL) || (p->ParameterValues == NULL)) {
         /* free(NULL) / _mesa_align_free(NULL) are no-ops, so this is
          * safe whichever of the two allocations failed. */
         free(p->Parameters);
         _mesa_align_free(p->ParameterValues);
         free(p);
         p = NULL;
      }
   }

   return p;
}
void vbo_exec_vtx_init(struct vbo_exec_context *exec) { struct gl_context *ctx = exec->ctx; GLuint i; /* Allocate a buffer object. Will just reuse this object * continuously, unless vbo_use_buffer_objects() is called to enable * use of real VBOs. */ _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, ctx->Shared->NullBufferObj); assert(!exec->vtx.buffer_map); exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64); exec->vtx.buffer_ptr = exec->vtx.buffer_map; vbo_exec_vtxfmt_init(exec); _mesa_noop_vtxfmt_init(&exec->vtxfmt_noop); exec->vtx.enabled = 0; for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { assert(i < ARRAY_SIZE(exec->vtx.attrsz)); exec->vtx.attrsz[i] = 0; assert(i < ARRAY_SIZE(exec->vtx.attrtype)); exec->vtx.attrtype[i] = GL_FLOAT; assert(i < ARRAY_SIZE(exec->vtx.active_sz)); exec->vtx.active_sz[i] = 0; } exec->vtx.vertex_size = 0; exec->begin_vertices_flags = FLUSH_UPDATE_CURRENT; }
/**
 * Initialize the immediate-mode vertex execution state (legacy variant).
 *
 * Sets up the exec buffer object, the client-side vertex buffer, the
 * dispatch tables, per-attribute bookkeeping, and seeds the 32 client
 * arrays (16 legacy + 16 generic) from the context's current-value arrays.
 *
 * NOTE(review): the _mesa_align_malloc() result is not checked here;
 * presumably callers tolerate a NULL buffer_map — confirm.
 */
void vbo_exec_vtx_init( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;

   /* Allocate a buffer object.  Will just reuse this object
    * continuously, unless vbo_use_buffer_objects() is called to enable
    * use of real VBOs.
    */
   _mesa_reference_buffer_object(ctx,
                                 &exec->vtx.bufferobj,
                                 ctx->Shared->NullBufferObj);

   ASSERT(!exec->vtx.buffer_map);
   exec->vtx.buffer_map = (GLfloat *)_mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   vbo_exec_vtxfmt_init( exec );

   /* Hook our functions into the dispatch table.
    */
   _mesa_install_exec_vtxfmt( ctx, &exec->vtxfmt );

   /* Reset all attribute sizes: nothing has been written yet. */
   for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
      ASSERT(i < Elements(exec->vtx.attrsz));
      exec->vtx.attrsz[i] = 0;
      ASSERT(i < Elements(exec->vtx.active_sz));
      exec->vtx.active_sz[i] = 0;
   }

   /* Each input initially points at the corresponding local array. */
   for (i = 0 ; i < VERT_ATTRIB_MAX; i++) {
      ASSERT(i < Elements(exec->vtx.inputs));
      ASSERT(i < Elements(exec->vtx.arrays));
      exec->vtx.inputs[i] = &exec->vtx.arrays[i];
   }

   {
      struct gl_client_array *arrays = exec->vtx.arrays;
      unsigned i;

      /* Copy the 16 legacy + 16 generic current-value array templates. */
      memcpy(arrays, vbo->legacy_currval, 16 * sizeof(arrays[0]));
      memcpy(arrays + 16, vbo->generic_currval, 16 * sizeof(arrays[0]));

      for (i = 0; i < 16; ++i) {
         /* The memcpy above copied raw BufferObj pointers without taking
          * references.  NULL them first so the reference helper does not
          * try to unreference an object it never owned, then take a real
          * reference on each buffer object. */
         arrays[i      ].BufferObj = NULL;
         arrays[i + 16].BufferObj = NULL;
         _mesa_reference_buffer_object(ctx, &arrays[i      ].BufferObj,
                                       vbo->legacy_currval[i].BufferObj);
         _mesa_reference_buffer_object(ctx, &arrays[i + 16].BufferObj,
                                       vbo->generic_currval[i].BufferObj);
      }
   }

   exec->vtx.vertex_size = 0;

   exec->begin_vertices_flags = FLUSH_UPDATE_CURRENT;
}
/**
 * Initialize GLvector objects and allocate storage.
 * \param v the vector object
 * \param flags bitwise-OR of VEC_* flags
 * \param count number of elements to allocate in vector
 * \param alignment desired memory alignment for the data (in bytes)
 *
 * On allocation failure v->data/start are left NULL; callers check
 * v->data.  NOTE(review): size is set to 2 while flags use size_bits[4]
 * — this matches the existing code; confirm the intent upstream.
 */
void
_mesa_vector4f_alloc( GLvector4f *v, GLbitfield flags, GLuint count,
                      GLuint alignment )
{
   void *mem = _mesa_align_malloc( count * 4 * sizeof(GLfloat), alignment );

   v->stride = 4 * sizeof(GLfloat);
   v->size = 2;
   v->storage = mem;
   v->storage_count = count;
   v->start = (GLfloat *) mem;
   v->data = (GLfloat (*)[4]) mem;
   v->count = 0;
   v->flags = size_bits[4] | flags | VEC_MALLOC;
}
/**
 * Reallocate memory, with alignment.
 *
 * \param oldBuffer  buffer previously returned by _mesa_align_malloc()
 *                   (may be NULL)
 * \param oldSize    size of the old buffer, in bytes
 * \param newSize    requested new size, in bytes
 * \param alignment  required alignment of the new buffer, in bytes
 * \return the new aligned buffer, or NULL on failure.  On the generic
 *         path the old buffer is always freed, even on failure.
 */
void *
_mesa_align_realloc(void *oldBuffer, size_t oldSize, size_t newSize,
                    unsigned long alignment)
{
#if defined(_WIN32) && defined(_MSC_VER)
   (void) oldSize;
   return _aligned_realloc(oldBuffer, newSize, alignment);
#else
   /* Generic path: allocate fresh aligned storage, copy the smaller of
    * the two sizes, then release the old block. */
   const size_t keep = (newSize < oldSize) ? newSize : oldSize;
   void *fresh = _mesa_align_malloc(newSize, alignment);

   if (fresh != NULL && oldBuffer != NULL && keep != 0) {
      memcpy(fresh, oldBuffer, keep);
   }
   if (oldBuffer != NULL)
      _mesa_align_free(oldBuffer);

   return fresh;
#endif
}
/**
 * Same as _mesa_align_malloc(), but using calloc(1, ) instead of
 * malloc()
 *
 * \param bytes      number of bytes to allocate (zero-filled)
 * \param alignment  required alignment in bytes; the generic path
 *                   assumes it is a power of two (the mask arithmetic
 *                   below only works then) — callers must honor this.
 * \return zeroed, aligned memory, or NULL on failure.  Must be freed
 *         with _mesa_align_free().
 */
void *
_mesa_align_calloc(size_t bytes, unsigned long alignment)
{
#if defined(HAVE_POSIX_MEMALIGN)
   void *mem;

   mem = _mesa_align_malloc(bytes, alignment);
   if (mem != NULL) {
      (void) memset(mem, 0, bytes);
   }
   return mem;
#elif defined(_WIN32) && defined(_MSC_VER)
   void *mem;

   mem = _aligned_malloc(bytes, alignment);
   if (mem != NULL) {
      (void) memset(mem, 0, bytes);
   }
   return mem;
#else
   /* Generic path: over-allocate, round up to the alignment, and stash
    * the raw calloc() pointer just below the aligned address so
    * _mesa_align_free() can recover it. */
   uintptr_t ptr, buf;

   ASSERT( alignment > 0 );

   /* Extra room: worst-case alignment slack plus space for the stashed
    * original pointer. */
   ptr = (uintptr_t) calloc(1, bytes + alignment + sizeof(void *));
   if (!ptr)
      return NULL;

   buf = (ptr + alignment + sizeof(void *)) & ~(uintptr_t)(alignment - 1);
   *(uintptr_t *)(buf - sizeof(void *)) = ptr;

#ifdef DEBUG
   /* mark the non-aligned area */
   while ( ptr < buf - sizeof(void *) ) {
      *(unsigned long *)ptr = 0xcdcdcdcd;
      ptr += sizeof(unsigned long);
   }
#endif

   return (void *)buf;
#endif /* defined(HAVE_POSIX_MEMALIGN) */
}
/** * Called the first time stage->run is called. In effect, don't * allocate data until the first time the stage is run. */ static GLboolean init_vp(struct gl_context *ctx, struct tnl_pipeline_stage *stage) { TNLcontext *tnl = TNL_CONTEXT(ctx); struct vertex_buffer *VB = &(tnl->vb); struct vp_stage_data *store; const GLuint size = VB->Size; stage->privatePtr = calloc(1, sizeof(*store)); store = VP_STAGE_DATA(stage); if (!store) return GL_FALSE; /* a few other misc allocations */ _mesa_vector4f_alloc( &store->ndcCoords, 0, size, 32 ); store->clipmask = _mesa_align_malloc(sizeof(GLubyte)*size, 32 ); return GL_TRUE; }
/**
 * Called via ctx->Driver.AllocTextureImageBuffer()
 *
 * Allocates the malloc'd texel buffer plus the ImageOffsets array used
 * by the texstore routines to address 3D/array slices.
 *
 * \return GL_FALSE on allocation failure (with no partial allocations
 *         left behind), GL_TRUE on success.
 */
GLboolean
_swrast_alloc_texture_image_buffer(struct gl_context *ctx,
                                   struct gl_texture_image *texImage,
                                   gl_format format, GLsizei width,
                                   GLsizei height, GLsizei depth)
{
   struct swrast_texture_image *swImg = swrast_texture_image(texImage);
   GLuint bytes = _mesa_format_image_size(format, width, height, depth);
   GLsizei i;   /* GLsizei to match 'depth' and avoid signed/unsigned compare */

   /* This _should_ be true (revisit if these ever fail) */
   assert(texImage->Width == width);
   assert(texImage->Height == height);
   assert(texImage->Depth == depth);

   assert(!swImg->Buffer);
   swImg->Buffer = _mesa_align_malloc(bytes, 512);
   if (!swImg->Buffer)
      return GL_FALSE;

   /* RowStride and ImageOffsets[] describe how to address texels in 'Data' */
   swImg->RowStride = width;

   /* Allocate the ImageOffsets array and initialize to typical values.
    * We allocate the array for 1D/2D textures too in order to avoid special-
    * case code in the texstore routines.
    */
   swImg->ImageOffsets = (GLuint *) malloc(depth * sizeof(GLuint));
   if (!swImg->ImageOffsets) {
      /* Release the texel buffer so a later retry does not trip the
       * assert(!swImg->Buffer) above, and so we don't leak it. */
      _mesa_align_free(swImg->Buffer);
      swImg->Buffer = NULL;
      return GL_FALSE;
   }

   for (i = 0; i < depth; i++) {
      swImg->ImageOffsets[i] = i * width * height;
   }

   _swrast_init_texture_image(texImage, width, height, depth);

   return GL_TRUE;
}
/**
 * Initialize the immediate-mode vertex execution state.
 *
 * Sets up the exec buffer object, the client-side vertex buffer, the
 * dispatch tables, per-attribute bookkeeping, and seeds the client
 * arrays (fixed-function + generic ranges) from vbo->currval.
 *
 * NOTE(review): the _mesa_align_malloc() result is not checked here;
 * presumably callers tolerate a NULL buffer_map — confirm.
 */
void vbo_exec_vtx_init( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;

   /* Allocate a buffer object.  Will just reuse this object
    * continuously, unless vbo_use_buffer_objects() is called to enable
    * use of real VBOs.
    */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj,
                                 ctx->Shared->NullBufferObj);

   assert(!exec->vtx.buffer_map);
   exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   vbo_exec_vtxfmt_init( exec );
   _mesa_noop_vtxfmt_init(&exec->vtxfmt_noop);
   exec->vtx.enabled = 0;

   /* Reset every attribute slot: nothing written, default type float. */
   for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
      assert(i < ARRAY_SIZE(exec->vtx.attrsz));
      exec->vtx.attrsz[i] = 0;
      assert(i < ARRAY_SIZE(exec->vtx.attrtype));
      exec->vtx.attrtype[i] = GL_FLOAT;
      assert(i < ARRAY_SIZE(exec->vtx.active_sz));
      exec->vtx.active_sz[i] = 0;
   }

   /* Each input initially points at the corresponding local array. */
   for (i = 0 ; i < VERT_ATTRIB_MAX; i++) {
      assert(i < ARRAY_SIZE(exec->vtx.inputs));
      assert(i < ARRAY_SIZE(exec->vtx.arrays));
      exec->vtx.inputs[i] = &exec->vtx.arrays[i];
   }

   {
      struct gl_client_array *arrays = exec->vtx.arrays;
      unsigned i;

      /* Seed the fixed-function attribute arrays from the current-value
       * arrays.  The memcpy copies raw BufferObj pointers without taking
       * references, so each one is NULLed before the reference helper is
       * asked to take a real reference (otherwise it would try to
       * unreference an object it never owned). */
      memcpy(arrays, &vbo->currval[VBO_ATTRIB_POS],
             VERT_ATTRIB_FF_MAX * sizeof(arrays[0]));
      for (i = 0; i < VERT_ATTRIB_FF_MAX; ++i) {
         struct gl_client_array *array;
         array = &arrays[VERT_ATTRIB_FF(i)];
         array->BufferObj = NULL;
         _mesa_reference_buffer_object(ctx, &array->BufferObj,
                                       vbo->currval[VBO_ATTRIB_POS+i].BufferObj);
      }

      /* Same procedure for the generic (shader) attribute range. */
      memcpy(arrays + VERT_ATTRIB_GENERIC(0),
             &vbo->currval[VBO_ATTRIB_GENERIC0],
             VERT_ATTRIB_GENERIC_MAX * sizeof(arrays[0]));

      for (i = 0; i < VERT_ATTRIB_GENERIC_MAX; ++i) {
         struct gl_client_array *array;
         array = &arrays[VERT_ATTRIB_GENERIC(i)];
         array->BufferObj = NULL;
         _mesa_reference_buffer_object(ctx, &array->BufferObj,
                                       vbo->currval[VBO_ATTRIB_GENERIC0+i].BufferObj);
      }
   }

   exec->vtx.vertex_size = 0;

   exec->begin_vertices_flags = FLUSH_UPDATE_CURRENT;
}
/**
 * Verify one normal-transform implementation against the reference
 * implementation, for one matrix template type.
 *
 * \param func    the normal-transform function under test
 * \param mtype   index into the norm_templates/norm_scale_types tables
 * \param cycles  benchmark cycle counter (used only when profiling)
 * \return 1 on success, 0 when a result misses the required precision.
 *
 * Fix: the aligned matrix storage is now released on the early
 * precision-failure returns as well (it previously leaked there).
 */
static int
test_norm_function( normal_func func, int mtype, long *cycles )
{
   GLvector4f source[1], dest[1], dest2[1], ref[1], ref2[1];
   GLmatrix mat[1];
   GLfloat s[TEST_COUNT][5], d[TEST_COUNT][4], r[TEST_COUNT][4];
   GLfloat d2[TEST_COUNT][4], r2[TEST_COUNT][4], length[TEST_COUNT];
   GLfloat scale;
   GLfloat *m;
   int i, j;
#ifdef RUN_DEBUG_BENCHMARK
   int cycle_i;                /* the counter for the benchmarks we run */
#endif

   (void) cycles;

   mat->m = (GLfloat *) _mesa_align_malloc( 16 * sizeof(GLfloat), 16 );
   /* The inverse deliberately aliases the matrix; one free suffices. */
   mat->inv = m = mat->m;

   init_matrix( m );

   scale = 1.0F + rnd () * norm_scale_types[mtype];

   /* Stamp the template shape into the matrix (column-major indexing);
    * VAR entries keep whatever init_matrix() put there. */
   for ( i = 0 ; i < 4 ; i++ ) {
      for ( j = 0 ; j < 4 ; j++ ) {
         switch ( norm_templates[mtype][i * 4 + j] ) {
         case NIL:
            m[j * 4 + i] = 0.0;
            break;
         case ONE:
            m[j * 4 + i] = 1.0;
            break;
         case NEG:
            m[j * 4 + i] = -1.0;
            break;
         case VAR:
            break;
         default:
            exit(1);
         }
      }
   }

   /* Random input normals plus their precomputed inverse lengths. */
   for ( i = 0 ; i < TEST_COUNT ; i++ ) {
      ASSIGN_3V( d[i], 0.0, 0.0, 0.0 );
      ASSIGN_3V( s[i], 0.0, 0.0, 0.0 );
      ASSIGN_3V( d2[i], 0.0, 0.0, 0.0 );
      for ( j = 0 ; j < 3 ; j++ )
         s[i][j] = rnd();
      length[i] = 1 / SQRTF( LEN_SQUARED_3FV( s[i] ) );
   }

   source->data = (GLfloat(*)[4]) s;
   source->start = (GLfloat *) s;
   source->count = TEST_COUNT;
   source->stride = sizeof(s[0]);
   source->flags = 0;

   dest->data = d;
   dest->start = (GLfloat *) d;
   dest->count = TEST_COUNT;
   dest->stride = sizeof(float[4]);
   dest->flags = 0;

   dest2->data = d2;
   dest2->start = (GLfloat *) d2;
   dest2->count = TEST_COUNT;
   dest2->stride = sizeof(float[4]);
   dest2->flags = 0;

   ref->data = r;
   ref->start = (GLfloat *) r;
   ref->count = TEST_COUNT;
   ref->stride = sizeof(float[4]);
   ref->flags = 0;

   ref2->data = r2;
   ref2->start = (GLfloat *) r2;
   ref2->count = TEST_COUNT;
   ref2->stride = sizeof(float[4]);
   ref2->flags = 0;

   /* Reference results: rescale-only, or normalize with and without the
    * precomputed length table. */
   if ( norm_normalize_types[mtype] == 0 ) {
      ref_norm_transform_rescale( mat, scale, source, NULL, ref );
   }
   else {
      ref_norm_transform_normalize( mat, scale, source, NULL, ref );
      ref_norm_transform_normalize( mat, scale, source, length, ref2 );
   }

   if ( mesa_profile ) {
      BEGIN_RACE( *cycles );
      func( mat, scale, source, NULL, dest );
      END_RACE( *cycles );
      func( mat, scale, source, length, dest2 );
   }
   else {
      func( mat, scale, source, NULL, dest );
      func( mat, scale, source, length, dest2 );
   }

   for ( i = 0 ; i < TEST_COUNT ; i++ ) {
      for ( j = 0 ; j < 3 ; j++ ) {
         if ( significand_match( d[i][j], r[i][j] ) < REQUIRED_PRECISION ) {
            printf( "-----------------------------\n" );
            printf( "(i = %i, j = %i)\n", i, j );
            printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                    d[i][0], r[i][0], r[i][0]/d[i][0],
                    MAX_PRECISION - significand_match( d[i][0], r[i][0] ) );
            printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                    d[i][1], r[i][1], r[i][1]/d[i][1],
                    MAX_PRECISION - significand_match( d[i][1], r[i][1] ) );
            printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                    d[i][2], r[i][2], r[i][2]/d[i][2],
                    MAX_PRECISION - significand_match( d[i][2], r[i][2] ) );
            /* Don't leak the matrix on the failure path. */
            _mesa_align_free( mat->m );
            return 0;
         }

         if ( norm_normalize_types[mtype] != 0 ) {
            if ( significand_match( d2[i][j], r2[i][j] ) < REQUIRED_PRECISION ) {
               printf( "------------------- precalculated length case ------\n" );
               printf( "(i = %i, j = %i)\n", i, j );
               printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                       d2[i][0], r2[i][0], r2[i][0]/d2[i][0],
                       MAX_PRECISION - significand_match( d2[i][0], r2[i][0] ) );
               printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                       d2[i][1], r2[i][1], r2[i][1]/d2[i][1],
                       MAX_PRECISION - significand_match( d2[i][1], r2[i][1] ) );
               printf( "%f \t %f \t [ratio = %e - %i bit missed]\n",
                       d2[i][2], r2[i][2], r2[i][2]/d2[i][2],
                       MAX_PRECISION - significand_match( d2[i][2], r2[i][2] ) );
               /* Don't leak the matrix on the failure path. */
               _mesa_align_free( mat->m );
               return 0;
            }
         }
      }
   }

   _mesa_align_free( mat->m );
   return 1;
}
/**
 * Verify one point/vertex transform implementation against the
 * reference implementation, for one matrix template type and one input
 * point size.
 *
 * \param func    transform function under test
 * \param psize   number of input components (must be <= 4)
 * \param mtype   index into the templates/mtypes tables
 * \param cycles  benchmark cycle counter (used only when profiling)
 * \return 1 on success, 0 on bad arguments, a bad template entry, or a
 *         precision mismatch.
 *
 * Fix: the aligned matrix storage is now released on the early failure
 * returns as well (it previously leaked there).
 */
static int
test_transform_function( transform_func func, int psize, int mtype,
                         unsigned long *cycles )
{
   GLvector4f source[1], dest[1], ref[1];
   GLmatrix mat[1];
   GLfloat *m;
   int i, j;
#ifdef RUN_DEBUG_BENCHMARK
   int cycle_i;                /* the counter for the benchmarks we run */
#endif

   (void) cycles;

   if ( psize > 4 ) {
      _mesa_problem( NULL, "test_transform_function called with psize > 4\n" );
      return 0;
   }

   mat->m = (GLfloat *) _mesa_align_malloc( 16 * sizeof(GLfloat), 16 );
   mat->type = mtypes[mtype];

   m = mat->m;
   ASSERT( ((long)m & 15) == 0 );

   init_matrix( m );

   /* Stamp the template shape into the matrix (column-major indexing);
    * VAR entries keep whatever init_matrix() put there. */
   for ( i = 0 ; i < 4 ; i++ ) {
      for ( j = 0 ; j < 4 ; j++ ) {
         switch ( templates[mtype][i * 4 + j] ) {
         case NIL:
            m[j * 4 + i] = 0.0;
            break;
         case ONE:
            m[j * 4 + i] = 1.0;
            break;
         case NEG:
            m[j * 4 + i] = -1.0;
            break;
         case VAR:
            break;
         default:
            ASSERT(0);
            /* Don't leak the matrix on the failure path. */
            _mesa_align_free( mat->m );
            return 0;
         }
      }
   }

   /* Random input points: psize live components, rest (0,0,0,1). */
   for ( i = 0 ; i < TEST_COUNT ; i++) {
      ASSIGN_4V( d[i], 0.0, 0.0, 0.0, 1.0 );
      ASSIGN_4V( s[i], 0.0, 0.0, 0.0, 1.0 );
      for ( j = 0 ; j < psize ; j++ )
         s[i][j] = rnd();
   }

   source->data = (GLfloat(*)[4])s;
   source->start = (GLfloat *)s;
   source->count = TEST_COUNT;
   source->stride = sizeof(s[0]);
   source->size = 4;
   source->flags = 0;

   dest->data = (GLfloat(*)[4])d;
   dest->start = (GLfloat *)d;
   dest->count = TEST_COUNT;
   dest->stride = sizeof(float[4]);
   dest->size = 0;
   dest->flags = 0;

   ref->data = (GLfloat(*)[4])r;
   ref->start = (GLfloat *)r;
   ref->count = TEST_COUNT;
   ref->stride = sizeof(float[4]);
   ref->size = 0;
   ref->flags = 0;

   ref_transform( ref, mat, source );

   if ( mesa_profile ) {
      BEGIN_RACE( *cycles );
      func( dest, mat->m, source );
      END_RACE( *cycles );
   }
   else {
      func( dest, mat->m, source );
   }

   for ( i = 0 ; i < TEST_COUNT ; i++ ) {
      for ( j = 0 ; j < 4 ; j++ ) {
         if ( significand_match( d[i][j], r[i][j] ) < REQUIRED_PRECISION ) {
            printf("-----------------------------\n" );
            printf("(i = %i, j = %i)\n", i, j );
            printf("%f \t %f \t [diff = %e - %i bit missed]\n",
                   d[i][0], r[i][0], r[i][0]-d[i][0],
                   MAX_PRECISION - significand_match( d[i][0], r[i][0] ) );
            printf("%f \t %f \t [diff = %e - %i bit missed]\n",
                   d[i][1], r[i][1], r[i][1]-d[i][1],
                   MAX_PRECISION - significand_match( d[i][1], r[i][1] ) );
            printf("%f \t %f \t [diff = %e - %i bit missed]\n",
                   d[i][2], r[i][2], r[i][2]-d[i][2],
                   MAX_PRECISION - significand_match( d[i][2], r[i][2] ) );
            printf("%f \t %f \t [diff = %e - %i bit missed]\n",
                   d[i][3], r[i][3], r[i][3]-d[i][3],
                   MAX_PRECISION - significand_match( d[i][3], r[i][3] ) );
            /* Don't leak the matrix on the failure path. */
            _mesa_align_free( mat->m );
            return 0;
         }
      }
   }

   _mesa_align_free( mat->m );
   return 1;
}
/* Create the device specific context.
 *
 * Allocates the mach64 driver context, creates the core Mesa context,
 * wires up DRI/DRM state, texture heaps and limits, allocates the
 * vertex buffer, and initializes the software rasterizer, TNL and
 * driver-specific state.  Returns GL_FALSE on any failure.
 *
 * NOTE(review): if the vertex-buffer allocation below fails, this
 * returns GL_FALSE without destroying the already-created Mesa context
 * or freeing mmesa (unlike the earlier failure path) — looks like a
 * leak; confirm whether the caller cleans up driverPrivate.
 */
GLboolean mach64CreateContext( const __GLcontextModes *glVisual,
                               __DRIcontext *driContextPriv,
                               void *sharedContextPrivate )
{
   GLcontext *ctx, *shareCtx;
   __DRIscreen *driScreen = driContextPriv->driScreenPriv;
   struct dd_function_table functions;
   mach64ContextPtr mmesa;
   mach64ScreenPtr mach64Screen;
   int i, heap;
   GLuint *c_textureSwapsPtr = NULL;

#if DO_DEBUG
   MACH64_DEBUG = driParseDebugString(getenv("MACH64_DEBUG"), debug_control);
#endif

   /* Allocate the mach64 context */
   mmesa = (mach64ContextPtr) CALLOC( sizeof(*mmesa) );
   if ( !mmesa )
      return GL_FALSE;

   /* Init default driver functions then plug in our Mach64-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   mach64InitDriverFuncs( &functions );
   mach64InitIoctlFuncs( &functions );
   mach64InitTextureFuncs( &functions );

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((mach64ContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;

   mmesa->glCtx = _mesa_create_context(glVisual, shareCtx, &functions,
                                       (void *)mmesa);
   if (!mmesa->glCtx) {
      FREE(mmesa);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = mmesa;
   ctx = mmesa->glCtx;

   /* Record DRI/DRM handles for later locking and ioctl use. */
   mmesa->driContext = driContextPriv;
   mmesa->driScreen = driScreen;
   mmesa->driDrawable = NULL;
   mmesa->hHWContext = driContextPriv->hHWContext;
   mmesa->driHwLock = &driScreen->pSAREA->lock;
   mmesa->driFd = driScreen->fd;

   mach64Screen = mmesa->mach64Screen = (mach64ScreenPtr)driScreen->private;

   /* Parse configuration files */
   driParseConfigFiles (&mmesa->optionCache, &mach64Screen->optionCache,
                        mach64Screen->driScreen->myNum, "mach64");

   /* The driver's SAREA region lives just past the generic DRM SAREA. */
   mmesa->sarea = (drm_mach64_sarea_t *)((char *)driScreen->pSAREA +
                                         sizeof(drm_sarea_t));

   mmesa->CurrentTexObj[0] = NULL;
   mmesa->CurrentTexObj[1] = NULL;

   (void) memset( mmesa->texture_heaps, 0, sizeof( mmesa->texture_heaps ) );
   make_empty_list( &mmesa->swapped );

   /* Create one texture heap per hardware heap exposed by the screen. */
   mmesa->firstTexHeap = mach64Screen->firstTexHeap;
   mmesa->lastTexHeap = mach64Screen->firstTexHeap + mach64Screen->numTexHeaps;

   for ( i = mmesa->firstTexHeap ; i < mmesa->lastTexHeap ; i++ ) {
      mmesa->texture_heaps[i] = driCreateTextureHeap( i, mmesa,
	    mach64Screen->texSize[i],
	    6, /* align to 64-byte boundary, use 12 for page-size boundary */
	    MACH64_NR_TEX_REGIONS,
	    (drmTextureRegionPtr)mmesa->sarea->tex_list[i],
	    &mmesa->sarea->tex_age[i],
	    &mmesa->swapped,
	    sizeof( mach64TexObj ),
	    (destroy_texture_object_t *) mach64DestroyTexObj );
#if ENABLE_PERF_BOXES
      c_textureSwapsPtr = & mmesa->c_textureSwaps;
#endif
      driSetTextureSwapCounterLocation( mmesa->texture_heaps[i],
					c_textureSwapsPtr );
   }

   mmesa->RenderIndex = -1;		/* Impossible value */
   mmesa->vert_buf = NULL;
   mmesa->num_verts = 0;
   mmesa->new_state = MACH64_NEW_ALL;
   mmesa->dirty = MACH64_UPLOAD_ALL;

   /* Set the maximum texture size small enough that we can
    * guarentee that both texture units can bind a maximal texture
    * and have them both in memory (on-card or AGP) at once.
    * Test for 2 textures * bytes/texel * size * size.  There's no
    * need to account for mipmaps since we only upload one level.
    */
   ctx->Const.MaxTextureUnits = 2;
   ctx->Const.MaxTextureImageUnits = 2;
   ctx->Const.MaxTextureCoordUnits = 2;
   ctx->Const.MaxDrawBuffers = 1;

   heap = mach64Screen->IsPCI ? MACH64_CARD_HEAP : MACH64_AGP_HEAP;

   driCalculateMaxTextureLevels( & mmesa->texture_heaps[heap],
				 1,
				 & ctx->Const,
				 mach64Screen->cpp,
				 10, /* max 2D texture size is 1024x1024 */
				 0,  /* 3D textures unsupported. */
				 0,  /* cube textures unsupported. */
				 0,  /* texture rectangles unsupported. */
				 1,  /* mipmapping unsupported. */
				 GL_TRUE, /* need to have both textures in
					     either local or AGP memory */
				 0 );

#if ENABLE_PERF_BOXES
   mmesa->boxes = ( getenv( "LIBGL_PERFORMANCE_BOXES" ) != NULL );
#endif

   /* Allocate the vertex buffer
    */
   mmesa->vert_buf = _mesa_align_malloc(MACH64_BUFFER_SIZE, 32);
   if ( !mmesa->vert_buf )
      return GL_FALSE;
   mmesa->vert_used = 0;
   mmesa->vert_total = MACH64_BUFFER_SIZE;

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   /* Install the customized pipeline:
    */
/*     _tnl_destroy_pipeline( ctx ); */
/*     _tnl_install_pipeline( ctx, mach64_pipeline ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );

   driInitExtensions( ctx, card_extensions, GL_TRUE );

   mach64InitVB( ctx );
   mach64InitTriFuncs( ctx );
   mach64DDInitStateFuncs( ctx );
   mach64DDInitSpanFuncs( ctx );
   mach64DDInitState( mmesa );

   mmesa->do_irqs = (mmesa->mach64Screen->irq && !getenv("MACH64_NO_IRQS"));

   driContextPriv->driverPrivate = (void *)mmesa;

   if (driQueryOptionb(&mmesa->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(mmesa, MACH64_FALLBACK_DISABLE, 1);
   }

   return GL_TRUE;
}