/* Reset the pack SPU's per-context and per-thread bookkeeping to a
 * pristine (all-zero, zero-count) state. */
static void __setDefaults( void )
{
    pack_spu.numContexts = 0;
    crMemZero(pack_spu.context, CR_MAX_CONTEXTS * sizeof(ContextInfo));

    pack_spu.numThreads = 0;
    crMemZero(pack_spu.thread, MAX_THREADS * sizeof(ThreadInfo));
}
/* SPU entry point: initialize the feedback SPU instance.
 * Wires up dispatch tables for the child SPU (if any) and the super SPU,
 * reads configuration, and creates a default state-tracker context.
 * Returns the SPU's function table. */
static SPUFunctions *feedbackSPUInit( int id, SPU *child, SPU *self,
                                      unsigned int context_id,
                                      unsigned int num_contexts )
{
    (void) context_id;
    (void) num_contexts;

#ifdef CHROMIUM_THREADSAFE
    crInitMutex(&feedback_spu.mutex);
#endif

    feedback_spu.id = id;
    feedback_spu.has_child = 0;
    if (child)
    {
        /* Forward calls to the next SPU in the chain. */
        crSPUInitDispatchTable( &(feedback_spu.child) );
        crSPUCopyDispatchTable( &(feedback_spu.child), &(child->dispatch_table) );
        feedback_spu.has_child = 1;
    }
    /* Always capture the parent (super) SPU's dispatch table. */
    crSPUInitDispatchTable( &(feedback_spu.super) );
    crSPUCopyDispatchTable( &(feedback_spu.super), &(self->superSPU->dispatch_table) );
    feedbackspuGatherConfiguration();

    /* create/init default state tracker */
    crStateInit();
    feedback_spu.defaultctx = crStateCreateContext(NULL, 0, NULL);
    crStateSetCurrent(feedback_spu.defaultctx);

    /* Context table starts empty. */
    feedback_spu.numContexts = 0;
    crMemZero(feedback_spu.context, CR_MAX_CONTEXTS * sizeof(ContextInfo));

    return &feedback_functions;
}
/* Populate a dispatch table: zero it, fill every slot with a no-op stub,
 * then override the entries this module implements.
 * NOTE(review): the casts suggest the local function prototypes differ
 * slightly from the table's typedefs — confirm they are call-compatible. */
static void InitDispatchTable(SPUDispatchTable *t)
{
    crMemZero(t, sizeof(*t));
    crSPUInitDispatchNops(t);

    /* drm1 */
    t->MatrixMode = (MatrixModeFunc_t)MatrixMode;
    t->LoadIdentity = (LoadIdentityFunc_t)LoadIdentity;
    t->LoadMatrixf = (LoadMatrixfFunc_t)LoadMatrixf;
    t->LoadMatrixd = (LoadMatrixdFunc_t)LoadMatrixd;
    t->PushMatrix = (PushMatrixFunc_t)PushMatrix;
    t->PopMatrix = (PopMatrixFunc_t)PopMatrix;
    t->MultMatrixf = (MultMatrixfFunc_t)MultMatrixf;
    t->MultMatrixd = (MultMatrixdFunc_t)MultMatrixd;
    t->Rotatef = (RotatefFunc_t)Rotatef;
    t->Rotated = (RotatedFunc_t)Rotated;
    t->Translatef = (TranslatefFunc_t)Translatef;
    t->Translated = (TranslatedFunc_t)Translated;
    t->Scalef = (ScalefFunc_t)Scalef;
    t->Scaled = (ScaledFunc_t)Scaled;
    t->Vertex2f = (Vertex2fFunc_t)Vertex2f;
    t->Vertex2fv = (Vertex2fvFunc_t)Vertex2fv;
    t->Vertex3f = (Vertex3fFunc_t)Vertex3f;
    t->Vertex3fv = (Vertex3fvFunc_t)Vertex3fv;
    t->Vertex4f = (Vertex4fFunc_t)Vertex4f;
    t->Vertex4fv = (Vertex4fvFunc_t)Vertex4fv;
    t->VertexAttrib3fARB = (VertexAttrib3fARBFunc_t)VertexAttrib3fARB;
}
/* SPU entry point: initialize the zpix (depth-pixel compression) SPU.
 * Sets up child/super dispatch tables, reads configuration, and allocates
 * the frame-buffer shadow structures.  Returns the SPU function table. */
static SPUFunctions * zpixSPUInit(int id, SPU * child, SPU * self,
                                  unsigned int context_id,
                                  unsigned int num_contexts)
{
    int i = 0;
    (void) context_id;
    (void) num_contexts;

    self->privatePtr = (void *) &zpix_spu;

    crMemZero(&zpix_spu, sizeof(zpix_spu));

    zpix_spu.id = id;
    zpix_spu.has_child = 0;
    zpix_spu.server = NULL;
    if (child)
    {
        /* Forward calls to the next SPU in the chain. */
        crSPUInitDispatchTable(&(zpix_spu.child));
        crSPUCopyDispatchTable(&(zpix_spu.child), &(child->dispatch_table));
        zpix_spu.has_child = 1;
    }
    crSPUInitDispatchTable(&(zpix_spu.super));
    crSPUCopyDispatchTable(&(zpix_spu.super), &(self->superSPU->dispatch_table));
    zpixspuGatherConfiguration(&zpix_spu);

    /* non-zero instance initialization values */
    crDebug("Zpix SPU - verbose = %d", zpix_spu.verbose);

    /* -1 marks "no previous raster position yet". */
    zpix_spu.rXold = -1;
    zpix_spu.rYold = -1;

    /* set up shadow buffers; -1/NULL mark each slot as unallocated */
    for (i = 0; i < FBNUM; i++)
    {
        zpix_spu.b.fbWidth[i] = -1;
        zpix_spu.b.fbHeight[i] = -1;
        zpix_spu.b.fbLen[i] = 0;
        zpix_spu.b.fBuf[i] = NULL;
        zpix_spu.b.dBuf[i] = NULL;
        zpix_spu.zBuf[i] = NULL;
    }

    /* allocate some initial server shadows */
    /*XXX this is just a convenience since storage amount is trivial */
#define DEFAULT_NUMBER_SERVER_SHADOWS 8

    /* n_sb is the HIGHEST VALID INDEX, so n_sb+1 elements are allocated. */
    zpix_spu.n_sb = DEFAULT_NUMBER_SERVER_SHADOWS - 1;
    zpix_spu.sb = (SBUFS *) crAlloc((zpix_spu.n_sb + 1) * sizeof(SBUFS));

    /* BUG FIX: initialize ALL n_sb+1 allocated elements.  The previous
     * loop condition (i < n_sb) left sb[n_sb] uninitialized even though
     * it is a valid index per the comment above. */
    for (i = 0; i <= zpix_spu.n_sb; i++)
    {
        zpix_spu.sb[i] = zpix_spu.b;
    }

    return &zpix_functions;
}
/* Destroy the context identified by contextId.
 * Native contexts are destroyed through the platform window-system
 * interface; Chromium contexts are destroyed by the downstream SPU.
 * The ContextInfo record is zeroed and removed from the hash table
 * (crFree is the hashtable's delete callback). */
void stubDestroyContext( unsigned long contextId )
{
    ContextInfo *context;
    context = (ContextInfo *) crHashtableSearch(stub.contextTable, contextId);
    CRASSERT(context);

    if (context->type == NATIVE) {
        /* Let the real GL implementation tear it down. */
#ifdef WINDOWS
        stub.wsInterface.wglDeleteContext( context->hglrc );
#elif defined(Darwin)
        stub.wsInterface.CGLDestroyContext( context->cglc );
#elif defined(GLX)
        stub.wsInterface.glXDestroyContext( context->dpy, context->glxContext );
#endif
    }
    else if (context->type == CHROMIUM) {
        /* Have pack SPU or tilesort SPU, etc. destroy the context */
        CRASSERT(context->spuContext >= 0);
        stub.spu->dispatch_table.DestroyContext( context->spuContext );
    }

    /* Don't leave a dangling current-context pointer. */
    if (stub.currentContext == context) {
        stub.currentContext = NULL;
    }

    crMemZero(context, sizeof(ContextInfo)); /* just to be safe */
    crHashtableDelete(stub.contextTable, contextId, crFree);
}
/*
 * Release the buffer currently attached to the context.
 * Update/resync data structures.
 *
 * The context keeps a working copy of the buffer state in pc->buffer;
 * on release that copy is written back into the CRPackBuffer so the
 * buffer object reflects everything packed so far.
 */
void crPackReleaseBuffer( CRPackContext *pc )
{
    CRPackBuffer *buf;
    CRASSERT( pc );

    if (!pc->currentBuffer) {
        crWarning("crPackReleaseBuffer called with no current buffer");
        return; /* nothing to do */
    }

    /* The buffer must really be bound to this context. */
    CRASSERT( pc->currentBuffer->context == pc );

    /* buffer to release */
    buf = pc->currentBuffer;

    /* copy context's fields back into the buffer to update it */
    *buf = pc->buffer; /* struct copy */

    /* unbind buffer from context */
    buf->context = NULL;
    pc->currentBuffer = NULL;

    /* zero-out context's packing fields just to be safe */
    crMemZero(&(pc->buffer), sizeof(pc->buffer));

    /* update the debugging fields */
    if (pc->file)
        crFree(pc->file);
    pc->file = NULL;
    pc->line = -1;
}
/* Destroy context 'ctx' on every connected replicant server, release
 * the state tracker / display-list resources, and remove the context
 * from both the hash table and the ordered context list. */
void REPLICATESPU_APIENTRY replicatespu_DestroyContext( GLint ctx )
{
    unsigned int i;
    ContextInfo *context = (ContextInfo *) crHashtableSearch(replicate_spu.contextTable, ctx);
    GET_THREAD(thread);

    if (!context) {
        crWarning("Replicate SPU: DestroyContext, bad context %d", ctx);
        return;
    }
    CRASSERT(thread);

    /* Make sure everything queued so far reaches the servers first. */
    replicatespuFlushAll( (void *)thread );

    /* Tell each live replicant to destroy its copy of the context. */
    for (i = 0; i < CR_MAX_REPLICANTS; i++) {
        if (!IS_CONNECTED(replicate_spu.rserver[i].conn))
            continue;
        if (replicate_spu.swap)
            crPackDestroyContextSWAP( context->rserverCtx[i] );
        else
            crPackDestroyContext( context->rserverCtx[i] );
        replicatespuFlushOne(thread, i);
    }

    crStateDestroyContext( context->State );

    /* Although we only allocate a display list manager once,
     * we free it every time; this is okay since the DLM itself
     * will track its uses and will only release the resources
     * when the last user has relinquished it. */
    crDLMFreeDLM(context->displayListManager);
    crDLMFreeContext(context->dlmState);

    /* If this context was current on the calling thread, clear all
     * current-context state before the record is freed. */
    if (thread->currentContext == context) {
        thread->currentContext = NULL;
        crStateMakeCurrent( NULL );
        crDLMSetCurrentState(NULL);
    }

    /* zero, just to be safe */
    crMemZero(context, sizeof(ContextInfo));

    /* Delete from both the context table, and the context list. */
    crHashtableDelete(replicate_spu.contextTable, ctx, crFree);
    {
        /* NOTE(review): the GLint id is smuggled through a void* for the
         * list comparator — relies on CompareIntegers unpacking it the
         * same way; confirm on LP64 platforms. */
        CRListIterator *foundElement = crListFind(replicate_spu.contextList, (void *)ctx, CompareIntegers);
        if (foundElement != NULL) {
            crListErase(replicate_spu.contextList, foundElement);
        }
    }
}
/*
 * Allocate the state (dirty) bits data structures.
 * This should be called before we create any contexts.
 * We'll also create the default/NULL context at this time and make
 * it the current context by default.  This means that if someone
 * tries to set GL state before calling MakeCurrent() they'll be
 * modifying the default state object, and not segfaulting on a NULL
 * pointer somewhere.
 */
void crStateInit(void)
{
    unsigned int i;

    /* Purely initialize the context bits */
    if (!__currentBits) {
        __currentBits = (CRStateBits *) crCalloc( sizeof(CRStateBits) );
        crStateClientInitBits( &(__currentBits->client) );
        crStateLightingInitBits( &(__currentBits->lighting) );
    } else
        crWarning("State tracker is being re-initialized..\n");

    /* Mark every context slot free. */
    for (i=0;i<CR_MAX_CONTEXTS;i++)
        g_availableContexts[i] = 0;

#ifdef CHROMIUM_THREADSAFE
    /* One-time TLS slot setup for the per-thread current context. */
    if (!__isContextTLSInited)
    {
# ifndef RT_OS_WINDOWS
        /* tls destructor is implemented for all platforms except windows*/
        crInitTSDF(&__contextTSD, crStateThreadTlsDtor);
# else
        /* windows should do cleanup via DllMain THREAD_DETACH notification */
        crInitTSD(&__contextTSD);
# endif
        __isContextTLSInited = 1;
    }
#endif

    if (defaultContext) {
        /* Free the default/NULL context.
         * Ensures context bits are reset */
#ifdef CHROMIUM_THREADSAFE
        /* Drop the TLS reference; the ref-counted release frees it. */
        SetCurrentContext(NULL);
        VBoxTlsRefRelease(defaultContext);
#else
        crStateFreeContext(defaultContext);
        __currentContext = NULL;
#endif
    }

    /* Reset diff_api */
    crMemZero(&diff_api, sizeof(SPUDispatchTable));

    /* Allocate the default/NULL context */
    defaultContext = crStateCreateContextId(0, NULL, CR_RGB_BIT, NULL);
    CRASSERT(g_availableContexts[0] == 0);
    g_availableContexts[0] = 1; /* in use forever */

#ifdef CHROMIUM_THREADSAFE
    SetCurrentContext(defaultContext);
#else
    __currentContext = defaultContext;
#endif
}
/* SPU entry point: initialize the readback SPU.
 * Sets up child/super dispatch tables, allocates the context/window
 * hash tables, and registers a default window with id 0.
 * Returns the SPU function table. */
static SPUFunctions * readbackSPUInit( int id, SPU *child, SPU *self,
                                       unsigned int context_id,
                                       unsigned int num_contexts )
{
    WindowInfo *window;
    (void) context_id;
    (void) num_contexts;

#ifdef CHROMIUM_THREADSAFE
    crDebug("Readback SPU: thread-safe");
#endif

    crMemZero(&readback_spu, sizeof(readback_spu));
    readback_spu.id = id;
    readback_spu.has_child = 0;
    if (child)
    {
        crSPUInitDispatchTable( &(readback_spu.child) );
        crSPUCopyDispatchTable( &(readback_spu.child), &(child->dispatch_table) );
        readback_spu.has_child = 1;
    }
    else
    {
        /* don't override any API functions - use the Render SPU functions */
        static SPUNamedFunctionTable empty_table[] = { { NULL, NULL } };
        readback_functions.table = empty_table;
    }
    crSPUInitDispatchTable( &(readback_spu.super) );
    crSPUCopyDispatchTable( &(readback_spu.super), &(self->superSPU->dispatch_table) );
    readbackspuGatherConfiguration( &readback_spu );

    readback_spu.contextTable = crAllocHashtable();
    readback_spu.windowTable = crAllocHashtable();

    /* create my default window (window number 0) */
    window = (WindowInfo *) crCalloc(sizeof(WindowInfo));
    CRASSERT(window);
    window->index = 0;
    window->renderWindow = 0; /* default render SPU window */
    window->childWindow = 0; /* default child SPU window */
    /* Split the configured visual into the bits requested of the child
     * SPU vs. the super (render) SPU. */
    readbackspuTweakVisBits(readback_spu.default_visual, &window->childVisBits, &window->superVisBits);
    crHashtableAdd(readback_spu.windowTable, 0, window);

    readback_spu.gather_conn = NULL;

    return &readback_functions;
}
/*
 * Allocate the state (dirty) bits data structures.
 * This should be called before we create any contexts.
 * We'll also create the default/NULL context at this time and make
 * it the current context by default.  This means that if someone
 * tries to set GL state before calling MakeCurrent() they'll be
 * modifying the default state object, and not segfaulting on a NULL
 * pointer somewhere.
 */
void crStateInit(void)
{
    unsigned int i;

    /* Purely initialize the context bits */
    if (!__currentBits) {
        __currentBits = (CRStateBits *) crCalloc( sizeof(CRStateBits) );
        crStateClientInitBits( &(__currentBits->client) );
        crStateLightingInitBits( &(__currentBits->lighting) );
    } else
        crWarning("State tracker is being re-initialized..\n");

    /* Mark every context slot free. */
    for (i=0;i<CR_MAX_CONTEXTS;i++)
        g_availableContexts[i] = 0;

    if (defaultContext) {
        /* Free the default/NULL context.
         * Ensures context bits are reset */
        crStateFreeContext(defaultContext);
#ifdef CHROMIUM_THREADSAFE
        crSetTSD(&__contextTSD, NULL);
#else
        __currentContext = NULL;
#endif
    }

    /* Reset diff_api */
    crMemZero(&diff_api, sizeof(SPUDispatchTable));

    /* Allocate the default/NULL context */
    defaultContext = crStateCreateContextId(0, NULL, CR_RGB_BIT, NULL);
    CRASSERT(g_availableContexts[0] == 0);
    g_availableContexts[0] = 1; /* in use forever */

#ifdef CHROMIUM_THREADSAFE
    crSetTSD(&__contextTSD, defaultContext);
#else
    __currentContext = defaultContext;
#endif
}
/* SPU entry point: initialize the array SPU.
 * Sets up child/super dispatch tables, reads configuration, and creates
 * a default state-tracker context.  Returns the SPU function table. */
static SPUFunctions *arraySPUInit( int id, SPU *child, SPU *self,
                                   unsigned int context_id,
                                   unsigned int num_contexts )
{
    (void) context_id;
    (void) num_contexts;

#ifdef CHROMIUM_THREADSAFE
    crInitMutex(&_ArrayMutex);
#endif

    array_spu.id = id;
    array_spu.has_child = 0;
    if (child)
    {
        /* Forward calls to the next SPU in the chain. */
        crSPUInitDispatchTable( &(array_spu.child) );
        crSPUCopyDispatchTable( &(array_spu.child), &(child->dispatch_table) );
        array_spu.has_child = 1;
    }
    crSPUInitDispatchTable( &(array_spu.super) );
    crSPUCopyDispatchTable( &(array_spu.super), &(self->superSPU->dispatch_table) );
    arrayspuSetVBoxConfiguration();

    crStateInit();
    /*@todo seems default context ain't needed at all*/
    array_spu.defaultctx = crStateCreateContext( NULL, 0, NULL );
#ifdef CR_ARB_vertex_buffer_object
    /* Keep a client-side copy of VBO data so arrays can be expanded. */
    array_spu.defaultctx->bufferobject.retainBufferData = GL_TRUE;
#endif
    /* we call SetCurrent instead of MakeCurrent as the differencer
     * isn't setup yet anyway */
    crStateSetCurrent( array_spu.defaultctx );

    array_spu.numContexts = 0;
    crMemZero(array_spu.context, CR_MAX_CONTEXTS * sizeof(ContextInfo));

    return &array_functions;
}
/* SPU entry point: initialize the tilesort SPU.
 * Seeds per-node randomness, sets up thread 0, connects to the tile
 * servers, sizes the geometry packing buffers against the network MTU,
 * and builds the dispatch/state tables.  Returns the SPU function table. */
static SPUFunctions * tilesortSPUInit( int id, SPU *child, SPU *self,
                                       unsigned int context_id,
                                       unsigned int num_contexts )
{
    ThreadInfo *thread0 = &(tilesort_spu.thread[0]);
    (void) context_id;
    (void) num_contexts;
    (void) child;
    (void) self;

#if DEBUG_FP_EXCEPTIONS
    /* Debug aid: unmask FP exceptions so bad math traps immediately. */
    {
        fpu_control_t mask;
        _FPU_GETCW(mask);
        mask &= ~(_FPU_MASK_IM | _FPU_MASK_DM | _FPU_MASK_ZM
                  | _FPU_MASK_OM | _FPU_MASK_UM);
        _FPU_SETCW(mask);
    }
#endif

    crRandSeed(id);

    crMemZero( &tilesort_spu, sizeof(TileSortSPU) );

#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&_ThreadTSD);
    crSetTSD(&_ThreadTSD, thread0);
    crInitMutex(&_TileSortMutex);
#endif

    thread0->state_server_index = -1; /* one-time init for thread */

    tilesortspuInitEvaluators();

    /* Init window, context hash tables */
    tilesort_spu.windowTable = crAllocHashtable();
    tilesort_spu.contextTable = crAllocHashtable();
    tilesort_spu.listTable = crAllocHashtable();

    tilesort_spu.id = id;
    tilesort_spu.glassesType = RED_BLUE;
    tilesortspuSetAnaglyphMask(&tilesort_spu);

    tilesortspuGatherConfiguration( child );
    tilesortspuConnectToServers(); /* set up thread0's server connection */

    tilesort_spu.geom_buffer_size = tilesort_spu.buffer_size;
    /* geom_buffer_mtu must fit in data's part of our buffers */
    tilesort_spu.geom_buffer_mtu = crPackMaxData(tilesort_spu.buffer_size)
        /* 24 is the size of the bounds info packet
         * END_FLUFF is the size of data of the extra End opcode if needed
         * 4 since BoundsInfo opcode may take a whole 4 bytes
         * and 4 to let room for extra End's opcode, if needed
         */
        - (24+END_FLUFF+4+4);
    /* the geometry must also fit in the mtu */
    if (tilesort_spu.geom_buffer_mtu >
        tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24+END_FLUFF+4+4))
        tilesort_spu.geom_buffer_mtu =
            tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24+END_FLUFF+4+4);

    /* Byte-swapping requirement comes from the first server connection. */
    tilesort_spu.swap = thread0->netServer[0].conn->swap;

    tilesortspuInitThreadPacking( thread0 );
    tilesortspuCreateFunctions();

    crStateInit();
    tilesortspuCreateDiffAPI();

    /* special dispatch tables for display lists */
    if (tilesort_spu.listTrack || tilesort_spu.lazySendDLists)
    {
        crMemZero((void *)&tilesort_spu.packerDispatch,
                  sizeof tilesort_spu.packerDispatch);
        crSPUInitDispatchTable(&tilesort_spu.packerDispatch);
        tilesortspuLoadPackTable(&tilesort_spu.packerDispatch);
        crSPUInitDispatchTable(&tilesort_spu.stateDispatch);
        tilesortspuLoadStateTable(&tilesort_spu.stateDispatch);
    }

    if (tilesort_spu.useDMX)
    {
        /* load OpenGL */
        int n;
        crDebug("Tilesort SPU: Using DMX");
        n = crLoadOpenGL( &tilesort_spu.ws, NULL);
        if (!n)
        {
            crWarning("Tilesort SPU: Unable to load OpenGL, disabling DMX");
            tilesort_spu.useDMX = 0;
        }
    }
    else
    {
        crDebug("Tilesort SPU: Not using DMX");
    }

    crDebug("Tilesort SPU: ---------- End of Init -------------");
    return &tilesort_functions;
}
/* ICD entry point: describe pixel format 'iPixelFormat' into *pfd.
 * Returns the number of supported formats (2), or fills in nothing when
 * pfd is NULL / nBytes is wrong (the return value still reports the
 * maximum format index, per the DescribePixelFormat contract). */
int APIENTRY DrvDescribePixelFormat(HDC hdc, int iPixelFormat, UINT nBytes,
                                    LPPIXELFORMATDESCRIPTOR pfd)
{
    CR_DDI_PROLOGUE();

    if ( !pfd ) {
        return 2;
    }

    if ( nBytes != sizeof(*pfd) ) {
        crWarning( "DrvDescribePixelFormat: nBytes=%u?", nBytes );
        return 2;
    }

    if (iPixelFormat==1)
    {
        /* Format 1: double-buffered RGBA8 with 32-bit depth. */
        crMemZero(pfd, sizeof(*pfd));
        pfd->nSize = sizeof(*pfd);
        pfd->nVersion = 1;
        pfd->dwFlags = (PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER);
        pfd->dwFlags |= 0x8000; /* <- Needed for VSG Open Inventor to be happy */
        pfd->iPixelType = PFD_TYPE_RGBA;
        pfd->cColorBits = 32;
        pfd->cRedBits = 8;
        pfd->cRedShift = 24;
        pfd->cGreenBits = 8;
        pfd->cGreenShift = 16;
        pfd->cBlueBits = 8;
        pfd->cBlueShift = 8;
        pfd->cAlphaBits = 8;
        pfd->cAlphaShift = 0;
        pfd->cAccumBits = 0;
        pfd->cAccumRedBits = 0;
        pfd->cAccumGreenBits = 0;
        pfd->cAccumBlueBits = 0;
        pfd->cAccumAlphaBits = 0;
        pfd->cDepthBits = 32;
        pfd->cStencilBits = 8;
        pfd->cAuxBuffers = 0;
        pfd->iLayerType = PFD_MAIN_PLANE;
        pfd->bReserved = 0;
        pfd->dwLayerMask = 0;
        pfd->dwVisibleMask = 0;
        pfd->dwDamageMask = 0;
    }
    else
    {
        /* Format 2: single-buffered RGB with accumulation buffer. */
        crMemZero(pfd, sizeof(*pfd));
        /* BUG FIX: nSize was never set in this branch (left 0 by the
         * crMemZero above); Win32 requires it to be
         * sizeof(PIXELFORMATDESCRIPTOR) in every returned descriptor. */
        pfd->nSize = sizeof(*pfd);
        pfd->nVersion = 1;
        pfd->dwFlags = (PFD_DRAW_TO_WINDOW| PFD_SUPPORT_OPENGL);
        pfd->iPixelType = PFD_TYPE_RGBA;
        pfd->cColorBits = 32;
        pfd->cRedBits = 8;
        pfd->cRedShift = 16;
        pfd->cGreenBits = 8;
        pfd->cGreenShift = 8;
        pfd->cBlueBits = 8;
        pfd->cBlueShift = 0;
        pfd->cAlphaBits = 0;
        pfd->cAlphaShift = 0;
        pfd->cAccumBits = 64;
        pfd->cAccumRedBits = 16;
        pfd->cAccumGreenBits = 16;
        pfd->cAccumBlueBits = 16;
        pfd->cAccumAlphaBits = 0;
        pfd->cDepthBits = 16;
        pfd->cStencilBits = 8;
        pfd->cAuxBuffers = 0;
        pfd->iLayerType = PFD_MAIN_PLANE;
        pfd->bReserved = 0;
        pfd->dwLayerMask = 0;
        pfd->dwVisibleMask = 0;
        pfd->dwDamageMask = 0;
    }

    /* the max PFD index */
    return 2;
}
/* Detach the calling thread from the pack SPU: flush its queue, delete
 * its packer context, close its server connection, and clear its slot.
 * The slot is only zeroed (not compacted) because other threads hold
 * TLS pointers into the thread array. */
void PACKSPU_APIENTRY packspu_VBoxDetachThread()
{
    int i;
    GET_THREAD(thread);
    if (thread)
    {
        crLockMutex(&_PackMutex);
        for (i=0; i<MAX_THREADS; ++i)
        {
            /* Find the slot belonging to the *calling* thread. */
            if (pack_spu.thread[i].inUse && thread==&pack_spu.thread[i]
                && thread->id==crThreadID() && thread->netServer.conn)
            {
                CRASSERT(pack_spu.numThreads>0);
                packspuFlush((void *) thread);
                if (pack_spu.thread[i].packer)
                {
                    CR_LOCK_PACKER_CONTEXT(thread->packer);
                    crPackSetContext(NULL);
                    CR_UNLOCK_PACKER_CONTEXT(thread->packer);
                    crPackDeleteContext(pack_spu.thread[i].packer);
                }
                crNetFreeConnection(pack_spu.thread[i].netServer.conn);
                pack_spu.numThreads--;
                /*note can't shift the array here, because other threads have TLS references to array elements*/
                crMemZero(&pack_spu.thread[i], sizeof(ThreadInfo));
                crSetTSD(&_PackTSD, NULL);
                if (i==pack_spu.idxThreadInUse)
                {
                    /* The "representative" thread went away — pick any
                     * remaining in-use slot.  NOTE: deliberately reuses
                     * 'i'; the outer loop is exited via 'break' below. */
                    for (i=0; i<MAX_THREADS; ++i)
                    {
                        if (pack_spu.thread[i].inUse)
                        {
                            pack_spu.idxThreadInUse=i;
                            break;
                        }
                    }
                }
                break;
            }
        }

        /* Drop any stale current-thread back-pointers in the contexts. */
        for (i=0; i<CR_MAX_CONTEXTS; ++i)
        {
            ContextInfo *ctx = &pack_spu.context[i];
            if (ctx->currentThread == thread)
            {
                CRASSERT(ctx->fAutoFlush);
                ctx->currentThread = NULL;
            }
        }

        crUnlockMutex(&_PackMutex);
    }
    crStateVBoxDetachThread();
}
/* Reset the replicate SPU's thread bookkeeping to an empty state. */
static void setDefaults( void )
{
    replicate_spu.numThreads = 0;
    crMemZero(replicate_spu.thread, MAX_THREADS * sizeof(ThreadInfo));
}
/*
 * Examine the context's extension string and set the boolean extension
 * flags accordingly.  This is to be called during context initialization.
 *
 * Also derives dependent limits (vertex-program env parameter count) and
 * the aggregate any_*_program convenience flags.
 */
void crStateExtensionsInit( CRLimitsState *limits, CRExtensionState *extensions )
{
    /* init all booleans to false */
    crMemZero(extensions, sizeof(CRExtensionState));

    /* Local helper macros: collapse the long run of identical
     * "extension string present -> flag set" checks into one line each.
     * SET_EXT2 sets the flag if either of two spellings is advertised.
     * Undefined again right after the table. */
#define HAS_EXT(NAME)  hasExtension((const char*)limits->extensions, NAME)
#define SET_EXT(FIELD, NAME) \
    do { if (HAS_EXT(NAME)) extensions->FIELD = GL_TRUE; } while (0)
#define SET_EXT2(FIELD, NAME1, NAME2) \
    do { if (HAS_EXT(NAME1) || HAS_EXT(NAME2)) extensions->FIELD = GL_TRUE; } while (0)

    SET_EXT(ARB_depth_texture,            "GL_ARB_depth_texture");
    SET_EXT(ARB_fragment_program,         "GL_ARB_fragment_program");
    SET_EXT(ARB_imaging,                  "GL_ARB_imaging");
    SET_EXT(ARB_multisample,              "GL_ARB_multisample");
    SET_EXT(ARB_multitexture,             "GL_ARB_multitexture");
    SET_EXT(ARB_occlusion_query,          "GL_ARB_occlusion_query");
    SET_EXT(ARB_point_parameters,         "GL_ARB_point_parameters");
    SET_EXT(ARB_point_sprite,             "GL_ARB_point_sprite");
    SET_EXT(ARB_shadow,                   "GL_ARB_shadow");
    SET_EXT(ARB_shadow_ambient,           "GL_ARB_shadow_ambient");
    SET_EXT2(ARB_texture_border_clamp,    "GL_ARB_texture_border_clamp",
                                          "GL_SGIS_texture_border_clamp");
    SET_EXT(ARB_texture_compression,      "GL_ARB_texture_compression");
    SET_EXT2(ARB_texture_cube_map,        "GL_ARB_texture_cube_map",
                                          "GL_EXT_texture_cube_map");
    SET_EXT(ARB_texture_env_add,          "GL_ARB_texture_env_add");
    SET_EXT2(ARB_texture_env_combine,     "GL_ARB_texture_env_combine",
                                          "GL_EXT_texture_env_combine");
    SET_EXT(ARB_texture_env_crossbar,     "GL_ARB_texture_env_crossbar");
    SET_EXT2(ARB_texture_env_dot3,        "GL_ARB_texture_env_dot3",
                                          "GL_EXT_texture_env_dot3");
    SET_EXT(ARB_texture_mirrored_repeat,  "GL_ARB_texture_mirrored_repeat");
    SET_EXT(ARB_texture_non_power_of_two, "GL_ARB_texture_non_power_of_two");
    SET_EXT(ARB_transpose_matrix,         "GL_ARB_transpose_matrix");
    SET_EXT(ARB_vertex_buffer_object,     "GL_ARB_vertex_buffer_object");
    SET_EXT(ARB_pixel_buffer_object,      "GL_ARB_pixel_buffer_object");
    SET_EXT(ARB_vertex_program,           "GL_ARB_vertex_program");
    SET_EXT(ARB_window_pos,               "GL_ARB_window_pos");
    SET_EXT(EXT_blend_color,              "GL_EXT_blend_color");
    SET_EXT(EXT_blend_minmax,             "GL_EXT_blend_minmax");
    SET_EXT(EXT_blend_func_separate,      "GL_EXT_blend_func_separate");
    SET_EXT(EXT_blend_logic_op,           "GL_EXT_blend_logic_op");
    SET_EXT(EXT_blend_subtract,           "GL_EXT_blend_subtract");
    SET_EXT(EXT_clip_volume_hint,         "GL_EXT_clip_volume_hint");
    SET_EXT(EXT_fog_coord,                "GL_EXT_fog_coord");
    SET_EXT(EXT_multi_draw_arrays,        "GL_EXT_multi_draw_arrays");
    SET_EXT(EXT_secondary_color,          "GL_EXT_secondary_color");
    SET_EXT(EXT_separate_specular_color,  "GL_EXT_separate_specular_color");
    SET_EXT(EXT_shadow_funcs,             "GL_EXT_shadow_funcs");
    SET_EXT(EXT_stencil_wrap,             "GL_EXT_stencil_wrap");
    SET_EXT2(EXT_texture_edge_clamp,      "GL_EXT_texture_edge_clamp",
                                          "GL_SGIS_texture_edge_clamp");
    SET_EXT(EXT_texture_filter_anisotropic, "GL_EXT_texture_filter_anisotropic");
    SET_EXT(EXT_texture_lod_bias,         "GL_EXT_texture_lod_bias");
    SET_EXT(IBM_rasterpos_clip,           "GL_IBM_rasterpos_clip");
    SET_EXT(NV_fog_distance,              "GL_NV_fog_distance");
    SET_EXT(NV_fragment_program,          "GL_NV_fragment_program");
    SET_EXT(NV_register_combiners,        "GL_NV_register_combiners");
    SET_EXT(NV_register_combiners2,       "GL_NV_register_combiners2");
    SET_EXT(NV_texgen_reflection,         "GL_NV_texgen_reflection");
    SET_EXT2(NV_texture_rectangle,        "GL_NV_texture_rectangle",
                                          "GL_EXT_texture_rectangle");
    SET_EXT(NV_vertex_program,            "GL_NV_vertex_program");
    SET_EXT(NV_vertex_program1_1,         "GL_NV_vertex_program1_1");
    SET_EXT(NV_vertex_program2,           "GL_NV_vertex_program2");
    SET_EXT(EXT_texture3D,                "GL_EXT_texture3D");
    SET_EXT(SGIS_generate_mipmap,         "GL_SGIS_generate_mipmap");
    SET_EXT(EXT_texture_from_pixmap,      "GLX_EXT_texture_from_pixmap");

#undef SET_EXT2
#undef SET_EXT
#undef HAS_EXT

    /* NV_vertex_program2 implies the larger env-parameter space. */
    if (extensions->NV_vertex_program2)
        limits->maxVertexProgramEnvParams = 256;
    else
        limits->maxVertexProgramEnvParams = 96;

    /* Aggregate convenience flags. */
    if (extensions->NV_vertex_program || extensions->ARB_vertex_program)
        extensions->any_vertex_program = GL_TRUE;
    if (extensions->NV_fragment_program || extensions->ARB_fragment_program)
        extensions->any_fragment_program = GL_TRUE;
    if (extensions->any_vertex_program || extensions->any_fragment_program)
        extensions->any_program = GL_TRUE;

#if 0
    /* Now, determine what level of OpenGL we support */
    if (extensions->ARB_multisample && extensions->ARB_multitexture &&
        extensions->ARB_texture_border_clamp && extensions->ARB_texture_compression &&
        extensions->ARB_texture_cube_map && extensions->ARB_texture_env_add &&
        extensions->ARB_texture_env_combine && extensions->ARB_texture_env_dot3)
    {
        if (extensions->ARB_depth_texture && extensions->ARB_point_parameters &&
            extensions->ARB_shadow && extensions->ARB_texture_env_crossbar &&
            extensions->ARB_texture_mirrored_repeat && extensions->ARB_window_pos &&
            extensions->EXT_blend_color && extensions->EXT_blend_func_separate &&
            extensions->EXT_blend_logic_op && extensions->EXT_blend_minmax &&
            extensions->EXT_blend_subtract && extensions->EXT_fog_coord &&
            extensions->EXT_multi_draw_arrays && extensions->EXT_secondary_color &&
            extensions->EXT_shadow_funcs && extensions->EXT_stencil_wrap &&
            extensions->SGIS_generate_mipmap)
        {
            if (extensions->ARB_occlusion_query &&
                extensions->ARB_vertex_buffer_object &&
                extensions->ARB_texture_non_power_of_two &&
                extensions->EXT_shadow_funcs)
            {
                extensions->version = (const GLubyte *) "1.5 Chromium " CR_VERSION_STRING;
            }
            else
            {
                extensions->version = (const GLubyte *) "1.4 Chromium " CR_VERSION_STRING;
            }
        }
        else
        {
            extensions->version = (const GLubyte *) "1.3 Chromium " CR_VERSION_STRING;
        }
    }
    else
    {
        extensions->version = (const GLubyte *) "1.2 Chromium " CR_VERSION_STRING;
    }
#endif
}
/* SPU entry point: initialize the render SPU.
 * Loads the native OpenGL library, builds the exported function table,
 * performs platform-specific setup (Windows message thread, Mac window
 * groups), creates the default window+context, then loads the extension
 * functions that require a bound context.  Returns the function table,
 * or NULL on fatal failure. */
static SPUFunctions * renderSPUInit( int id, SPU *child, SPU *self,
                                     unsigned int context_id,
                                     unsigned int num_contexts )
{
    int numFuncs, numSpecial;
    GLint defaultWin, defaultCtx;
    WindowInfo *windowInfo;
    const char * pcpwSetting;
    int rc;

    (void) child;
    (void) context_id;
    (void) num_contexts;

    self->privatePtr = (void *) &render_spu;

#ifdef CHROMIUM_THREADSAFE
    crDebug("Render SPU: thread-safe");
#endif

    crMemZero(&render_spu, sizeof(render_spu));

    render_spu.id = id;
    renderspuSetVBoxConfiguration(&render_spu);

    if (render_spu.swap_master_url)
        swapsyncConnect();

    /* Get our special functions. */
    numSpecial = renderspuCreateFunctions( _cr_render_table );

#ifdef RT_OS_WINDOWS
    /* Start thread to create windows and process window messages */
    crDebug("RenderSPU: Starting windows serving thread");
    render_spu.hWinThreadReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!render_spu.hWinThreadReadyEvent)
    {
        crError("RenderSPU: Failed to create WinThreadReadyEvent! (%x)", GetLastError());
        return NULL;
    }
    if (!CreateThread(NULL, 0, renderSPUWindowThreadProc, 0, 0, &render_spu.dwWinThreadId))
    {
        crError("RenderSPU: Failed to start windows thread! (%x)", GetLastError());
        return NULL;
    }
    /* Block until the window thread signals it is ready for requests. */
    WaitForSingleObject(render_spu.hWinThreadReadyEvent, INFINITE);
#endif

    /* Get the OpenGL functions. */
    numFuncs = crLoadOpenGL( &render_spu.ws, _cr_render_table + numSpecial );
    if (numFuncs == 0)
    {
        crError("The render SPU was unable to load the native OpenGL library");
        return NULL;
    }
    numFuncs += numSpecial;

    render_spu.contextTable = crAllocHashtableEx(1, INT32_MAX);
    render_spu.windowTable = crAllocHashtableEx(1, INT32_MAX);
    render_spu.dummyWindowTable = crAllocHashtable();

    /* Environment opt-in; any value not starting with '0' enables it. */
    pcpwSetting = crGetenv("CR_RENDER_ENABLE_SINGLE_PRESENT_CONTEXT");
    if (pcpwSetting)
    {
        if (pcpwSetting[0] == '0')
            pcpwSetting = NULL;
    }

    if (pcpwSetting)
    {
        /* TODO: need proper blitter synchronization, do not use so far!
         * the problem is that rendering can be done in multiple thread: the main command (hgcm) thread and the redraw thread
         * we currently use per-window synchronization, while we'll need a per-blitter synchronization if one blitter is used for multiple windows
         * this is not done currently */
        crWarning("TODO: need proper blitter synchronization, do not use so far!");
        render_spu.blitterTable = crAllocHashtable();
        CRASSERT(render_spu.blitterTable);
    }
    else
        render_spu.blitterTable = NULL;

    CRASSERT(render_spu.default_visual & CR_RGB_BIT);

    rc = renderspu_SystemInit();
    if (!RT_SUCCESS(rc))
    {
        crError("renderspu_SystemInit failed rc %d", rc);
        return NULL;
    }

#ifdef USE_OSMESA
    if (render_spu.use_osmesa)
    {
        if (!crLoadOSMesa(&render_spu.OSMesaCreateContext,
                          &render_spu.OSMesaMakeCurrent,
                          &render_spu.OSMesaDestroyContext))
        {
            crError("Unable to load OSMesa library");
        }
    }
#endif

#ifdef DARWIN
# ifdef VBOX_WITH_COCOA_QT
# else /* VBOX_WITH_COCOA_QT */
    render_spu.hRootVisibleRegion = 0;
    render_spu.currentBufferName = 1;
    render_spu.uiDockUpdateTS = 0;
    /* Create a mutex for synchronizing events from the main Qt thread & this thread */
    RTSemFastMutexCreate(&render_spu.syncMutex);
    /* Create our window groups */
    CreateWindowGroup(kWindowGroupAttrMoveTogether | kWindowGroupAttrLayerTogether | kWindowGroupAttrSharedActivation | kWindowGroupAttrHideOnCollapse | kWindowGroupAttrFixedLevel, &render_spu.pMasterGroup);
    CreateWindowGroup(kWindowGroupAttrMoveTogether | kWindowGroupAttrLayerTogether | kWindowGroupAttrSharedActivation | kWindowGroupAttrHideOnCollapse | kWindowGroupAttrFixedLevel, &render_spu.pParentGroup);
    /* Make the correct z-layering */
    SendWindowGroupBehind (render_spu.pParentGroup, render_spu.pMasterGroup);
    /* and set the gParentGroup as parent for gMasterGroup. */
    SetWindowGroupParent (render_spu.pMasterGroup, render_spu.pParentGroup);

    /* Install the event handlers */
    EventTypeSpec eventList[] =
    {
        {kEventClassVBox, kEventVBoxUpdateContext}, /* Update the context after show/size/move events */
        {kEventClassVBox, kEventVBoxBoundsChanged} /* Clip/Pos the OpenGL windows when the main window is changed in pos/size */
    };

    /* We need to process events from our main window */
    render_spu.hParentEventHandler = NewEventHandlerUPP(windowEvtHndlr);
    InstallApplicationEventHandler (render_spu.hParentEventHandler, GetEventTypeCount(eventList), eventList, NULL, NULL);

    render_spu.fInit = true;
# endif /* VBOX_WITH_COCOA_QT */
#endif /* DARWIN */

    /*
     * Create the default window and context. Their indexes are zero and
     * a client can use them without calling CreateContext or WindowCreate.
     */
    crDebug("Render SPU: Creating default window (visBits=0x%x, id=0)", render_spu.default_visual);
    defaultWin = renderspuWindowCreateEx( NULL, render_spu.default_visual, CR_RENDER_DEFAULT_WINDOW_ID );
    if (defaultWin != CR_RENDER_DEFAULT_WINDOW_ID)
    {
        crError("Render SPU: Couldn't get a double-buffered, RGB visual with Z!");
        return NULL;
    }
    crDebug( "Render SPU: WindowCreate returned %d (0=normal)", defaultWin );

    crDebug("Render SPU: Creating default context, visBits=0x%x", render_spu.default_visual );
    defaultCtx = renderspuCreateContextEx( NULL, render_spu.default_visual, CR_RENDER_DEFAULT_CONTEXT_ID, 0 );
    if (defaultCtx != CR_RENDER_DEFAULT_CONTEXT_ID)
    {
        crError("Render SPU: failed to create default context!");
        return NULL;
    }

    renderspuMakeCurrent( defaultWin, 0, defaultCtx );

    /* Get windowInfo for the default window */
    windowInfo = (WindowInfo *) crHashtableSearch(render_spu.windowTable, CR_RENDER_DEFAULT_WINDOW_ID);
    CRASSERT(windowInfo);
    windowInfo->mapPending = GL_TRUE;

    /*
     * Get the OpenGL extension functions.
     * SIGH -- we have to wait until the very bitter end to load the
     * extensions, because the context has to be bound before
     * wglGetProcAddress will work correctly.  No such issue with GLX though.
     */
    numFuncs += crLoadOpenGLExtensions( &render_spu.ws, _cr_render_table + numFuncs );
    CRASSERT(numFuncs < 1000);

#ifdef WINDOWS
    /*
     * Same problem as above, these are extensions so we need to
     * load them after a context has been bound. As they're WGL
     * extensions too, we can't simply tag them into the spu_loader.
     * So we do them here for now.
     * Grrr, NVIDIA driver uses EXT for GetExtensionsStringEXT,
     * but ARB for others. Need further testing here....
     */
    render_spu.ws.wglGetExtensionsStringEXT =
        (wglGetExtensionsStringEXTFunc_t) render_spu.ws.wglGetProcAddress( "wglGetExtensionsStringEXT" );
    render_spu.ws.wglChoosePixelFormatEXT =
        (wglChoosePixelFormatEXTFunc_t) render_spu.ws.wglGetProcAddress( "wglChoosePixelFormatARB" );
    render_spu.ws.wglGetPixelFormatAttribivEXT =
        (wglGetPixelFormatAttribivEXTFunc_t) render_spu.ws.wglGetProcAddress( "wglGetPixelFormatAttribivARB" );
    render_spu.ws.wglGetPixelFormatAttribfvEXT =
        (wglGetPixelFormatAttribfvEXTFunc_t) render_spu.ws.wglGetProcAddress( "wglGetPixelFormatAttribfvARB" );

    /* Append optional GL 1.2+ entry points to the exported table when
     * the driver provides them. */
    if (render_spu.ws.wglGetProcAddress("glCopyTexSubImage3D"))
    {
        _cr_render_table[numFuncs].name = crStrdup("CopyTexSubImage3D");
        _cr_render_table[numFuncs].fn = (SPUGenericFunction) render_spu.ws.wglGetProcAddress("glCopyTexSubImage3D");
        ++numFuncs;
        crDebug("Render SPU: Found glCopyTexSubImage3D function");
    }
    if (render_spu.ws.wglGetProcAddress("glDrawRangeElements"))
    {
        _cr_render_table[numFuncs].name = crStrdup("DrawRangeElements");
        _cr_render_table[numFuncs].fn = (SPUGenericFunction) render_spu.ws.wglGetProcAddress("glDrawRangeElements");
        ++numFuncs;
        crDebug("Render SPU: Found glDrawRangeElements function");
    }
    if (render_spu.ws.wglGetProcAddress("glTexSubImage3D"))
    {
        _cr_render_table[numFuncs].name = crStrdup("TexSubImage3D");
        _cr_render_table[numFuncs].fn = (SPUGenericFunction) render_spu.ws.wglGetProcAddress("glTexSubImage3D");
        ++numFuncs;
        crDebug("Render SPU: Found glTexSubImage3D function");
    }
    if (render_spu.ws.wglGetProcAddress("glTexImage3D"))
    {
        _cr_render_table[numFuncs].name = crStrdup("TexImage3D");
        _cr_render_table[numFuncs].fn = (SPUGenericFunction) render_spu.ws.wglGetProcAddress("glTexImage3D");
        ++numFuncs;
        crDebug("Render SPU: Found glTexImage3D function");
    }

    if (render_spu.ws.wglGetExtensionsStringEXT) {
        crDebug("WGL - found wglGetExtensionsStringEXT\n");
    }
    if (render_spu.ws.wglChoosePixelFormatEXT) {
        crDebug("WGL - found wglChoosePixelFormatEXT\n");
    }
#endif

    render_spu.barrierHash = crAllocHashtable();

    render_spu.cursorX = 0;
    render_spu.cursorY = 0;
    render_spu.use_L2 = 0;

    render_spu.gather_conns = NULL;

    numFuncs = renderspu_SystemPostprocessFunctions(_cr_render_table, numFuncs, RT_ELEMENTS(_cr_render_table));

    crDebug("Render SPU: ---------- End of Init -------------");
    return &render_functions;
}
/* Free a ContextInfo record, scrubbing it first so stale pointers in the
 * record cannot be misused after release. */
static void stubContextFree( ContextInfo *context )
{
    crMemZero(context, sizeof(ContextInfo)); /* just to be safe */
    crFree(context);
}