void crStateMakeCurrent( CRContext *ctx )
{
    CRContext *current = GetCurrentContext();

    if (ctx == NULL)
        ctx = defaultContext;

    if (current == ctx)
        return; /* no-op */

    CRASSERT(ctx);

    if (current) {
        /* Check to see if the differencer exists first,
         * we may not have one, aka the packspu */
        if (diff_api.AlphaFunc)
            crStateSwitchContext( current, ctx );
    }

#ifdef CHROMIUM_THREADSAFE
    crSetTSD(&__contextTSD, ctx);
#else
    __currentContext = ctx;
#endif

    /* ensure matrix state is also current */
    crStateMatrixMode(ctx->transform.matrixMode);
}
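/*
 * Illustrative usage sketch (not part of the original source): a SPU's
 * MakeCurrent entry point would typically look up its CRContext and hand
 * it to crStateMakeCurrent() so the diff_api-based switch above runs.
 * The hash table and function names here are assumptions.
 */
static void exampleMakeCurrent( GLint ctxId )
{
    CRContext *ctx = (CRContext *) crHashtableSearch( exampleContextTable, ctxId );
    /* a NULL lookup result falls back to defaultContext inside
     * crStateMakeCurrent(), so this is safe even for bad ids */
    crStateMakeCurrent( ctx );
}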
/**
 * Install the given dispatch table as the table used for all gl* calls.
 */
static void stubSetDispatch( SPUDispatchTable *table )
{
    CRASSERT(table);

#ifdef CHROMIUM_THREADSAFE
    /* always set the per-thread dispatch pointer */
    crSetTSD(&stub.dispatchTSD, (void *) table);
    if (stub.threadSafe) {
        /* Do nothing - the thread-safe dispatch functions will call GetTSD()
         * to get a pointer to the dispatch table, and jump through it.
         */
    }
    else
#endif
    {
        /* Single thread mode - just install the caller's dispatch table */
        /* This conditional is an optimization to try to avoid unnecessary
         * copying.  It seems to work with atlantis, multiwin, etc. but
         * _could_ be a problem. (Brian)
         */
        if (glim.copy_of != table->copy_of)
            crSPUCopyDispatchTable(&glim, table);
    }
}
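/*
 * Sketch (an assumption, not from this file) of how a thread-safe
 * dispatch wrapper consumes the per-thread pointer stored by
 * stubSetDispatch(): each generated gl* entry point fetches the table
 * via crGetTSD() and jumps through it.  The entry-point name is
 * hypothetical.
 */
static void APIENTRY exampleThreadSafeColor3f( GLfloat r, GLfloat g, GLfloat b )
{
    SPUDispatchTable *tab = (SPUDispatchTable *) crGetTSD(&stub.dispatchTSD);
    tab->Color3f(r, g, b);
}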
/*
 * Allocate a new ThreadInfo structure, setup a connection to the
 * server, allocate/init a packer context, bind this ThreadInfo to
 * the calling thread with crSetTSD().
 * We'll always call this function at least once even if we're not
 * using threads.
 */
ThreadInfo *replicatespuNewThread( CRthread id )
{
    ThreadInfo *thread;

#ifdef CHROMIUM_THREADSAFE_notyet
    crLockMutex(&_ReplicateMutex);
#else
    CRASSERT(replicate_spu.numThreads == 0);
#endif

    CRASSERT(replicate_spu.numThreads < MAX_THREADS);
    thread = &(replicate_spu.thread[replicate_spu.numThreads]);

    thread->id = id;
    thread->currentContext = NULL;

    /* connect to the server */
    thread->server.name = crStrdup( replicate_spu.name );
    thread->server.buffer_size = replicate_spu.buffer_size;
    if (replicate_spu.numThreads == 0) {
        replicatespuConnectToServer( &(thread->server) );
        CRASSERT(thread->server.conn);
        replicate_spu.swap = thread->server.conn->swap;
    }
    else {
        /* a new pthread */
        replicatespuFlushAll( &(replicate_spu.thread[0]) );
        crNetNewClient( replicate_spu.thread[0].server.conn, &(thread->server) );
        CRASSERT(thread->server.conn);
    }

    /* packer setup */
    CRASSERT(thread->packer == NULL);
    thread->packer = crPackNewContext( replicate_spu.swap );
    CRASSERT(thread->packer);
    crPackInitBuffer( &(thread->buffer), crNetAlloc(thread->server.conn),
                      thread->server.conn->buffer_size, thread->server.conn->mtu );
    thread->buffer.canBarf = thread->server.conn->Barf ? GL_TRUE : GL_FALSE;
    crPackSetBuffer( thread->packer, &thread->buffer );
    crPackFlushFunc( thread->packer, replicatespuFlush );
    crPackFlushArg( thread->packer, (void *) thread );
    crPackSendHugeFunc( thread->packer, replicatespuHuge );
    crPackSetContext( thread->packer );

#ifdef CHROMIUM_THREADSAFE_notyet
    crSetTSD(&_ReplicateTSD, thread);
#endif

    replicate_spu.numThreads++;

#ifdef CHROMIUM_THREADSAFE_notyet
    crUnlockMutex(&_ReplicateMutex);
#endif
    return thread;
}
/*
 * Allocate the state (dirty) bits data structures.
 * This should be called before we create any contexts.
 * We'll also create the default/NULL context at this time and make
 * it the current context by default.  This means that if someone
 * tries to set GL state before calling MakeCurrent() they'll be
 * modifying the default state object, and not segfaulting on a NULL
 * pointer somewhere.
 */
void crStateInit(void)
{
    unsigned int i;

    /* Purely initialize the context bits */
    if (!__currentBits) {
        __currentBits = (CRStateBits *) crCalloc( sizeof(CRStateBits) );
        crStateClientInitBits( &(__currentBits->client) );
        crStateLightingInitBits( &(__currentBits->lighting) );
    }
    else
        crWarning("State tracker is being re-initialized.\n");

    for (i = 0; i < CR_MAX_CONTEXTS; i++)
        g_availableContexts[i] = 0;

    if (defaultContext) {
        /* Free the default/NULL context.
         * Ensures context bits are reset */
        crStateFreeContext(defaultContext);
#ifdef CHROMIUM_THREADSAFE
        crSetTSD(&__contextTSD, NULL);
#else
        __currentContext = NULL;
#endif
    }

    /* Reset diff_api */
    crMemZero(&diff_api, sizeof(SPUDispatchTable));

    /* Allocate the default/NULL context */
    defaultContext = crStateCreateContextId(0, NULL, CR_RGB_BIT, NULL);
    CRASSERT(g_availableContexts[0] == 0);
    g_availableContexts[0] = 1; /* in use forever */

#ifdef CHROMIUM_THREADSAFE
    crSetTSD(&__contextTSD, defaultContext);
#else
    __currentContext = defaultContext;
#endif
}
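/*
 * Minimal initialization sketch (illustrative only; exampleSPUInit is a
 * hypothetical caller): the state tracker is initialized once, before
 * any real contexts exist, exactly as the comment above prescribes.
 */
static void exampleSPUInit(void)
{
    crStateInit(); /* creates and binds the default/NULL context */
    /* GL state calls made from here on hit the default context instead
     * of dereferencing a NULL current-context pointer */
    crStateMatrixMode(GL_MODELVIEW);
}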
void PACKSPU_APIENTRY packspu_VBoxPackSetInjectThread(void)
{
    crLockMutex(&_PackMutex);
    {
        int i;
        GET_THREAD(thread);
        CRASSERT(!thread);
        CRASSERT((pack_spu.numThreads > 0) && (pack_spu.numThreads < MAX_THREADS));

        for (i = 0; i < MAX_THREADS; ++i) {
            if (!pack_spu.thread[i].inUse) {
                thread = &pack_spu.thread[i];
                break;
            }
        }
        CRASSERT(thread);

        thread->inUse = GL_TRUE;
        thread->id = crThreadID();
        thread->currentContext = NULL;
        thread->bInjectThread = GL_TRUE;

        thread->netServer.name = crStrdup(pack_spu.name);
        thread->netServer.buffer_size = 64 * 1024;

        crNetNewClient(pack_spu.thread[pack_spu.idxThreadInUse].netServer.conn,
                       &(thread->netServer));
        CRASSERT(thread->netServer.conn);

        CRASSERT(thread->packer == NULL);
        thread->packer = crPackNewContext( pack_spu.swap );
        CRASSERT(thread->packer);

        crPackInitBuffer(&(thread->buffer), crNetAlloc(thread->netServer.conn),
                         thread->netServer.conn->buffer_size, thread->netServer.conn->mtu);
        thread->buffer.canBarf = thread->netServer.conn->Barf ? GL_TRUE : GL_FALSE;

        crPackSetBuffer( thread->packer, &thread->buffer );
        crPackFlushFunc( thread->packer, packspuFlush );
        crPackFlushArg( thread->packer, (void *) thread );
        crPackSendHugeFunc( thread->packer, packspuHuge );
        crPackSetContext( thread->packer );

        crSetTSD(&_PackTSD, thread);

        pack_spu.numThreads++;
    }
    crUnlockMutex(&_PackMutex);
}
void PACKSPU_APIENTRY packspu_VBoxPackAttachThread(void)
{
#if 0
    int i;
    GET_THREAD(thread);
    for (i = 0; i < MAX_THREADS; ++i) {
        if (pack_spu.thread[i].inUse && thread == &pack_spu.thread[i]
            && thread->id == crThreadID()) {
            crError("2nd attach to same thread");
        }
    }
#endif
    crSetTSD(&_PackTSD, NULL);
}
/*
 * As above, but don't call crStateSwitchContext().
 */
void crStateSetCurrent( CRContext *ctx )
{
    CRContext *current = GetCurrentContext();

    if (ctx == NULL)
        ctx = defaultContext;

    if (current == ctx)
        return; /* no-op */

    CRASSERT(ctx);

#ifdef CHROMIUM_THREADSAFE
    crSetTSD(&__contextTSD, ctx);
#else
    __currentContext = ctx;
#endif

    /* ensure matrix state is also current */
    crStateMatrixMode(ctx->transform.matrixMode);
}
void crStateDestroyContext( CRContext *ctx )
{
    CRContext *current = GetCurrentContext();

    if (current == ctx) {
        /* destroying the current context - have to be careful here */
        CRASSERT(defaultContext);
        /* Check to see if the differencer exists first,
         * we may not have one, aka the packspu */
        if (diff_api.AlphaFunc)
            crStateSwitchContext(current, defaultContext);

#ifdef CHROMIUM_THREADSAFE
        crSetTSD(&__contextTSD, defaultContext);
#else
        __currentContext = defaultContext;
#endif

        /* ensure matrix state is also current */
        crStateMatrixMode(defaultContext->transform.matrixMode);
    }

    g_availableContexts[ctx->id] = 0;

    crStateFreeContext(ctx);
}
void RENDER_APIENTRY renderspuMakeCurrent(GLint crWindow, GLint nativeWindow, GLint ctx)
{
    WindowInfo *window;
    ContextInfo *context;

    /*
    crDebug("%s win=%d native=0x%x ctx=%d", __FUNCTION__, crWindow, (int) nativeWindow, ctx);
    */

    window = (WindowInfo *) crHashtableSearch(render_spu.windowTable, crWindow);
    context = (ContextInfo *) crHashtableSearch(render_spu.contextTable, ctx);

    if (window && context) {
#ifdef CHROMIUM_THREADSAFE
        crSetTSD(&_RenderTSD, context);
#else
        render_spu.currentContext = context;
#endif
        context->currentWindow = window;

        renderspu_SystemMakeCurrent( window, nativeWindow, context );
        if (!context->everCurrent) {
            /* print OpenGL info */
            const char *extString = (const char *) render_spu.ws.glGetString( GL_EXTENSIONS );
            /*
            crDebug( "Render SPU: GL_EXTENSIONS: %s", render_spu.ws.glGetString( GL_EXTENSIONS ) );
            */
            crInfo( "Render SPU: GL_VENDOR: %s", render_spu.ws.glGetString( GL_VENDOR ) );
            crInfo( "Render SPU: GL_RENDERER: %s", render_spu.ws.glGetString( GL_RENDERER ) );
            crInfo( "Render SPU: GL_VERSION: %s", render_spu.ws.glGetString( GL_VERSION ) );
            crInfo( "Render SPU: GL_EXTENSIONS: %s", render_spu.ws.glGetString( GL_EXTENSIONS ) );
            if (crStrstr(extString, "GL_ARB_window_pos"))
                context->haveWindowPosARB = GL_TRUE;
            else
                context->haveWindowPosARB = GL_FALSE;
            context->everCurrent = GL_TRUE;
        }
        if (crWindow == CR_RENDER_DEFAULT_WINDOW_ID && window->mapPending &&
            !render_spu.render_to_app_window && !render_spu.render_to_crut_window) {
            /* Window[CR_RENDER_DEFAULT_CONTEXT_ID] is special, it's the default window and normally hidden.
             * If the mapPending flag is set, then we should now make the window
             * visible.
             */
            /*renderspu_SystemShowWindow( window, GL_TRUE );*/
            window->mapPending = GL_FALSE;
        }
        window->everCurrent = GL_TRUE;
    }
    else if (!crWindow && !ctx) {
        renderspu_SystemMakeCurrent( NULL, 0, NULL );
#ifdef CHROMIUM_THREADSAFE
        crSetTSD(&_RenderTSD, NULL);
#else
        render_spu.currentContext = NULL;
#endif
    }
    else {
        /* these diagnostics were previously dead code inside the
         * (window && context) branch; report the bad lookup here where
         * it can actually happen */
        if (!window)
            crDebug("Render SPU: MakeCurrent invalid window id: %d", crWindow);
        if (!context)
            crDebug("Render SPU: MakeCurrent invalid context id: %d", ctx);
        crError("renderspuMakeCurrent invalid ids: crWindow(%d), ctx(%d)", crWindow, ctx);
    }
}
void PACKSPU_APIENTRY packspu_VBoxDetachThread(void)
{
    int i;
    GET_THREAD(thread);

    if (thread) {
        crLockMutex(&_PackMutex);

        for (i = 0; i < MAX_THREADS; ++i) {
            if (pack_spu.thread[i].inUse && thread == &pack_spu.thread[i]
                && thread->id == crThreadID() && thread->netServer.conn)
            {
                CRASSERT(pack_spu.numThreads > 0);

                packspuFlush((void *) thread);

                if (pack_spu.thread[i].packer) {
                    CR_LOCK_PACKER_CONTEXT(thread->packer);
                    crPackSetContext(NULL);
                    CR_UNLOCK_PACKER_CONTEXT(thread->packer);
                    crPackDeleteContext(pack_spu.thread[i].packer);
                }
                crNetFreeConnection(pack_spu.thread[i].netServer.conn);

                pack_spu.numThreads--;
                /* note: can't shift the array here, because other threads
                 * have TLS references to array elements */
                crMemZero(&pack_spu.thread[i], sizeof(ThreadInfo));

                crSetTSD(&_PackTSD, NULL);

                if (i == pack_spu.idxThreadInUse) {
                    for (i = 0; i < MAX_THREADS; ++i) {
                        if (pack_spu.thread[i].inUse) {
                            pack_spu.idxThreadInUse = i;
                            break;
                        }
                    }
                }

                break;
            }
        }

        for (i = 0; i < CR_MAX_CONTEXTS; ++i) {
            ContextInfo *ctx = &pack_spu.context[i];
            if (ctx->currentThread == thread) {
                CRASSERT(ctx->fAutoFlush);
                ctx->currentThread = NULL;
            }
        }

        crUnlockMutex(&_PackMutex);
    }

    crStateVBoxDetachThread();
}
GLint PACKSPU_APIENTRY packspu_VBoxPackSetInjectThread(struct VBOXUHGSMI *pHgsmi)
{
    GLint con = 0;
    int i;
    GET_THREAD(thread);
    CRASSERT(!thread);

    crLockMutex(&_PackMutex);
    {
        CRASSERT(CRPACKSPU_IS_WDDM_CRHGSMI() || (pack_spu.numThreads > 0));
        CRASSERT(pack_spu.numThreads < MAX_THREADS);
        for (i = 0; i < MAX_THREADS; ++i) {
            if (!pack_spu.thread[i].inUse) {
                thread = &pack_spu.thread[i];
                break;
            }
        }
        CRASSERT(thread);

        thread->inUse = GL_TRUE;
        if (!CRPACKSPU_IS_WDDM_CRHGSMI())
            thread->id = crThreadID();
        else
            thread->id = THREAD_OFFSET_MAGIC + i;
        thread->currentContext = NULL;
        thread->bInjectThread = GL_TRUE;

        thread->netServer.name = crStrdup(pack_spu.name);
        thread->netServer.buffer_size = 64 * 1024;

        packspuConnectToServer(&(thread->netServer)
#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
                , pHgsmi
#endif
                );
        CRASSERT(thread->netServer.conn);

        CRASSERT(thread->packer == NULL);
        thread->packer = crPackNewContext( pack_spu.swap );
        CRASSERT(thread->packer);

        crPackInitBuffer(&(thread->buffer), crNetAlloc(thread->netServer.conn),
                         thread->netServer.conn->buffer_size, thread->netServer.conn->mtu);
        thread->buffer.canBarf = thread->netServer.conn->Barf ? GL_TRUE : GL_FALSE;

        crPackSetBuffer( thread->packer, &thread->buffer );
        crPackFlushFunc( thread->packer, packspuFlush );
        crPackFlushArg( thread->packer, (void *) thread );
        crPackSendHugeFunc( thread->packer, packspuHuge );
        crPackSetContext( thread->packer );

        crSetTSD(&_PackTSD, thread);

        pack_spu.numThreads++;
    }
    crUnlockMutex(&_PackMutex);

    if (CRPACKSPU_IS_WDDM_CRHGSMI()) {
        CRASSERT(thread->id - THREAD_OFFSET_MAGIC < RT_ELEMENTS(pack_spu.thread)
                 && GET_THREAD_VAL_ID(thread->id) == thread);
        con = thread->id;
    }

    return con;
}
/*
 * Allocate a new ThreadInfo structure, setup a connection to the
 * server, allocate/init a packer context, bind this ThreadInfo to
 * the calling thread with crSetTSD().
 * We'll always call this function at least once even if we're not
 * using threads.
 */
ThreadInfo *packspuNewThread( unsigned long id )
{
    ThreadInfo *thread = NULL;
    int i;

#ifdef CHROMIUM_THREADSAFE
    crLockMutex(&_PackMutex);
#else
    CRASSERT(pack_spu.numThreads == 0);
#endif

    CRASSERT(pack_spu.numThreads < MAX_THREADS);
    for (i = 0; i < MAX_THREADS; ++i) {
        if (!pack_spu.thread[i].inUse) {
            thread = &pack_spu.thread[i];
            break;
        }
    }
    CRASSERT(thread);

    thread->inUse = GL_TRUE;
    thread->id = id;
    thread->currentContext = NULL;
    thread->bInjectThread = GL_FALSE;

    /* connect to the server */
    thread->netServer.name = crStrdup( pack_spu.name );
    thread->netServer.buffer_size = pack_spu.buffer_size;
    if (pack_spu.numThreads == 0) {
        packspuConnectToServer( &(thread->netServer) );
        if (!thread->netServer.conn) {
            /* unlock before bailing out so we don't leave the mutex held */
#ifdef CHROMIUM_THREADSAFE
            crUnlockMutex(&_PackMutex);
#endif
            return NULL;
        }
        pack_spu.swap = thread->netServer.conn->swap;
    }
    else {
        /* a new pthread */
        crNetNewClient(pack_spu.thread[pack_spu.idxThreadInUse].netServer.conn,
                       &(thread->netServer));
        CRASSERT(thread->netServer.conn);
    }

    /* packer setup */
    CRASSERT(thread->packer == NULL);
    thread->packer = crPackNewContext( pack_spu.swap );
    CRASSERT(thread->packer);
    crPackInitBuffer( &(thread->buffer), crNetAlloc(thread->netServer.conn),
                      thread->netServer.conn->buffer_size, thread->netServer.conn->mtu );
    thread->buffer.canBarf = thread->netServer.conn->Barf ? GL_TRUE : GL_FALSE;
    crPackSetBuffer( thread->packer, &thread->buffer );
    crPackFlushFunc( thread->packer, packspuFlush );
    crPackFlushArg( thread->packer, (void *) thread );
    crPackSendHugeFunc( thread->packer, packspuHuge );
    crPackSetContext( thread->packer );

#ifdef CHROMIUM_THREADSAFE
    crSetTSD(&_PackTSD, thread);
#endif

    pack_spu.numThreads++;

#ifdef CHROMIUM_THREADSAFE
    crUnlockMutex(&_PackMutex);
#endif
    return thread;
}
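/*
 * Expected call pattern (a sketch assuming the usual Chromium
 * GET_THREAD()/crThreadID() conventions; packspu_ExampleCall is
 * hypothetical): generated pack SPU entry points fetch the calling
 * thread's ThreadInfo and create it lazily on first use.
 */
void PACKSPU_APIENTRY packspu_ExampleCall(void)
{
    GET_THREAD(thread);
    if (!thread)
        thread = packspuNewThread( crThreadID() );
    CRASSERT(thread);
    /* thread->packer is now the calling thread's packing context,
     * bound via crPackSetContext() inside packspuNewThread() */
}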
static SPUFunctions *
tilesortSPUInit( int id, SPU *child, SPU *self,
                 unsigned int context_id,
                 unsigned int num_contexts )
{
    ThreadInfo *thread0 = &(tilesort_spu.thread[0]);

    (void) context_id;
    (void) num_contexts;
    (void) child;
    (void) self;

#if DEBUG_FP_EXCEPTIONS
    {
        fpu_control_t mask;
        _FPU_GETCW(mask);
        mask &= ~(_FPU_MASK_IM | _FPU_MASK_DM | _FPU_MASK_ZM
                  | _FPU_MASK_OM | _FPU_MASK_UM);
        _FPU_SETCW(mask);
    }
#endif

    crRandSeed(id);

    crMemZero( &tilesort_spu, sizeof(TileSortSPU) );

#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&_ThreadTSD);
    crSetTSD(&_ThreadTSD, thread0);
    crInitMutex(&_TileSortMutex);
#endif

    thread0->state_server_index = -1; /* one-time init for thread */

    tilesortspuInitEvaluators();

    /* Init window, context hash tables */
    tilesort_spu.windowTable = crAllocHashtable();
    tilesort_spu.contextTable = crAllocHashtable();
    tilesort_spu.listTable = crAllocHashtable();

    tilesort_spu.id = id;
    tilesort_spu.glassesType = RED_BLUE;
    tilesortspuSetAnaglyphMask(&tilesort_spu);

    tilesortspuGatherConfiguration( child );
    tilesortspuConnectToServers(); /* set up thread0's server connection */

    tilesort_spu.geom_buffer_size = tilesort_spu.buffer_size;

    /* geom_buffer_mtu must fit in data's part of our buffers */
    tilesort_spu.geom_buffer_mtu = crPackMaxData(tilesort_spu.buffer_size)
        /* 24 is the size of the bounds info packet
         * END_FLUFF is the size of data of the extra End opcode if needed
         * 4 since BoundsInfo opcode may take a whole 4 bytes
         * and 4 to leave room for the extra End's opcode, if needed
         */
        - (24 + END_FLUFF + 4 + 4);

    /* the geometry must also fit in the mtu */
    if (tilesort_spu.geom_buffer_mtu >
        tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24 + END_FLUFF + 4 + 4))
        tilesort_spu.geom_buffer_mtu =
            tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24 + END_FLUFF + 4 + 4);

    tilesort_spu.swap = thread0->netServer[0].conn->swap;

    tilesortspuInitThreadPacking( thread0 );
    tilesortspuCreateFunctions();

    crStateInit();
    tilesortspuCreateDiffAPI();

    /* special dispatch tables for display lists */
    if (tilesort_spu.listTrack || tilesort_spu.lazySendDLists) {
        crMemZero((void *) &tilesort_spu.packerDispatch, sizeof tilesort_spu.packerDispatch);
        crSPUInitDispatchTable(&tilesort_spu.packerDispatch);
        tilesortspuLoadPackTable(&tilesort_spu.packerDispatch);

        crSPUInitDispatchTable(&tilesort_spu.stateDispatch);
        tilesortspuLoadStateTable(&tilesort_spu.stateDispatch);
    }

    if (tilesort_spu.useDMX) {
        /* load OpenGL */
        int n;
        crDebug("Tilesort SPU: Using DMX");
        n = crLoadOpenGL( &tilesort_spu.ws, NULL );
        if (!n) {
            crWarning("Tilesort SPU: Unable to load OpenGL, disabling DMX");
            tilesort_spu.useDMX = 0;
        }
    }
    else {
        crDebug("Tilesort SPU: Not using DMX");
    }

    crDebug("Tilesort SPU: ---------- End of Init -------------");

    return &tilesort_functions;
}
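/*
 * Worked example of the geom_buffer_mtu arithmetic above (the concrete
 * numbers are assumptions for illustration only): if buffer_size were
 * 32768 and END_FLUFF were 4, the packer could hold crPackMaxData(32768)
 * bytes of data, from which 24 (bounds info packet) + 4 (END_FLUFF)
 * + 4 (whole-word BoundsInfo opcode) + 4 (room for an extra End opcode)
 * = 36 bytes are reserved.  The result is then clamped so that a full
 * geometry buffer plus a CRMessageOpcodes header still fits in one
 * MTU-sized network message.
 */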
/* Set packing context for the calling thread */
void crPackSetContext( CRPackContext *pc )
{
    crSetTSD( &_PackerTSD, pc );
}
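/*
 * Usage sketch (illustrative; exampleWorkerInit is hypothetical): each
 * thread creates its own CRPackContext once and binds it with
 * crPackSetContext() so that subsequent crPack* calls made by this
 * thread find it through thread-specific data.
 */
static void exampleWorkerInit( GLboolean swapBytes )
{
    CRPackContext *pc = crPackNewContext( swapBytes );
    crPackSetContext( pc ); /* bind to the calling thread */
}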