/**
 * Create a new rendering context on the server and a matching client-side
 * state tracker.
 *
 * \param dpyName   display name forwarded to the server
 * \param visual    visual attribute bitmask (also used for the client state)
 * \param shareCtx  client-visible id of a context to share with, or <= 0
 * \return client-visible context id (MAGIC_OFFSET + slot), or -1 on failure
 */
GLint PACKSPU_APIENTRY packspu_CreateContext( const char *dpyName, GLint visual, GLint shareCtx )
{
	GET_THREAD(thread);
	int writeback = 1;             /* cleared by the network layer when the reply arrives */
	GLint serverCtx = (GLint) -1;  /* filled in by the server's reply */
	int slot;

#ifdef CHROMIUM_THREADSAFE
	crLockMutex(&_PackMutex);
#endif

	if (!thread) {
		thread = packspuNewThread(crThreadID());
	}
	CRASSERT(thread);
	CRASSERT(thread->packer);

	if (shareCtx > 0) {
		/* Translate the client-visible id back to the server's context id.
		 * NOTE(review): if the id is out of range we fall through and send the
		 * untranslated (shareCtx - MAGIC_OFFSET) value to the server -- TODO
		 * confirm whether an invalid share id should instead be rejected here.
		 */
		shareCtx -= MAGIC_OFFSET;
		if (shareCtx >= 0 && shareCtx < pack_spu.numContexts) {
			shareCtx = pack_spu.context[shareCtx].serverCtx;
		}
	}

	crPackSetContext( thread->packer );

	/* Pack the command */
	if (pack_spu.swap)
		crPackCreateContextSWAP( dpyName, visual, shareCtx, &serverCtx, &writeback );
	else
		crPackCreateContext( dpyName, visual, shareCtx, &serverCtx, &writeback );

	/* Flush buffer and get return value */
	packspuFlush(thread);

	if (!(thread->netServer.conn->actual_network))
	{
		/* HUMUNGOUS HACK TO MATCH SERVER NUMBERING
		 *
		 * This hack exists solely to make file networking work: there is no
		 * real server to reply, but the server-side numbering starts at 5000,
		 * so we mirror that here.  This would be marginally less gross if the
		 * numbers (500 and 5000) were #define'd constants shared by client
		 * and server.
		 *
		 * -Humper
		 */
		serverCtx = 5000;
	}
	else {
		/* Spin on the network until the server's reply clears writeback. */
		while (writeback)
			crNetRecv();

		if (pack_spu.swap) {
			serverCtx = (GLint) SWAP32(serverCtx);
		}
		if (serverCtx < 0) {
#ifdef CHROMIUM_THREADSAFE
			crUnlockMutex(&_PackMutex);
#endif
			crWarning("Failure in packspu_CreateContext");
			return -1; /* failed */
		}
	}

	/* find an empty context slot */
	for (slot = 0; slot < pack_spu.numContexts; slot++) {
		if (!pack_spu.context[slot].clientState) {
			/* found empty slot */
			break;
		}
	}
	if (slot == pack_spu.numContexts) {
		pack_spu.numContexts++;
	}

	/* Fill in the new context info */
	/* XXX fix-up sharedCtx param here */
	pack_spu.context[slot].clientState = crStateCreateContext(NULL, visual, NULL);
	if (!pack_spu.context[slot].clientState) {
		/* FIX: previously dereferenced a NULL pointer on allocation failure
		 * (while still holding the mutex). */
#ifdef CHROMIUM_THREADSAFE
		crUnlockMutex(&_PackMutex);
#endif
		crWarning("packspu_CreateContext: crStateCreateContext failed");
		return -1;
	}
	pack_spu.context[slot].clientState->bufferobject.retainBufferData = GL_TRUE;
	pack_spu.context[slot].serverCtx = serverCtx;

#ifdef CHROMIUM_THREADSAFE
	crUnlockMutex(&_PackMutex);
#endif

	/* Clients see an id offset by MAGIC_OFFSET so it can't collide with
	 * other id spaces. */
	return MAGIC_OFFSET + slot;
}
/**
 * Create a rendering context on every connected replicant server and a
 * local ContextInfo record tracking it.
 *
 * \param dpyName   display name forwarded to the servers
 * \param visual    visual attribute bitmask
 * \param shareCtx  client-visible id of a context to share with, or <= 0
 * \return new client-visible context id, or -1 on failure
 */
GLint REPLICATESPU_APIENTRY replicatespu_CreateContext( const char *dpyName, GLint visual, GLint shareCtx )
{
	static GLint freeCtxID = MAGIC_OFFSET;
	/* FIX: zero-initialize -- this buffer was read by crStrcmp() below without
	 * ever being written in this function (undefined behavior).  NOTE(review):
	 * upstream presumably filled it with the head SPU's name via a
	 * GetChromiumParametervCR-style query; that call is missing here -- TODO
	 * restore it.  With an empty string the NOP flag deterministically
	 * defaults to 1. */
	char headspuname[10] = "";
	ContextInfo *context, *sharedContext = NULL;
	unsigned int i;

	if (shareCtx > 0) {
		sharedContext = (ContextInfo *) crHashtableSearch(replicate_spu.contextTable, shareCtx);
	}

	replicatespuFlushAll( &(replicate_spu.thread[0]) );

#ifdef CHROMIUM_THREADSAFE_notyet
	crLockMutex(&_ReplicateMutex);
#endif

	replicatespuStartVnc(dpyName);

	/*
	 * Alloc new ContextInfo object
	 */
	context = (ContextInfo *) crCalloc(sizeof(ContextInfo));
	if (!context) {
		/* FIX: release the mutex on this early-out (all other error paths
		 * already did). */
#ifdef CHROMIUM_THREADSAFE_notyet
		crUnlockMutex(&_ReplicateMutex);
#endif
		crWarning("Replicate SPU: Out of memory in CreateContext");
		return -1;
	}

	/* Contexts that don't share display lists get their own display list
	 * managers.  Contexts that do, share the display list manager of the
	 * context they're sharing with. */
	if (sharedContext) {
		context->displayListManager = sharedContext->displayListManager;
		/* Let the DLM know that a second context is using the same display
		 * list manager, so it can manage when its resources are released. */
		crDLMUseDLM(context->displayListManager);
	}
	else {
		context->displayListManager = crDLMNewDLM(0, NULL);
		if (!context->displayListManager) {
			crWarning("Replicate SPU: could not initialize display list manager.");
		}
	}

	/* Fill in the new context info */
	if (sharedContext)
		context->State = crStateCreateContext(NULL, visual, sharedContext->State);
	else
		context->State = crStateCreateContext(NULL, visual, NULL);
	context->rserverCtx[0] = 1; /* not really used */
	context->visBits = visual;
	context->currentWindow = 0; /* not bound */
	context->dlmState = crDLMNewContext(context->displayListManager);
	context->displayListMode = GL_FALSE; /* not compiling */
	context->displayListIdentifier = 0;
	context->shareCtx = shareCtx;

#if 0
	/* Set the Current pointers now.... */
	crStateSetCurrentPointers( context->State, &(replicate_spu.thread[0].packer->current) );
#endif

	/* Create the context on each connected replicant server (slot 0 is
	 * reserved, see rserverCtx[0] above). */
	for (i = 1; i < CR_MAX_REPLICANTS; i++) {
		int r_writeback = 1, rserverCtx = -1;
		int sharedServerCtx;

		/* Translate the share id to this server's id space. */
		sharedServerCtx = sharedContext ? sharedContext->rserverCtx[i] : 0;

		if (!IS_CONNECTED(replicate_spu.rserver[i].conn))
			continue;

		if (replicate_spu.swap)
			crPackCreateContextSWAP( dpyName, visual, sharedServerCtx, &rserverCtx, &r_writeback );
		else
			crPackCreateContext( dpyName, visual, sharedServerCtx, &rserverCtx, &r_writeback );

		/* Flush buffer and get return value */
		replicatespuFlushOne( &(replicate_spu.thread[0]), i );

		while (r_writeback)
			crNetRecv();

		if (replicate_spu.swap)
			rserverCtx = (GLint) SWAP32(rserverCtx);

		if (rserverCtx < 0) {
#ifdef CHROMIUM_THREADSAFE_notyet
			crUnlockMutex(&_ReplicateMutex);
#endif
			/* NOTE(review): context (and its State/DLM resources) leaks on
			 * this path; freeing it requires the project's release APIs --
			 * TODO add proper cleanup. */
			crWarning("Replicate SPU: CreateContext failed.");
			return -1; /* failed */
		}

		context->rserverCtx[i] = rserverCtx;
	}

	/* NOP mode is enabled unless the head SPU is the "nop" SPU. */
	if (!crStrcmp( headspuname, "nop" ))
		replicate_spu.NOP = 0;
	else
		replicate_spu.NOP = 1;

#ifdef CHROMIUM_THREADSAFE_notyet
	crUnlockMutex(&_ReplicateMutex);
#endif

	crListPushBack(replicate_spu.contextList, (void *)freeCtxID);
	crHashtableAdd(replicate_spu.contextTable, freeCtxID, context);

	return freeCtxID++;
}
/** * Replicate our contexts on a new server (indicated by NewServerIndex). * XXX It may be a problem if we try to attach to a shared context, * when that shared context has not yet been created. */ static void replicatespuReplicateContext(void *element, void *arg) { GLint ctx = (GLint) element; ThreadInfo *thread = (ThreadInfo *) arg; ContextInfo *context = crHashtableSearch(replicate_spu.contextTable, ctx); ContextInfo *sharedContext = NULL; CRContext *tempState; GLint return_val = 0, shareCtx = context->shareCtx, sharedServerCtx = 0; int writeback; if (!context->State) { /* XXX need this? */ crWarning("ReplicateSPU: replicating context with no state!"); return; } if (shareCtx > 0) { sharedContext = (ContextInfo *) crHashtableSearch(replicate_spu.contextTable, shareCtx); if (sharedContext) sharedServerCtx = sharedContext->rserverCtx[NewServerIndex]; } /* * Send CreateContext to new server and get return value */ if (replicate_spu.swap) crPackCreateContextSWAP( replicate_spu.dpyName, context->visBits, sharedServerCtx, &return_val, &writeback); else crPackCreateContext( replicate_spu.dpyName, context->visBits, sharedServerCtx, &return_val, &writeback); replicatespuFlushOne(thread, NewServerIndex); writeback = 1; while (writeback) crNetRecv(); if (replicate_spu.swap) return_val = (GLint) SWAP32(return_val); if (return_val <= 0) { crWarning("Replicate SPU: CreateContext failed"); return; } context->rserverCtx[NewServerIndex] = return_val; /* * Create a new CRContext record representing the state of the new * server (all default state). We'll diff against this to send all the * needed state to the server. * When done, we can dispose of this context. */ tempState = crStateCreateContext(NULL, context->visBits, NULL); /* Bind the remote context. The window's not really significant. 
*/ { int serverWindow; if (context->currentWindow) serverWindow = context->currentWindow->id[NewServerIndex]; else serverWindow = 0; if (replicate_spu.swap) crPackMakeCurrentSWAP( serverWindow, 0, return_val ); else crPackMakeCurrent( serverWindow, 0, return_val ); } /* Send state differences, all texture objects and all display lists * to the new server. * XXX We could be more efficient; in the case of a shared context, * we only need to replicate textures and display lists once... */ crStateDiffContext( tempState, context->State ); replicatespuReplicateTextures(tempState, context->State); replicatespuReplicateLists(tempState, context->displayListManager); /* XXX this call may not be needed */ replicatespuFlushOne(thread, NewServerIndex); /* Destroy the temporary context, no longer needed */ crStateDestroyContext( tempState ); }