/**
 * SPU init entry point for the pack SPU.
 *
 * Sets up locking / thread-specific data, records the SPU id, reads the
 * VBox configuration and, unless running in WDDM CrHgsmi mode, creates
 * the first packer thread (which connects to the server).
 *
 * \param id            numeric id of this SPU in the chain
 * \param child         downstream SPU (only passed to configuration)
 * \param self          this SPU (unused here)
 * \param context_id    unused
 * \param num_contexts  unused
 * \return pack SPU function table, or NULL if the initial thread
 *         could not be created.
 */
static SPUFunctions * packSPUInit( int id, SPU *child, SPU *self,
                                   unsigned int context_id, unsigned int num_contexts )
{
    ThreadInfo *thread;
    (void) context_id;
    (void) num_contexts;
    (void) child;
    (void) self;

    /* NOTE(review): on Windows the pack mutex is presumably initialized
     * elsewhere (e.g. DllMain) — only non-Windows inits it here; confirm. */
#if defined(CHROMIUM_THREADSAFE) && !defined(WINDOWS)
    crInitMutex(&_PackMutex);
#endif

#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&_PackerTSD);
    crInitTSD(&_PackTSD);
#endif

    pack_spu.id = id;

    packspuSetVBoxConfiguration( child );

#if defined(WINDOWS) && defined(VBOX_WITH_WDDM)
    pack_spu.bIsWDDMCrHgsmi = isVBoxWDDMCrHgsmi();
#endif

#ifdef VBOX_WITH_CRPACKSPU_DUMPER
    memset(&pack_spu.Dumper, 0, sizeof (pack_spu.Dumper));
#endif

    /* In WDDM CrHgsmi mode thread creation is deferred; otherwise create
     * the first thread now. */
    if (!CRPACKSPU_IS_WDDM_CRHGSMI())
    {
        /* This connects to the server, sets up the packer, etc. */
        thread = packspuNewThread(
#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
                NULL
#endif
                );

        if (!thread) {
            return NULL;
        }
        /* The first thread must land in slot 0 of the thread array. */
        CRASSERT( thread == &(pack_spu.thread[0]) );
        pack_spu.idxThreadInUse = 0;
    }

    packspuCreateFunctions();
    crStateInit();

    return &pack_functions;
}
/**
 * Matte spu init function
 *
 * Records the SPU id, copies the child and super SPU dispatch tables,
 * reads the matte configuration and allocates the context hash table.
 *
 * \param id            numeric id of this SPU in the chain
 * \param child         downstream SPU; may be NULL (then has_child stays 0)
 * \param self          this SPU; its superSPU dispatch table is copied
 * \param context_id    unused
 * \param num_contexts  unused
 * \return matte SPU function table.
 */
static SPUFunctions * matteSPUInit( int id, SPU *child, SPU *self,
                                    unsigned int context_id, unsigned int num_contexts )
{
    /* Fix: removed the misleading "(void) self;" — self IS used below
     * (self->superSPU->dispatch_table), so suppressing its "unused"
     * warning was dead code that misrepresented the contract. */
    (void) context_id;
    (void) num_contexts;

#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&matteTSD);
#endif

    matte_spu.id = id;
    matte_spu.has_child = 0;
    matte_spu.server = NULL;
    if (child)
    {
        /* Only wire up a child dispatch table when a child SPU exists. */
        crSPUInitDispatchTable( &(matte_spu.child) );
        crSPUCopyDispatchTable( &(matte_spu.child), &(child->dispatch_table) );
        matte_spu.has_child = 1;
    }
    crSPUInitDispatchTable( &(matte_spu.super) );
    crSPUCopyDispatchTable( &(matte_spu.super), &(self->superSPU->dispatch_table) );
    mattespuGatherConfiguration();

    matte_spu.contextTable = crAllocHashtable();

    return &matte_functions;
}
/*
 * Allocate the state (dirty) bits data structures.
 * This should be called before we create any contexts.
 * We'll also create the default/NULL context at this time and make
 * it the current context by default.  This means that if someone
 * tries to set GL state before calling MakeCurrent() they'll be
 * modifying the default state object, and not segfaulting on a NULL
 * pointer somewhere.
 */
void crStateInit(void)
{
    unsigned int i;

    /* Purely initialize the context bits */
    if (!__currentBits) {
        __currentBits = (CRStateBits *) crCalloc( sizeof(CRStateBits) );
        crStateClientInitBits( &(__currentBits->client) );
        crStateLightingInitBits( &(__currentBits->lighting) );
    } else
        /* Re-init is tolerated (warn only); the bits structure is reused. */
        crWarning("State tracker is being re-initialized..\n");

    /* Mark every context slot as free; slot 0 is re-claimed below. */
    for (i=0;i<CR_MAX_CONTEXTS;i++)
        g_availableContexts[i] = 0;

#ifdef CHROMIUM_THREADSAFE
    if (!__isContextTLSInited)
    {
# ifndef RT_OS_WINDOWS
        /* tls destructor is implemented for all platforms except windows*/
        crInitTSDF(&__contextTSD, crStateThreadTlsDtor);
# else
        /* windows should do cleanup via DllMain THREAD_DETACH notification */
        crInitTSD(&__contextTSD);
# endif
        __isContextTLSInited = 1;
    }
#endif

    if (defaultContext) {
        /* Free the default/NULL context.
         * Ensures context bits are reset */
#ifdef CHROMIUM_THREADSAFE
        /* Clear the current-context TLS before releasing the ref-counted
         * default context. */
        SetCurrentContext(NULL);
        VBoxTlsRefRelease(defaultContext);
#else
        crStateFreeContext(defaultContext);
        __currentContext = NULL;
#endif
    }

    /* Reset diff_api */
    crMemZero(&diff_api, sizeof(SPUDispatchTable));

    /* Allocate the default/NULL context */
    defaultContext = crStateCreateContextId(0, NULL, CR_RGB_BIT, NULL);
    CRASSERT(g_availableContexts[0] == 0);
    g_availableContexts[0] = 1; /* in use forever */

#ifdef CHROMIUM_THREADSAFE
    SetCurrentContext(defaultContext);
#else
    __currentContext = defaultContext;
#endif
}
/**
 * SPU init entry point for the hiddenline SPU.
 *
 * Records the SPU id, copies the child and super SPU dispatch tables,
 * reads the configuration, initializes the state tracker and allocates
 * the context table plus the SPU's mutex/TSD.
 *
 * \param id            numeric id of this SPU in the chain
 * \param child         downstream SPU; may be NULL (then has_child stays 0)
 * \param self          this SPU; its superSPU dispatch table is copied
 * \param context_id    unused
 * \param num_contexts  unused
 * \return hiddenline SPU function table.
 */
static SPUFunctions *hiddenlineSPUInit( int id, SPU *child, SPU *self,
                                        unsigned int context_id, unsigned int num_contexts )
{
    (void) context_id;
    (void) num_contexts;

#ifdef CHROMIUM_THREADSAFE
    crDebug("Hiddenline SPU: thread-safe");
#endif

    hiddenline_spu.id = id;
    hiddenline_spu.has_child = 0;
    if (child)
    {
        /* Only wire up a child dispatch table when a child SPU exists. */
        crSPUInitDispatchTable( &(hiddenline_spu.child) );
        crSPUCopyDispatchTable( &(hiddenline_spu.child), &(child->dispatch_table) );
        hiddenline_spu.has_child = 1;
    }
    crSPUInitDispatchTable( &(hiddenline_spu.super) );
    crSPUCopyDispatchTable( &(hiddenline_spu.super), &(self->superSPU->dispatch_table) );
    hiddenlinespuGatherConfiguration( child );

    /* We need to track state so that the packer can deal with pixel data */
    crStateInit();

    hiddenlinespuCreateFunctions();

    hiddenline_spu.contextTable = crAllocHashtable();
#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&_HiddenlineTSD);
#else
    hiddenline_spu.currentContext = NULL;
#endif

    crInitMutex(&(hiddenline_spu.mutex));

    return &hiddenline_functions;
}
/**
 * This function creates and initializes a new display list
 * manager.  It returns a pointer to the manager, or NULL in
 * the case of insufficient memory.  The dispatch table pointer
 * is passed in to allow the utilities to muck with the table
 * to gain functional control when GL calls are made.
 */
CRDLM DLM_APIENTRY *crDLMNewDLM(unsigned int userConfigSize, const CRDLMConfig *userConfig)
{
    CRDLM *dlm;

    /* This is the default configuration.  We'll overwrite it later
     * with user-supplied configuration information.
     */
    CRDLMConfig config = {
        CRDLM_DEFAULT_BUFFERSIZE,
    };

    dlm = crAlloc(sizeof(*dlm));
    if (!dlm) {
        return NULL;
    }

    /* Start off by initializing all entries that require further
     * memory allocation, so we can free up all the memory if there's
     * a problem.
     */
    if (!(dlm->displayLists = crAllocHashtable())) {
        /* Hash allocation failed: release the manager itself and bail. */
        crFree(dlm);
        return NULL;
    }

    /* The creator counts as the first user. */
    dlm->userCount = 1;

#ifdef CHROMIUM_THREADSAFE
    /* This mutex ensures that only one thread is changing the displayLists
     * hash at a time.  Note that we may also need a mutex to guarantee that
     * the hash is not changed by one thread while another thread is
     * traversing it; this issue has not yet been resolved.
     */
    crInitMutex(&(dlm->dlMutex));

    /* Although the thread-specific data (TSD) functions will initialize
     * the thread key themselves when needed, those functions do not allow
     * us to specify a thread destructor.  Since a thread could potentially
     * exit with considerable memory allocated (e.g. if a thread exits
     * after it has issued NewList but before EndList, and while there
     * are considerable content buffers allocated), I do the initialization
     * myself, in order to be able to reclaim those resources if a thread
     * exits.
     */
    crInitTSDF(&(dlm->tsdKey), threadDestructor);
    crInitTSD(&CRDLMTSDKey);
#endif

    /* Copy over any appropriate configuration values */
    if (userConfig != NULL) {
        /* Copy over as much configuration information as is provided.
         * Note that if the CRDLMConfig structure strictly grows, this
         * allows forward compatability - routines compiled with
         * older versions of the structure will only initialize that
         * section of the structure that they know about.
         */
        crMemcpy((void *)&config, (void *) userConfig,
            MIN(userConfigSize, sizeof(config)));
    }
    dlm->bufferSize = config.bufferSize;

    /* Return the pointer to the newly-allocated display list manager */
    return dlm;
}
/* Windows crap */
/*
 * DLL entry point for the OpenGL stub / ICD.
 *
 * PROCESS_ATTACH: init TSD + init mutex, optionally register the VEH
 * handler, then probe the HGCM host connection — load is refused
 * (FALSE) when the host cannot be reached, so the ICD is not used
 * without guest 3D acceleration.
 * THREAD_ATTACH/DETACH and PROCESS_DETACH notify the SPU dispatch
 * table so per-thread state can be attached/released.
 */
BOOL WINAPI DllMain(HINSTANCE hDLLInst, DWORD fdwReason, LPVOID lpvReserved)
{
    (void) lpvReserved;

    switch (fdwReason)
    {
        case DLL_PROCESS_ATTACH:
        {
            CRNetServer ns;

#ifdef CHROMIUM_THREADSAFE
            crInitTSD(&g_stubCurrentContextTSD);
#endif

            crInitMutex(&stub_init_mutex);

#ifdef VDBG_VEHANDLER
            vboxVDbgVEHandlerRegister();
#endif

            /* Probe connection to the host; a failure aborts the DLL load. */
            crNetInit(NULL, NULL);
            ns.name = "vboxhgcm://host:0";
            ns.buffer_size = 1024;
            crNetServerConnect(&ns
#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
                , NULL
#endif
                );
            if (!ns.conn)
            {
                crDebug("Failed to connect to host (is guest 3d acceleration enabled?), aborting ICD load.");
#ifdef VDBG_VEHANDLER
                vboxVDbgVEHandlerUnregister();
#endif
                return FALSE;
            }
            else
                /* Probe only — drop the connection again immediately. */
                crNetFreeConnection(ns.conn);
            break;
        }

        case DLL_PROCESS_DETACH:
        {
            /* do exactly the same thing as for DLL_THREAD_DETACH since
             * DLL_THREAD_DETACH is not called for the thread doing
             * DLL_PROCESS_DETACH according to msdn docs */
            stubSetCurrentContext(NULL);
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxDetachThread();
            }
            stubSPUSafeTearDown();
#ifdef CHROMIUM_THREADSAFE
            crFreeTSD(&g_stubCurrentContextTSD);
#endif
#ifdef VDBG_VEHANDLER
            vboxVDbgVEHandlerUnregister();
#endif
            break;
        }

        case DLL_THREAD_ATTACH:
        {
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxAttachThread();
            }
            break;
        }

        case DLL_THREAD_DETACH:
        {
            stubSetCurrentContext(NULL);
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxDetachThread();
            }
            break;
        }

        default:
            break;
    }

    return TRUE;
}
/* Windows crap */
/*
 * DLL entry point for the OpenGL stub / ICD (variant with optional
 * vectored-exception-handler / minidump support).
 *
 * PROCESS_ATTACH: init TSD + init mutex, optionally configure and
 * register the VEH minidump handler from CR_DBG_VEH_* environment
 * variables, then probe the HGCM host connection — load is refused
 * (FALSE) when the host cannot be reached.
 * THREAD_ATTACH/DETACH and PROCESS_DETACH notify the SPU dispatch
 * table so per-thread state can be attached/released.
 */
BOOL WINAPI DllMain(HINSTANCE hDLLInst, DWORD fdwReason, LPVOID lpvReserved)
{
    (void) lpvReserved;

    switch (fdwReason)
    {
        case DLL_PROCESS_ATTACH:
        {
            CRNetServer ns;
            const char * env;
#if defined(DEBUG_misha)
            /* Developer build: print symbol-load commands for the debugger. */
            HMODULE hCrUtil;
            char aName[MAX_PATH];

            GetModuleFileNameA(hDLLInst, aName, RT_ELEMENTS(aName));
            crDbgCmdSymLoadPrint(aName, hDLLInst);

            hCrUtil = GetModuleHandleA("VBoxOGLcrutil.dll");
            Assert(hCrUtil);
            crDbgCmdSymLoadPrint("VBoxOGLcrutil.dll", hCrUtil);
#endif
#ifdef CHROMIUM_THREADSAFE
            crInitTSD(&g_stubCurrentContextTSD);
#endif

            crInitMutex(&stub_init_mutex);

#ifdef VDBG_VEHANDLER
            env = crGetenv("CR_DBG_VEH_ENABLE");
            g_VBoxVehEnable = crStrParseI32(env,
# ifdef DEBUG_misha
                    1
# else
                    0
# endif
                    );

            if (g_VBoxVehEnable)
            {
                char procName[1024];
                size_t cProcName;
                size_t cChars;

                env = crGetenv("CR_DBG_VEH_FLAGS");
                g_VBoxVehFlags = crStrParseI32(env,
                        0
# ifdef DEBUG_misha
                        | VBOXVEH_F_BREAK
# else
                        | VBOXVEH_F_DUMP
# endif
                        );

                /* Build the wide-char dump-file prefix:
                 * "<dump dir>\" + VBOXMD_DUMP_NAME_PREFIX_W + "<process name>_".
                 * The "+ 26" headroom presumably reserves space for a
                 * suffix appended later (e.g. timestamp/index) — TODO confirm. */
                env = crGetenv("CR_DBG_VEH_DUMP_DIR");
                if (!env)
                    env = VBOXMD_DUMP_DIR_DEFAULT;

                g_cVBoxMdFilePrefixLen = strlen(env);

                if (RT_ELEMENTS(g_aszwVBoxMdFilePrefix) <= g_cVBoxMdFilePrefixLen + 26 + (sizeof (VBOXMD_DUMP_NAME_PREFIX_W) - sizeof (WCHAR)) / sizeof (WCHAR))
                {
                    /* Directory too long for the fixed buffer: fall back
                     * to an empty prefix. */
                    g_cVBoxMdFilePrefixLen = 0;
                    env = "";
                }

                mbstowcs_s(&cChars, g_aszwVBoxMdFilePrefix, g_cVBoxMdFilePrefixLen + 1, env, _TRUNCATE);

                Assert(cChars == g_cVBoxMdFilePrefixLen + 1);

                g_cVBoxMdFilePrefixLen = cChars - 1;

                /* Ensure the directory part ends with a backslash. */
                if (g_cVBoxMdFilePrefixLen && g_aszwVBoxMdFilePrefix[g_cVBoxMdFilePrefixLen - 1] != L'\\')
                    g_aszwVBoxMdFilePrefix[g_cVBoxMdFilePrefixLen++] = L'\\';

                memcpy(g_aszwVBoxMdFilePrefix + g_cVBoxMdFilePrefixLen, VBOXMD_DUMP_NAME_PREFIX_W, sizeof (VBOXMD_DUMP_NAME_PREFIX_W) - sizeof (WCHAR));
                g_cVBoxMdFilePrefixLen += (sizeof (VBOXMD_DUMP_NAME_PREFIX_W) - sizeof (WCHAR)) / sizeof (WCHAR);

                crGetProcName(procName, RT_ELEMENTS(procName));
                cProcName = strlen(procName);

                /* Append "<process name>_" only if it still fits. */
                if (RT_ELEMENTS(g_aszwVBoxMdFilePrefix) > g_cVBoxMdFilePrefixLen + cProcName + 1 + 26)
                {
                    mbstowcs_s(&cChars, g_aszwVBoxMdFilePrefix + g_cVBoxMdFilePrefixLen, cProcName + 1, procName, _TRUNCATE);
                    Assert(cChars == cProcName + 1);
                    g_cVBoxMdFilePrefixLen += cChars - 1;
                    g_aszwVBoxMdFilePrefix[g_cVBoxMdFilePrefixLen++] = L'_';
                }

                /* sanity */
                g_aszwVBoxMdFilePrefix[g_cVBoxMdFilePrefixLen] = L'\0';

                env = crGetenv("CR_DBG_VEH_DUMP_TYPE");
                g_enmVBoxMdDumpType = crStrParseI32(env,
                        MiniDumpNormal
                        | MiniDumpWithDataSegs
                        | MiniDumpWithFullMemory
                        | MiniDumpWithHandleData
//// | MiniDumpFilterMemory
//// | MiniDumpScanMemory
// | MiniDumpWithUnloadedModules
//// | MiniDumpWithIndirectlyReferencedMemory
//// | MiniDumpFilterModulePaths
// | MiniDumpWithProcessThreadData
// | MiniDumpWithPrivateReadWriteMemory
//// | MiniDumpWithoutOptionalData
// | MiniDumpWithFullMemoryInfo
// | MiniDumpWithThreadInfo
// | MiniDumpWithCodeSegs
// | MiniDumpWithFullAuxiliaryState
// | MiniDumpWithPrivateWriteCopyMemory
// | MiniDumpIgnoreInaccessibleMemory
// | MiniDumpWithTokenInformation
//// | MiniDumpWithModuleHeaders
//// | MiniDumpFilterTriage
                        );

                vboxVDbgVEHandlerRegister();
            }
#endif

            /* Probe connection to the host; a failure aborts the DLL load. */
            crNetInit(NULL, NULL);
            ns.name = "vboxhgcm://host:0";
            ns.buffer_size = 1024;
            crNetServerConnect(&ns
#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
                , NULL
#endif
                );
            if (!ns.conn)
            {
                crDebug("Failed to connect to host (is guest 3d acceleration enabled?), aborting ICD load.");
#ifdef VDBG_VEHANDLER
                if (g_VBoxVehEnable)
                    vboxVDbgVEHandlerUnregister();
#endif
                return FALSE;
            }
            else
            {
                /* Probe only — drop the connection again immediately. */
                crNetFreeConnection(ns.conn);
            }
#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
            VBoxCrHgsmiInit();
#endif
            break;
        }

        case DLL_PROCESS_DETACH:
        {
            /* do exactly the same thing as for DLL_THREAD_DETACH since
             * DLL_THREAD_DETACH is not called for the thread doing
             * DLL_PROCESS_DETACH according to msdn docs */
            stubSetCurrentContext(NULL);
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxDetachThread();
            }

#if defined(VBOX_WITH_CRHGSMI) && defined(IN_GUEST)
            VBoxCrHgsmiTerm();
#endif

            stubSPUSafeTearDown();
#ifdef CHROMIUM_THREADSAFE
            crFreeTSD(&g_stubCurrentContextTSD);
#endif

#ifdef VDBG_VEHANDLER
            if (g_VBoxVehEnable)
                vboxVDbgVEHandlerUnregister();
#endif
            break;
        }

        case DLL_THREAD_ATTACH:
        {
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxAttachThread();
            }
            break;
        }

        case DLL_THREAD_DETACH:
        {
            stubSetCurrentContext(NULL);
            if (stub_initialized)
            {
                CRASSERT(stub.spu);
                stub.spu->dispatch_table.VBoxDetachThread();
            }
            break;
        }

        default:
            break;
    }

    return TRUE;
}
/**
 * SPU init entry point for the tilesort SPU.
 *
 * Zeroes the SPU struct, sets up TSD/mutex, allocates the window,
 * context and display-list hash tables, connects to the servers,
 * computes the geometry buffer sizes from the configured buffer_size
 * and MTU, and installs the function/dispatch tables.
 *
 * \param id            numeric id of this SPU in the chain (also seeds RNG)
 * \param child         downstream SPU (only passed to configuration)
 * \param self          this SPU (unused here)
 * \param context_id    unused
 * \param num_contexts  unused
 * \return tilesort SPU function table.
 */
static SPUFunctions * tilesortSPUInit( int id, SPU *child, SPU *self,
                                       unsigned int context_id, unsigned int num_contexts )
{
    /* thread0 points into the static tilesort_spu struct, so it stays
     * valid across the crMemZero below. */
    ThreadInfo *thread0 = &(tilesort_spu.thread[0]);
    (void) context_id;
    (void) num_contexts;
    (void) child;
    (void) self;

#if DEBUG_FP_EXCEPTIONS
    /* Debug aid: unmask FP exceptions so they trap immediately. */
    {
        fpu_control_t mask;
        _FPU_GETCW(mask);
        mask &= ~(_FPU_MASK_IM | _FPU_MASK_DM | _FPU_MASK_ZM
                  | _FPU_MASK_OM | _FPU_MASK_UM);
        _FPU_SETCW(mask);
    }
#endif

    crRandSeed(id);

    crMemZero( &tilesort_spu, sizeof(TileSortSPU) );

#ifdef CHROMIUM_THREADSAFE
    crInitTSD(&_ThreadTSD);
    crSetTSD(&_ThreadTSD, thread0);
    crInitMutex(&_TileSortMutex);
#endif

    thread0->state_server_index = -1; /* one-time init for thread */

    tilesortspuInitEvaluators();

    /* Init window, context hash tables */
    tilesort_spu.windowTable = crAllocHashtable();
    tilesort_spu.contextTable = crAllocHashtable();
    tilesort_spu.listTable = crAllocHashtable();

    tilesort_spu.id = id;
    tilesort_spu.glassesType = RED_BLUE;
    tilesortspuSetAnaglyphMask(&tilesort_spu);

    tilesortspuGatherConfiguration( child );
    tilesortspuConnectToServers(); /* set up thread0's server connection */

    tilesort_spu.geom_buffer_size = tilesort_spu.buffer_size;

    /* geom_buffer_mtu must fit in data's part of our buffers */
    tilesort_spu.geom_buffer_mtu = crPackMaxData(tilesort_spu.buffer_size)
        /* 24 is the size of the bounds info packet
         * END_FLUFF is the size of data of the extra End opcode if needed
         * 4 since BoundsInfo opcode may take a whole 4 bytes
         * and 4 to let room for extra End's opcode, if needed
         */
        - (24+END_FLUFF+4+4);

    /* the geometry must also fit in the mtu */
    if (tilesort_spu.geom_buffer_mtu >
        tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24+END_FLUFF+4+4))
        tilesort_spu.geom_buffer_mtu =
            tilesort_spu.MTU - sizeof(CRMessageOpcodes) - (24+END_FLUFF+4+4);

    /* Adopt the byte-swap setting of the first server connection. */
    tilesort_spu.swap = thread0->netServer[0].conn->swap;

    tilesortspuInitThreadPacking( thread0 );

    tilesortspuCreateFunctions();
    crStateInit();
    tilesortspuCreateDiffAPI();

    /* special dispatch tables for display lists */
    if (tilesort_spu.listTrack || tilesort_spu.lazySendDLists)
    {
        crMemZero((void *)&tilesort_spu.packerDispatch,
                  sizeof tilesort_spu.packerDispatch);
        crSPUInitDispatchTable(&tilesort_spu.packerDispatch);
        tilesortspuLoadPackTable(&tilesort_spu.packerDispatch);

        crSPUInitDispatchTable(&tilesort_spu.stateDispatch);
        tilesortspuLoadStateTable(&tilesort_spu.stateDispatch);
    }

    if (tilesort_spu.useDMX)
    {
        /* load OpenGL */
        int n;
        crDebug("Tilesort SPU: Using DMX");
        n = crLoadOpenGL( &tilesort_spu.ws, NULL);
        if (!n) {
            crWarning("Tilesort SPU: Unable to load OpenGL, disabling DMX");
            tilesort_spu.useDMX = 0;
        }
    }
    else {
        crDebug("Tilesort SPU: Not using DMX");
    }

    crDebug("Tilesort SPU: ---------- End of Init -------------");
    return &tilesort_functions;
}