/**
 * eglReleaseThread: releases the calling thread's per-thread EGL state.
 *
 * Fixes vs. original: the prototype is now a strict C prototype ("(void)"
 * instead of "()"), and the TLS slot is cleared *before* the state block is
 * freed so the slot never holds a dangling pointer.
 *
 * NOTE(review): getTls() will allocate a state block if the thread has none,
 * only for us to free it again — harmless but wasteful; confirm whether a
 * plain RTTlsGet() would be safe here (g_tls may not be initialised yet).
 *
 * @returns EGL_TRUE (always succeeds, including when there is no state).
 */
DECLEXPORT(EGLBoolean) eglReleaseThread(void)
{
    struct VBEGLTLS *pTls = getTls();
    if (!pTls)
        return EGL_TRUE;
    /* Clear the slot first so it never points at freed memory. */
    RTTlsSet(g_tls, NULL);
    RTMemFree(pTls);
    return EGL_TRUE;
}
static struct VBEGLTLS *getTls(void) { struct VBEGLTLS *pTls; RTOnce(&g_tlsOnce, tlsInitOnce, NULL); pTls = (struct VBEGLTLS *)RTTlsGet(g_tls); if (RT_LIKELY(pTls)) return pTls; pTls = (struct VBEGLTLS *)RTMemAlloc(sizeof(*pTls)); if (!VALID_PTR(pTls)) return NULL; pTls->cErr = EGL_SUCCESS; pTls->enmAPI = EGL_NONE; pTls->hCurrent = EGL_NO_CONTEXT; pTls->hCurrentDisplay = EGL_NO_DISPLAY; pTls->hCurrentDraw = EGL_NO_SURFACE; pTls->hCurrentRead = EGL_NO_SURFACE; RTTlsSet(g_tls, pTls); return pTls; }
/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Bind this native thread to its per-VCpu structure via TLS so EMT
       assertions and lookups can identify it. */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create
             */
            if (   (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                && pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                /* Rendezvous has the highest priority of the serviceable work. */
                rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                /* NOTE(review): looks redundant — VM_FF_TESTANDCLEAR above
                   already cleared VM_FF_RESET; confirm before removing. */
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (   rc == VINF_EM_TERMINATE
                || pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (   RT_SUCCESS(rc)
            && pUVM->pVM)
        {
            PVM pVM = pUVM->pVM;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (   pVM->enmVMState == VMSTATE_RUNNING
                && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                if (EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION)
                    vmR3SetGuruMeditation(pVM);
            }
        }
    } /* forever */

    /*
     * Cleanup and exit.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (   idCpu == 0
        && pUVM->pVM)
    {
        /* EMT(0) tears down the shared VM structure and ring-0 side. */
        PVM pVM = pUVM->pVM;
        vmR3SetTerminated(pVM);
        pUVM->pVM = NULL;

        /** @todo SMP: This isn't 100% safe. We should wait for the other
         *        threads to finish before destroy the VM. */
        int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
/**
 * Entry point.
 *
 * Creates a minimal VM, opens all stress-test file endpoints and then runs
 * the async-completion stress pattern indefinitely (or until an error).
 *
 * Fix vs. original: the UVM reference handed out by VMR3Create is now
 * released with VMR3ReleaseUVM after VMR3Destroy, matching the cleanup in
 * the sibling tstPDMAsyncCompletion tool.
 *
 * @returns 0 on success, otherwise the number of errors hit (used as the
 *          process exit code).
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{
    int rcRet = 0; /* error count */
    (void)envp;    /* unused */

    RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);

    PVM pVM;
    PUVM pUVM;
    int rc = VMR3Create(1, NULL, NULL, NULL, NULL, NULL, &pVM, &pUVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Little hack to avoid the VM_ASSERT_EMT assertion.
         */
        RTTlsSet(pVM->pUVM->vm.s.idxTLS, &pVM->pUVM->aCpus[0]);
        pVM->pUVM->aCpus[0].pUVM = pVM->pUVM;
        pVM->pUVM->aCpus[0].vm.s.NativeThreadEMT = RTThreadNativeSelf();

        rc = tstPDMACStressTestPatternInit();
        if (RT_SUCCESS(rc))
        {
            unsigned cFilesOpened = 0;

            /* Open the endpoints. */
            for (cFilesOpened = 0; cFilesOpened < NR_OPEN_ENDPOINTS; cFilesOpened++)
            {
                rc = tstPDMACStressTestFileOpen(pVM, &g_aTestFiles[cFilesOpened], cFilesOpened);
                if (RT_FAILURE(rc))
                    break;
            }

            if (RT_SUCCESS(rc))
            {
                /* Tests are running now. */
                RTPrintf(TESTCASE ": Successfully opened all files. Running tests forever now or until an error is hit :)\n");
                RTThreadSleep(RT_INDEFINITE_WAIT);
            }

            /* Close opened endpoints (only the ones that actually opened). */
            for (unsigned i = 0; i < cFilesOpened; i++)
                tstPDMACStressTestFileClose(&g_aTestFiles[i]);

            tstPDMACStressTestPatternDestroy();
        }
        else
        {
            RTPrintf(TESTCASE ": failed to init test pattern!! rc=%Rrc\n", rc);
            rcRet++;
        }

        rc = VMR3Destroy(pUVM);
        AssertMsg(rc == VINF_SUCCESS, ("%s: Destroying VM failed rc=%Rrc!!\n", __FUNCTION__, rc));
        /* Drop the reference VMR3Create handed us; previously leaked. */
        VMR3ReleaseUVM(pUVM);
    }
    else
    {
        RTPrintf(TESTCASE ": failed to create VM!! rc=%Rrc\n", rc);
        rcRet++;
    }

    return rcRet;
}
/**
 * Entry point.
 *
 * Copies <source> to <dest> through the PDM async completion API, alternating
 * read and write passes of up to NR_TASKS parallel BUFFER_SIZE-byte requests
 * until the whole source has been written.
 *
 * Fix vs. original: the tail-batch computation of cTasksUsed was
 * mis-parenthesized — C's '+' binds tighter than '>', so
 * "a/B + a%B > 0 ? 1 : 0" parsed as "((a/B + a%B) > 0) ? 1 : 0", collapsing
 * the task count for the final partial batch to 0 or 1 instead of the number
 * of remaining buffers. Explicit parentheses now yield a/B plus one extra
 * task for a partial trailing buffer, which also guarantees the read loop
 * submits exactly g_cTasksLeft tasks.
 *
 * @returns 0 on success, otherwise 1 or the accumulated error count.
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{
    int rcRet = 0; /* error count */
    PPDMASYNCCOMPLETIONENDPOINT pEndpointSrc, pEndpointDst;
    (void)envp; /* unused */

    RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);

    if (argc != 3)
    {
        RTPrintf(TESTCASE ": Usage is ./tstPDMAsyncCompletion <source> <dest>\n");
        return 1;
    }

    PVM pVM;
    PUVM pUVM;
    int rc = VMR3Create(1, NULL, NULL, NULL, NULL, NULL, &pVM, &pUVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Little hack to avoid the VM_ASSERT_EMT assertion.
         */
        RTTlsSet(pVM->pUVM->vm.s.idxTLS, &pVM->pUVM->aCpus[0]);
        pVM->pUVM->aCpus[0].pUVM = pVM->pUVM;
        pVM->pUVM->aCpus[0].vm.s.NativeThreadEMT = RTThreadNativeSelf();

        /*
         * Create the template.
         */
        PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
        rc = PDMR3AsyncCompletionTemplateCreateInternal(pVM, &pTemplate, AsyncTaskCompleted, NULL, "Test");
        if (RT_FAILURE(rc))
        {
            /* NOTE(review): the early returns in this function leave the VM
               undestroyed; acceptable for a test tool (process exit reclaims
               everything), but worth confirming. */
            RTPrintf(TESTCASE ": Error while creating the template!! rc=%d\n", rc);
            return 1;
        }

        /*
         * Create event semaphore.
         */
        rc = RTSemEventCreate(&g_FinishedEventSem);
        AssertRC(rc);

        /*
         * Create the temporary buffers.
         */
        for (unsigned i = 0; i < NR_TASKS; i++)
        {
            g_AsyncCompletionTasksBuffer[i] = (uint8_t *)RTMemAllocZ(BUFFER_SIZE);
            if (!g_AsyncCompletionTasksBuffer[i])
            {
                RTPrintf(TESTCASE ": out of memory!\n");
                return ++rcRet;
            }
        }

        /* Create the destination as the async completion API can't do this. */
        RTFILE FileTmp;
        rc = RTFileOpen(&FileTmp, argv[2], RTFILE_O_READWRITE | RTFILE_O_OPEN_CREATE | RTFILE_O_DENY_NONE);
        if (RT_FAILURE(rc))
        {
            RTPrintf(TESTCASE ": Error while creating the destination!! rc=%d\n", rc);
            return ++rcRet;
        }
        RTFileClose(FileTmp);

        /* Create our file endpoint */
        rc = PDMR3AsyncCompletionEpCreateForFile(&pEndpointSrc, argv[1], 0, pTemplate);
        if (RT_SUCCESS(rc))
        {
            rc = PDMR3AsyncCompletionEpCreateForFile(&pEndpointDst, argv[2], 0, pTemplate);
            if (RT_SUCCESS(rc))
            {
                PDMR3PowerOn(pVM);

                /* Wait for all threads to finish initialization. */
                RTThreadSleep(100);

                int      fReadPass  = true;
                uint64_t cbSrc;
                size_t   offSrc     = 0;
                size_t   offDst     = 0;
                uint32_t cTasksUsed = 0;

                rc = PDMR3AsyncCompletionEpGetSize(pEndpointSrc, &cbSrc);
                if (RT_SUCCESS(rc))
                {
                    /* Copy the data. */
                    for (;;)
                    {
                        if (fReadPass)
                        {
                            /* Full batch if at least NR_TASKS buffers remain,
                               otherwise one task per remaining (possibly
                               partial) buffer. Parentheses fixed — see the
                               function docs. */
                            cTasksUsed = (BUFFER_SIZE * NR_TASKS) <= (cbSrc - offSrc)
                                       ? NR_TASKS
                                       :   ((cbSrc - offSrc) / BUFFER_SIZE)
                                         + ((((cbSrc - offSrc) % BUFFER_SIZE) > 0) ? 1 : 0);
                            g_cTasksLeft = cTasksUsed;

                            for (uint32_t i = 0; i < cTasksUsed; i++)
                            {
                                size_t cbRead = ((size_t)offSrc + BUFFER_SIZE) <= cbSrc ? BUFFER_SIZE : cbSrc - offSrc;
                                RTSGSEG DataSeg;

                                DataSeg.pvSeg = g_AsyncCompletionTasksBuffer[i];
                                DataSeg.cbSeg = cbRead;

                                rc = PDMR3AsyncCompletionEpRead(pEndpointSrc, offSrc, &DataSeg, 1, cbRead, NULL,
                                                                &g_AsyncCompletionTasks[i]);
                                AssertRC(rc);

                                offSrc += cbRead;
                                if (offSrc == cbSrc)
                                    break;
                            }
                        }
                        else
                        {
                            g_cTasksLeft = cTasksUsed;

                            for (uint32_t i = 0; i < cTasksUsed; i++)
                            {
                                size_t cbWrite = (offDst + BUFFER_SIZE) <= cbSrc ? BUFFER_SIZE : cbSrc - offDst;
                                RTSGSEG DataSeg;

                                DataSeg.pvSeg = g_AsyncCompletionTasksBuffer[i];
                                DataSeg.cbSeg = cbWrite;

                                rc = PDMR3AsyncCompletionEpWrite(pEndpointDst, offDst, &DataSeg, 1, cbWrite, NULL,
                                                                 &g_AsyncCompletionTasks[i]);
                                AssertRC(rc);

                                offDst += cbWrite;
                                if (offDst == cbSrc)
                                    break;
                            }
                        }

                        /* Block until AsyncTaskCompleted has signalled the last task. */
                        rc = RTSemEventWait(g_FinishedEventSem, RT_INDEFINITE_WAIT);
                        AssertRC(rc);

                        if (!fReadPass && (offDst == cbSrc))
                            break;
                        else if (fReadPass)
                            fReadPass = false;
                        else
                        {
                            cTasksUsed = 0;
                            fReadPass = true;
                        }
                    }
                }
                else
                {
                    RTPrintf(TESTCASE ": Error querying size of the endpoint!! rc=%d\n", rc);
                    rcRet++;
                }

                PDMR3PowerOff(pVM);
                PDMR3AsyncCompletionEpClose(pEndpointDst);
            }
            PDMR3AsyncCompletionEpClose(pEndpointSrc);
        }

        rc = VMR3Destroy(pUVM);
        AssertMsg(rc == VINF_SUCCESS, ("%s: Destroying VM failed rc=%Rrc!!\n", __FUNCTION__, rc));
        VMR3ReleaseUVM(pUVM);

        /*
         * Clean up.
         */
        for (uint32_t i = 0; i < NR_TASKS; i++)
        {
            RTMemFree(g_AsyncCompletionTasksBuffer[i]);
        }
    }
    else
    {
        RTPrintf(TESTCASE ": failed to create VM!! rc=%Rrc\n", rc);
        rcRet++;
    }

    return rcRet;
}