/**
 * Stroke-path display hook: translates a GDI stroked path into VRDP orders
 * (ELLIPSE or batched POLYLINE) for the remote side, falling back to plain
 * dirty-rectangle reporting whenever the path, brush or clipping is too
 * complex to encode as an order.
 *
 * NOTE(review): parameter semantics appear to follow the Win32 DDI
 * DrvStrokePath contract (pso = surface, ppo = path, pco = clip region,
 * pbo = brush, mix = ROP2 mix mode) -- confirm against the WDK docs.
 */
void vrdpDrvStrokePath(SURFOBJ *pso, PATHOBJ *ppo, CLIPOBJ *pco, XFORMOBJ *pxo, BRUSHOBJ *pbo, POINTL *pptlBrushOrg, LINEATTRS *plineattrs, MIX mix)
{
    PVBOXDISPDEV pDev = (PVBOXDISPDEV)pso->dhpdev;

    /*
     * StrokePath operation is supported by RDP_ORDER_POLYGON/POLYLINE/ELLIPSE.
     */
    VRDPCLIPRECTS clipRects;     /* Clip rectangles intersecting the path bounds. */
    int clipResult;              /* VRDP_CLIP_* result of the intersection query. */
    RECTFX rcfxBounds;           /* Path bounding box in 28.4 fixed point. */
    RECTL rclBoundsOrdered;      /* Same bounds as integers, normalized (ordered). */

    LOGF(("pso = %p, ppo = %p, pco = %p, pxo = %p, pbo = %p, pptlBrushOrg = %p, plineattrs = %p, mix = 0x%08X",
          pso, ppo, pco, pxo, pbo, pptlBrushOrg, plineattrs, mix));
    LOGF(("ppo: fl = 0x%08X, cCurves = %d", ppo->fl, ppo->cCurves));

    /* Fetch the fixed-point bounds of the path and convert them to an
     * integer rectangle, rounding outwards (floor on left/top, ceiling
     * on right/bottom) so the rect fully covers the path. */
    PATHOBJ_vGetBounds(ppo, &rcfxBounds);

    rclBoundsOrdered.left = FXTOLFLOOR(rcfxBounds.xLeft);
    rclBoundsOrdered.right = FXTOLCEILING(rcfxBounds.xRight);
    rclBoundsOrdered.top = FXTOLFLOOR(rcfxBounds.yTop);
    rclBoundsOrdered.bottom = FXTOLCEILING(rcfxBounds.yBottom);

    vrdpOrderRect(&rclBoundsOrdered);

    LOG(("ppo: bounds %x-%x, %x-%x, %d-%d %d-%d",
         rcfxBounds.xLeft, rcfxBounds.xRight, rcfxBounds.yTop, rcfxBounds.yBottom,
         rclBoundsOrdered.left, rclBoundsOrdered.right, rclBoundsOrdered.top, rclBoundsOrdered.bottom));

    clipResult = vrdpGetIntersectingClipRects(&clipRects, pso, &rclBoundsOrdered, pco, NULL);

    if (clipResult == VRDP_CLIP_NO_INTERSECTION)
    {
        /* Do nothing. The operation does not affect anything. */
        LOG(("VRDP_CLIP_NO_INTERSECTION!!!"));
        dumpPCO (&rclBoundsOrdered, pco);
    }
    else if (clipResult == VRDP_CLIP_TOO_MANY_RECTS)
    {
        /* A very complex clip. Better to emulate it. */
        LOG(("VRDP_CLIP_TOO_MANY_RECTS!!!"));
        dumpPCO (&rclBoundsOrdered, pco);

        vrdpReportDirtyRects(pDev, &clipRects);
    }
    else if (pbo->iSolidColor == 0xFFFFFFFF)
    {
        /* Not solid brushes are not supported; report the area dirty so the
         * host refreshes it from the framebuffer instead. */
        vrdpReportDirtyRects(pDev, &clipRects);
    }
    else if (ppo->fl & PO_ELLIPSE)
    {
        if (VBoxVBVAOrderSupported(&pDev->vbvaCtx, VRDE_ORDER_ELLIPSE))
        {
            VRDEORDERELLIPSE order;

            /* The +4/-4 nudge the 28.4 fixed-point bounds inwards by a
             * quarter pixel before rounding.  NOTE(review): presumably this
             * compensates for GDI's outside-the-pen bounds -- confirm. */
            order.pt1.x = (int16_t)FXTOLROUND(rcfxBounds.xLeft + 4);
            order.pt1.y = (int16_t)FXTOLROUND(rcfxBounds.yTop + 4);
            order.pt2.x = (int16_t)FXTOLROUND(rcfxBounds.xRight - 4);
            order.pt2.y = (int16_t)FXTOLROUND(rcfxBounds.yBottom - 4);

            order.mix = (uint8_t)(mix & 0x1F);   /* Low 5 bits carry the ROP2 mix. */
            order.fillMode = 0;
            order.rgb = vrdpColor2RGB(pso, pbo->iSolidColor);

            vrdpReportOrderGeneric(pDev, &clipRects, &order, sizeof (order), VRDE_ORDER_ELLIPSE);
        }
        else
        {
            WARN(("ELLIPSE not supported"));
            vrdpReportDirtyRects (pDev, &clipRects);
        }
    }
    else if (   (ppo->fl & PO_BEZIERS) == 0
             && (plineattrs->fl & LA_GEOMETRIC) == 0
             && plineattrs->pstyle == NULL)
    {
        /* Plain cosmetic, unstyled, non-bezier path: encode as one or more
         * POLYLINE orders, flushing whenever the point array fills up. */
        unsigned i;
        PATHDATA pd;
        BOOL bMore;
        VRDEORDERPOLYLINE order;
        VRDEORDERPOINT ptStart;      /* First point of the current subpath (for CLOSEFIGURE). */
        VRDEORDERBOUNDS bounds;      /* Bounding box of the points in the current order. */

        order.rgb = vrdpColor2RGB(pso, pbo->iSolidColor);
        order.mix = (uint8_t)(mix & 0x1F);

        PATHOBJ_vEnumStart(ppo);

        order.points.c = 0;

        do {
            POINTFIX *pptfx;
            VRDEORDERPOINT pt;

            bMore = PATHOBJ_bEnum (ppo, &pd);

            LOG(("pd: flags = 0x%08X, count = %d", pd.flags, pd.count));

            pptfx = &pd.pptfx[0];

            if (pd.flags & PD_BEGINSUBPATH)
            {
                /* Setup first point. Start a new order. */
                LOG(("BEGINSUBPATH"));

                Assert(order.points.c == 0);

                vrdpPointFX2Point(pptfx, &ptStart);
                order.ptStart = ptStart;
                pt = ptStart;
                bounds.pt1 = bounds.pt2 = ptStart;

                pptfx++;
                i = 1;
            }
            else
            {
                /* Continuation record of the same subpath: all points are data. */
                LOG(("Continue order"));

                i = 0;
            }

            for (; i < pd.count; i++, pptfx++)
            {
                LOG(("pd: %2d: %x,%x %d,%d",
                     i, pptfx->x, pptfx->y, FXTOLROUND(pptfx->x), FXTOLROUND(pptfx->y)));

                vrdpPointFX2Point (pptfx, &pt);
                vrdpPolyPointsAdd (&order.points, &pt);
                vrdpExtendOrderBounds (&bounds, &pt);

                if (order.points.c == RT_ELEMENTS(order.points.a))
                {
                    /* Flush the order and start a new order continuing from
                     * the last point reported. */
                    LOG(("Report order, points overflow."));

                    vrdpReportOrderGenericBounds(pDev, &clipRects, &bounds, &order, sizeof (order), VRDE_ORDER_POLYLINE);

                    order.points.c = 0;
                    order.ptStart = pt;
                    bounds.pt1 = bounds.pt2 = pt;
                }
            }

            if (pd.flags & PD_CLOSEFIGURE)
            {
                /* Encode the start point as the end point. */
                LOG(("Report order, CLOSEFIGURE"));

                if (   ptStart.x != pt.x
                    || ptStart.y != pt.y)
                {
                    Assert(order.points.c < RT_ELEMENTS(order.points.a));

                    vrdpPolyPointsAdd (&order.points, &ptStart);
                    vrdpExtendOrderBounds (&bounds, &ptStart);
                }
            }

            if (pd.flags & PD_ENDSUBPATH)
            {
                /* Finish the order. */
                LOG(("Report order, ENDSUBPATH"));

                if (order.points.c > 0)
                {
                    vrdpReportOrderGenericBounds(pDev, &clipRects, &bounds, &order, sizeof (order), VRDE_ORDER_POLYLINE);
                }

                order.points.c = 0;
            }
        } while (bMore);
    }
    else
    {
        /* Not supported. */
        WARN(("not supported: ppo->fl = %08X, plineattrs->fl = %08X, plineattrs->pstyle = %08X",
              ppo->fl, plineattrs->fl, plineattrs->pstyle));

        vrdpReportDirtyRects(pDev, &clipRects);
    }

    return;
}
static void Test4(unsigned cThreads, unsigned cSeconds, unsigned uWritePercent, bool fYield, bool fQuiet) { unsigned i; uint64_t acIterations[32]; RTTHREAD aThreads[RT_ELEMENTS(acIterations)]; AssertRelease(cThreads <= RT_ELEMENTS(acIterations)); RTTestSubF(g_hTest, "Test4 - %u threads, %u sec, %u%% writes, %syielding", cThreads, cSeconds, uWritePercent, fYield ? "" : "non-"); /* * Init globals. */ g_fYield = fYield; g_fQuiet = fQuiet; g_fTerminate = false; g_uWritePercent = uWritePercent; g_cConcurrentWriters = 0; g_cConcurrentReaders = 0; RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwInit(&g_CritSectRw), VINF_SUCCESS); /* * Create the threads and let them block on the semrw. */ RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwEnterExcl(&g_CritSectRw), VINF_SUCCESS); for (i = 0; i < cThreads; i++) { acIterations[i] = 0; RTTEST_CHECK_RC_RETV(g_hTest, RTThreadCreateF(&aThreads[i], Test4Thread, &acIterations[i], 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "test-%u", i), VINF_SUCCESS); } /* * Do the test run. */ uint32_t cErrorsBefore = RTTestErrorCount(g_hTest); uint64_t u64StartTS = RTTimeNanoTS(); RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&g_CritSectRw), VINF_SUCCESS); RTThreadSleep(cSeconds * 1000); ASMAtomicWriteBool(&g_fTerminate, true); uint64_t ElapsedNS = RTTimeNanoTS() - u64StartTS; /* * Clean up the threads and semaphore. */ for (i = 0; i < cThreads; i++) RTTEST_CHECK_RC(g_hTest, RTThreadWait(aThreads[i], 5000, NULL), VINF_SUCCESS); RTTEST_CHECK_MSG(g_hTest, g_cConcurrentWriters == 0, (g_hTest, "g_cConcurrentWriters=%u at end of test\n", g_cConcurrentWriters)); RTTEST_CHECK_MSG(g_hTest, g_cConcurrentReaders == 0, (g_hTest, "g_cConcurrentReaders=%u at end of test\n", g_cConcurrentReaders)); RTTEST_CHECK_RC(g_hTest, RTCritSectRwDelete(&g_CritSectRw), VINF_SUCCESS); if (RTTestErrorCount(g_hTest) != cErrorsBefore) RTThreadSleep(100); /* * Collect and display the results. 
*/ uint64_t cItrTotal = acIterations[0]; for (i = 1; i < cThreads; i++) cItrTotal += acIterations[i]; uint64_t cItrNormal = cItrTotal / cThreads; uint64_t cItrMinOK = cItrNormal / 20; /* 5% */ uint64_t cItrMaxDeviation = 0; for (i = 0; i < cThreads; i++) { uint64_t cItrDelta = RT_ABS((int64_t)(acIterations[i] - cItrNormal)); if (acIterations[i] < cItrMinOK) RTTestFailed(g_hTest, "Thread %u did less than 5%% of the iterations - %llu (it) vs. %llu (5%%) - %llu%%\n", i, acIterations[i], cItrMinOK, cItrDelta * 100 / cItrNormal); else if (cItrDelta > cItrNormal / 2) RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "Warning! Thread %u deviates by more than 50%% - %llu (it) vs. %llu (avg) - %llu%%\n", i, acIterations[i], cItrNormal, cItrDelta * 100 / cItrNormal); if (cItrDelta > cItrMaxDeviation) cItrMaxDeviation = cItrDelta; } //RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, // "Threads: %u Total: %llu Per Sec: %llu Avg: %llu ns Max dev: %llu%%\n", // cThreads, // cItrTotal, // cItrTotal / cSeconds, // ElapsedNS / cItrTotal, // cItrMaxDeviation * 100 / cItrNormal // ); // RTTestValue(g_hTest, "Thruput", cItrTotal * UINT32_C(1000000000) / ElapsedNS, RTTESTUNIT_CALLS_PER_SEC); RTTestValue(g_hTest, "Max diviation", cItrMaxDeviation * 100 / cItrNormal, RTTESTUNIT_PCT); }
/**
 * Creates an EGL pbuffer surface backed by a GLX pbuffer.
 *
 * Translates the EGL attribute list into the equivalent GLX attribute list
 * and calls glXCreatePbuffer().
 *
 * @returns Pbuffer handle tagged with VBEGL_PBUFFER_SURFACE, or
 *          EGL_NO_SURFACE on failure (EGL error is set in that case).
 * @param   hDisplay       The EGL display (an X11 Display pointer here).
 * @param   config         The framebuffer configuration (a GLXFBConfig).
 * @param   paAttributes   EGL_NONE-terminated list of attribute/value pairs,
 *                         may be NULL.
 */
DECLEXPORT(EGLSurface) eglCreatePbufferSurface(EGLDisplay hDisplay, EGLConfig config, EGLint const *paAttributes)
{
    Display *pDisplay = (Display *)hDisplay;
    enum { CPS_WIDTH = 0, CPS_HEIGHT, CPS_LARGEST, CPS_PRESERVED, CPS_TEX_FORMAT, CPS_TEX_TARGET, CPS_MIPMAP_TEX, CPS_END };
    int acIndices[CPS_END];            /* Per-attribute slot index into aAttributes, -1 if unset. */
    int aAttributes[CPS_END * 2];      /* GLX attribute/value pairs, None-terminated below. */
    int cIndex = 0;
    unsigned i;
    GLXPbuffer hPbuffer;

    if (!VALID_PTR(hDisplay))
    {
        setEGLError(EGL_NOT_INITIALIZED);
        return EGL_NO_SURFACE;
    }
    for (i = 0; i < RT_ELEMENTS(acIndices); ++i)
        acIndices[i] = -1;
    if (paAttributes != NULL)
        while (*paAttributes != EGL_NONE)
        {
            switch (*paAttributes)
            {
                case EGL_WIDTH:
                    setAttribute(&acIndices[CPS_WIDTH], &cIndex, aAttributes, GLX_PBUFFER_WIDTH, paAttributes[1]);
                    break;
                case EGL_HEIGHT:
                    /* Bug fix: EGL_HEIGHT must map to GLX_PBUFFER_HEIGHT; it was
                     * swapped with EGL_LARGEST_PBUFFER's GLX_LARGEST_PBUFFER. */
                    setAttribute(&acIndices[CPS_HEIGHT], &cIndex, aAttributes, GLX_PBUFFER_HEIGHT, paAttributes[1]);
                    break;
                case EGL_LARGEST_PBUFFER:
                    setAttribute(&acIndices[CPS_LARGEST], &cIndex, aAttributes, GLX_LARGEST_PBUFFER, paAttributes[1]);
                    break;
                case EGL_BUFFER_PRESERVED:
                    setAttribute(&acIndices[CPS_PRESERVED], &cIndex, aAttributes, GLX_PRESERVED_CONTENTS, paAttributes[1]);
                    break;
                case EGL_TEXTURE_FORMAT:
                    setAttribute(&acIndices[CPS_TEX_FORMAT], &cIndex, aAttributes, GLX_TEXTURE_FORMAT_EXT, paAttributes[1]);
                    break;
                case EGL_TEXTURE_TARGET:
                    setAttribute(&acIndices[CPS_TEX_TARGET], &cIndex, aAttributes, GLX_TEXTURE_TARGET_EXT, paAttributes[1]);
                    break;
                case EGL_MIPMAP_TEXTURE:
                    setAttribute(&acIndices[CPS_MIPMAP_TEX], &cIndex, aAttributes, GLX_MIPMAP_TEXTURE_EXT, paAttributes[1]);
                    break;
                case EGL_VG_ALPHA_FORMAT:
                case EGL_VG_COLORSPACE:
                {
                    /* OpenVG attributes are not supported. */
                    setEGLError(EGL_BAD_MATCH);
                    return EGL_NO_SURFACE;
                }
            }
            paAttributes += 2;
        }
    AssertRelease(cIndex < RT_ELEMENTS(aAttributes) - 1);
    /* NOTE(review): terminator is written at cIndex + 1; this assumes
     * setAttribute() leaves cIndex pointing at the last written value rather
     * than one past it -- confirm against setAttribute()'s contract. */
    aAttributes[cIndex + 1] = None;
    hPbuffer = glXCreatePbuffer(pDisplay, (GLXFBConfig)config, aAttributes);
    if (hPbuffer == None)
    {
        setEGLError(EGL_BAD_ALLOC);
        return EGL_NO_SURFACE;
    }
    AssertRelease(hPbuffer < VBEGL_WINDOW_SURFACE); /* Greater than the maximum XID. */
    clearEGLError();
    return (EGLSurface)(hPbuffer | VBEGL_PBUFFER_SURFACE);
}
/**
 * tstRTDigest entry point.
 *
 * Computes digests (CRC32/64, MD2/5, SHA-*) over files given on the command
 * line, using one of several methods: 'block' (read + incremental update),
 * 'file' (one-shot RTSha*DigestFromFile), or 'cvas' (process a NIST SHS
 * CAVS response file and verify each vector).
 *
 * Returns 0 on success, 1 on any error.
 */
int main(int argc, char **argv)
{
    RTR3InitExe(argc, &argv, 0);

    RTDIGESTTYPE enmDigestType = RTDIGESTTYPE_INVALID;
    const char *pszDigestType = "NotSpecified";   /* Human readable name for output. */

    enum
    {
        kMethod_Full,
        kMethod_Block,
        kMethod_File,
        kMethod_CVAS
    } enmMethod = kMethod_Block;

    uint64_t offStart = 0;          /* -o: file offset to start hashing at. */
    uint64_t cbMax = UINT64_MAX;    /* -l: max number of bytes to hash. */
    bool fTestcase = false;         /* -x: emit output as C testcase table rows. */

    static const RTGETOPTDEF s_aOptions[] =
    {
        { "--type", 't', RTGETOPT_REQ_STRING },
        { "--method", 'm', RTGETOPT_REQ_STRING },
        { "--help", 'h', RTGETOPT_REQ_NOTHING },
        { "--length", 'l', RTGETOPT_REQ_UINT64 },
        { "--offset", 'o', RTGETOPT_REQ_UINT64 },
        { "--testcase", 'x', RTGETOPT_REQ_NOTHING },
    };

    int ch;
    RTGETOPTUNION ValueUnion;
    RTGETOPTSTATE GetState;
    RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, RTGETOPTINIT_FLAGS_OPTS_FIRST);
    while ((ch = RTGetOpt(&GetState, &ValueUnion)))
    {
        switch (ch)
        {
            case 't':
                /* Map the digest name to type + display name. */
                if (!RTStrICmp(ValueUnion.psz, "crc32"))
                {
                    pszDigestType = "CRC32";
                    enmDigestType = RTDIGESTTYPE_CRC32;
                }
                else if (!RTStrICmp(ValueUnion.psz, "crc64"))
                {
                    pszDigestType = "CRC64";
                    enmDigestType = RTDIGESTTYPE_CRC64;
                }
                else if (!RTStrICmp(ValueUnion.psz, "md2"))
                {
                    pszDigestType = "MD2";
                    enmDigestType = RTDIGESTTYPE_MD2;
                }
                else if (!RTStrICmp(ValueUnion.psz, "md5"))
                {
                    pszDigestType = "MD5";
                    enmDigestType = RTDIGESTTYPE_MD5;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha1"))
                {
                    pszDigestType = "SHA-1";
                    enmDigestType = RTDIGESTTYPE_SHA1;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha224"))
                {
                    pszDigestType = "SHA-224";
                    enmDigestType = RTDIGESTTYPE_SHA224;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha256"))
                {
                    pszDigestType = "SHA-256";
                    enmDigestType = RTDIGESTTYPE_SHA256;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha384"))
                {
                    pszDigestType = "SHA-384";
                    enmDigestType = RTDIGESTTYPE_SHA384;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha512"))
                {
                    pszDigestType = "SHA-512";
                    enmDigestType = RTDIGESTTYPE_SHA512;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha512/224"))
                {
                    pszDigestType = "SHA-512/224";
                    enmDigestType = RTDIGESTTYPE_SHA512T224;
                }
                else if (!RTStrICmp(ValueUnion.psz, "sha512/256"))
                {
                    pszDigestType = "SHA-512/256";
                    enmDigestType = RTDIGESTTYPE_SHA512T256;
                }
                else
                {
                    Error("Invalid digest type: %s\n", ValueUnion.psz);
                    return 1;
                }
                break;

            case 'm':
                if (!RTStrICmp(ValueUnion.psz, "full"))
                    enmMethod = kMethod_Full;
                else if (!RTStrICmp(ValueUnion.psz, "block"))
                    enmMethod = kMethod_Block;
                else if (!RTStrICmp(ValueUnion.psz, "file"))
                    enmMethod = kMethod_File;
                else if (!RTStrICmp(ValueUnion.psz, "cvas"))
                    enmMethod = kMethod_CVAS;
                else
                {
                    Error("Invalid digest method: %s\n", ValueUnion.psz);
                    return 1;
                }
                break;

            case 'l':
                cbMax = ValueUnion.u64;
                break;

            case 'o':
                offStart = ValueUnion.u64;
                break;

            case 'x':
                fTestcase = true;
                break;

            case 'h':
                RTPrintf("usage: tstRTDigest -t <digest-type> [-o <offset>] [-l <length>] [-x] file [file2 [..]]\n");
                return 1;

            case VINF_GETOPT_NOT_OPTION:
            {
                /* Each non-option argument is a file to digest. */
                if (enmDigestType == RTDIGESTTYPE_INVALID)
                    return Error("No digest type was specified\n");

                switch (enmMethod)
                {
                    case kMethod_Full:
                        return Error("Full file method is not implemented\n");

                    case kMethod_File:
                        if (offStart != 0 || cbMax != UINT64_MAX)
                            return Error("The -l and -o options do not work with the 'file' method.");
                        switch (enmDigestType)
                        {
                            case RTDIGESTTYPE_SHA1:
                            {
                                char *pszDigest;
                                int rc = RTSha1DigestFromFile(ValueUnion.psz, &pszDigest, NULL, NULL);
                                if (RT_FAILURE(rc))
                                    return Error("RTSha1Digest(%s,) -> %Rrc\n", ValueUnion.psz, rc);
                                RTPrintf("%s %s\n", pszDigest, ValueUnion.psz);
                                RTStrFree(pszDigest);
                                break;
                            }

                            case RTDIGESTTYPE_SHA256:
                            {
                                char *pszDigest;
                                int rc = RTSha256DigestFromFile(ValueUnion.psz, &pszDigest, NULL, NULL);
                                if (RT_FAILURE(rc))
                                    return Error("RTSha256Digest(%s,) -> %Rrc\n", ValueUnion.psz, rc);
                                RTPrintf("%s %s\n", pszDigest, ValueUnion.psz);
                                RTStrFree(pszDigest);
                                break;
                            }
                            default:
                                return Error("The file method isn't implemented for this digest\n");
                        }
                        break;

                    case kMethod_Block:
                    {
                        /* Open the file, seek to the requested offset, then feed
                         * it through the incremental digest API in 64KB chunks.
                         * NOTE(review): SHA-224/384 and SHA-512/224, SHA-512/256
                         * can be selected with -t but fall into "Internal error #1"
                         * below -- not implemented for this method. */
                        RTFILE hFile;
                        int rc = RTFileOpen(&hFile, ValueUnion.psz, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE);
                        if (RT_FAILURE(rc))
                            return Error("RTFileOpen(,%s,) -> %Rrc\n", ValueUnion.psz, rc);
                        if (offStart != 0)
                        {
                            rc = RTFileSeek(hFile, offStart, RTFILE_SEEK_BEGIN, NULL);
                            if (RT_FAILURE(rc))
                                /* NOTE(review): '%ull' looks like a typo for '%llu' (or %RU64). */
                                return Error("RTFileSeek(%s,%ull) -> %Rrc\n", ValueUnion.psz, offStart, rc);
                        }

                        uint64_t cbMaxLeft = cbMax;      /* Bytes remaining of the -l budget. */
                        size_t cbRead;
                        uint8_t abBuf[_64K];
                        /* The digest string is formatted into the (now free) read buffer. */
                        char *pszDigest = (char *)&abBuf[0];
                        switch (enmDigestType)
                        {
                            case RTDIGESTTYPE_CRC32:
                            {
                                uint32_t uCRC32 = RTCrc32Start();
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    uCRC32 = RTCrc32Process(uCRC32, abBuf, cbRead);
                                }
                                uCRC32 = RTCrc32Finish(uCRC32);
                                RTStrPrintf(pszDigest, sizeof(abBuf), "%08RX32", uCRC32);
                                break;
                            }

                            case RTDIGESTTYPE_CRC64:
                            {
                                uint64_t uCRC64 = RTCrc64Start();
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    uCRC64 = RTCrc64Process(uCRC64, abBuf, cbRead);
                                }
                                uCRC64 = RTCrc64Finish(uCRC64);
                                RTStrPrintf(pszDigest, sizeof(abBuf), "%016RX64", uCRC64);
                                break;
                            }

                            case RTDIGESTTYPE_MD2:
                            {
                                RTMD2CONTEXT Ctx;
                                RTMd2Init(&Ctx);
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    RTMd2Update(&Ctx, abBuf, cbRead);
                                }
                                uint8_t abDigest[RTMD2_HASH_SIZE];
                                RTMd2Final(&Ctx, abDigest);
                                RTMd2ToString(abDigest, pszDigest, sizeof(abBuf));
                                break;
                            }

                            case RTDIGESTTYPE_MD5:
                            {
                                RTMD5CONTEXT Ctx;
                                RTMd5Init(&Ctx);
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    RTMd5Update(&Ctx, abBuf, cbRead);
                                }
                                uint8_t abDigest[RTMD5HASHSIZE];
                                RTMd5Final(abDigest, &Ctx);
                                RTMd5ToString(abDigest, pszDigest, sizeof(abBuf));
                                break;
                            }

                            case RTDIGESTTYPE_SHA1:
                            {
                                RTSHA1CONTEXT Ctx;
                                RTSha1Init(&Ctx);
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    RTSha1Update(&Ctx, abBuf, cbRead);
                                }
                                uint8_t abDigest[RTSHA1_HASH_SIZE];
                                RTSha1Final(&Ctx, abDigest);
                                RTSha1ToString(abDigest, pszDigest, sizeof(abBuf));
                                break;
                            }

                            case RTDIGESTTYPE_SHA256:
                            {
                                RTSHA256CONTEXT Ctx;
                                RTSha256Init(&Ctx);
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    RTSha256Update(&Ctx, abBuf, cbRead);
                                }
                                uint8_t abDigest[RTSHA256_HASH_SIZE];
                                RTSha256Final(&Ctx, abDigest);
                                RTSha256ToString(abDigest, pszDigest, sizeof(abBuf));
                                break;
                            }

                            case RTDIGESTTYPE_SHA512:
                            {
                                RTSHA512CONTEXT Ctx;
                                RTSha512Init(&Ctx);
                                for (;;)
                                {
                                    rc = MyReadFile(hFile, abBuf, sizeof(abBuf), &cbRead, &cbMaxLeft);
                                    if (RT_FAILURE(rc) || !cbRead)
                                        break;
                                    RTSha512Update(&Ctx, abBuf, cbRead);
                                }
                                uint8_t abDigest[RTSHA512_HASH_SIZE];
                                RTSha512Final(&Ctx, abDigest);
                                RTSha512ToString(abDigest, pszDigest, sizeof(abBuf));
                                break;
                            }

                            default:
                                return Error("Internal error #1\n");
                        }
                        RTFileClose(hFile);
                        if (RT_FAILURE(rc) && rc != VERR_EOF)
                        {
                            RTPrintf("Partial: %s  %s\n", pszDigest, ValueUnion.psz);
                            return Error("RTFileRead(%s) -> %Rrc\n", ValueUnion.psz, rc);
                        }

                        if (!fTestcase)
                            RTPrintf("%s  %s\n", pszDigest, ValueUnion.psz);
                        else if (offStart)
                            RTPrintf("        { &g_abRandom72KB[%#4llx], %5llu, \"%s\", \"%s %llu bytes @%llu\" },\n",
                                     offStart, cbMax - cbMaxLeft, pszDigest, pszDigestType, offStart, cbMax - cbMaxLeft);
                        else
                            RTPrintf("        { &g_abRandom72KB[0],     %5llu, \"%s\", \"%s %llu bytes\" },\n",
                                     cbMax - cbMaxLeft, pszDigest, pszDigestType, cbMax - cbMaxLeft);
                        break;
                    }

                    /*
                     * Process a SHS response file:
                     *     http://csrc.nist.gov/groups/STM/cavp/index.html#03
                     */
                    case kMethod_CVAS:
                    {
                        RTCRDIGEST hDigest;
                        int rc = RTCrDigestCreateByType(&hDigest, enmDigestType);
                        if (RT_FAILURE(rc))
                            return Error("Failed to create digest calculator for %s: %Rrc", pszDigestType, rc);

                        uint32_t const cbDigest = RTCrDigestGetHashSize(hDigest);
                        if (!cbDigest || cbDigest >= _1K)
                            return Error("Unexpected hash size: %#x\n", cbDigest);

                        PRTSTREAM pFile;
                        rc = RTStrmOpen(ValueUnion.psz, "r", &pFile);
                        if (RT_FAILURE(rc))
                            return Error("Failed to open CVAS file '%s': %Rrc", ValueUnion.psz, rc);

                        /*
                         * Parse the input file.
                         * ASSUME order: Len, Msg, MD.
                         */
                        static char s_szLine[_256K];
                        char *psz;
                        uint32_t cPassed = 0;
                        uint32_t cErrors = 0;
                        uint32_t iLine = 1;
                        for (;;)
                        {
                            psz = MyGetNextSignificantLine(pFile, s_szLine, sizeof(s_szLine), &iLine, &rc);
                            if (!psz)
                                break;

                            /* Skip [L = 20] stuff. */
                            if (*psz == '[')
                                continue;

                            /* Message length (in bits, converted to bytes below). */
                            uint64_t cMessageBits;
                            if (RTStrNICmp(psz, RT_STR_TUPLE("Len =")))
                                return Error("%s(%d): Expected 'Len =' found '%.10s...'", ValueUnion.psz, iLine, psz);
                            psz = RTStrStripL(psz + 5);
                            rc = RTStrToUInt64Full(psz, 0, &cMessageBits);
                            if (rc != VINF_SUCCESS)
                                return Error("%s(%d): Error parsing length '%s': %Rrc\n", ValueUnion.psz, iLine, psz, rc);

                            /* The message text (hex encoded). */
                            psz = MyGetNextSignificantLine(pFile, s_szLine, sizeof(s_szLine), &iLine, &rc);
                            if (!psz)
                                return Error("%s(%d): Expected message text not EOF.", ValueUnion.psz, iLine);
                            if (RTStrNICmp(psz, RT_STR_TUPLE("Msg =")))
                                return Error("%s(%d): Expected 'Msg =' found '%.10s...'", ValueUnion.psz, iLine, psz);
                            psz = RTStrStripL(psz + 5);

                            size_t const cbMessage = (cMessageBits + 7) / 8;
                            static uint8_t s_abMessage[sizeof(s_szLine) / 2];
                            if (cbMessage > 0)
                            {
                                rc = RTStrConvertHexBytes(psz, s_abMessage, cbMessage, 0 /*fFlags*/);
                                if (rc != VINF_SUCCESS)
                                    return Error("%s(%d): Error parsing message '%.10s...': %Rrc\n",
                                                 ValueUnion.psz, iLine, psz, rc);
                            }

                            /* The message digest (expected result, hex encoded). */
                            psz = MyGetNextSignificantLine(pFile, s_szLine, sizeof(s_szLine), &iLine, &rc);
                            if (!psz)
                                return Error("%s(%d): Expected message digest not EOF.", ValueUnion.psz, iLine);
                            if (RTStrNICmp(psz, RT_STR_TUPLE("MD =")))
                                return Error("%s(%d): Expected 'MD =' found '%.10s...'", ValueUnion.psz, iLine, psz);
                            psz = RTStrStripL(psz + 4);

                            static uint8_t s_abExpectedDigest[_1K];
                            rc = RTStrConvertHexBytes(psz, s_abExpectedDigest, cbDigest, 0 /*fFlags*/);
                            if (rc != VINF_SUCCESS)
                                return Error("%s(%d): Error parsing message digest '%.10s...': %Rrc\n",
                                             ValueUnion.psz, iLine, psz, rc);

                            /*
                             * Do the testing: hash the message and compare with
                             * the expected digest.
                             */
                            rc = RTCrDigestReset(hDigest);
                            if (rc != VINF_SUCCESS)
                                return Error("RTCrDigestReset failed: %Rrc", rc);

                            rc = RTCrDigestUpdate(hDigest, s_abMessage, cbMessage);
                            if (rc != VINF_SUCCESS)
                                return Error("RTCrDigestUpdate failed: %Rrc", rc);

                            static uint8_t s_abActualDigest[_1K];
                            rc = RTCrDigestFinal(hDigest, s_abActualDigest, cbDigest);
                            if (rc != VINF_SUCCESS)
                                return Error("RTCrDigestFinal failed: %Rrc", rc);

                            if (memcmp(s_abActualDigest, s_abExpectedDigest, cbDigest) == 0)
                                cPassed++;
                            else
                            {
                                Error("%s(%d): Message digest mismatch. Expected %.*RThxs, got %.*RThxs.",
                                      ValueUnion.psz, iLine, cbDigest, s_abExpectedDigest, cbDigest, s_abActualDigest);
                                cErrors++;
                            }
                        }

                        RTStrmClose(pFile);
                        if (cErrors > 0)
                            return Error("Failed: %u error%s (%u passed)", cErrors, cErrors == 1 ? "" : "s", cPassed);
                        RTPrintf("Passed %u test%s.\n", cPassed, cPassed == 1 ? "" : "s");
                        if (RT_FAILURE(rc))
                            return Error("Failed: %Rrc", rc);
                        break;
                    }

                    default:
                        return Error("Internal error #2\n");
                }
                break;
            }

            default:
                return RTGetOptPrintError(ch, &ValueUnion);
        }
    }

    return 0;
}
int main(int argc, char **argv) { RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB); /* * Options are mandatory. */ if (argc <= 1) return usage(); /* * Parse the options. */ static const RTGETOPTDEF s_aOptions[] = { { "--flags", 'f', RTGETOPT_REQ_STRING }, { "--groups", 'g', RTGETOPT_REQ_STRING }, { "--dest", 'd', RTGETOPT_REQ_STRING }, { "--what", 'o', RTGETOPT_REQ_STRING }, { "--which", 'l', RTGETOPT_REQ_STRING }, }; const char *pszFlags = ""; const char *pszGroups = ""; const char *pszDest = ""; SUPLOGGER enmWhich = SUPLOGGER_DEBUG; enum { kSupLoggerCtl_Set, kSupLoggerCtl_Create, kSupLoggerCtl_Destroy } enmWhat = kSupLoggerCtl_Set; int ch; int i = 1; RTGETOPTUNION Val; RTGETOPTSTATE GetState; RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0); while ((ch = RTGetOpt(&GetState, &Val))) { switch (ch) { case 'f': pszFlags = Val.psz; break; case 'g': pszGroups = Val.psz; break; case 'd': pszDest = Val.psz; break; case 'o': if (!strcmp(Val.psz, "set")) enmWhat = kSupLoggerCtl_Set; else if (!strcmp(Val.psz, "create")) enmWhat = kSupLoggerCtl_Create; else if (!strcmp(Val.psz, "destroy")) enmWhat = kSupLoggerCtl_Destroy; else { RTStrmPrintf(g_pStdErr, "SUPLoggerCtl: error: Unknown operation '%s'.\n", Val.psz); return 1; } break; case 'l': if (!strcmp(Val.psz, "debug")) enmWhich = SUPLOGGER_DEBUG; else if (!strcmp(Val.psz, "release")) enmWhich = SUPLOGGER_RELEASE; else { RTStrmPrintf(g_pStdErr, "SUPLoggerCtl: error: Unknown logger '%s'.\n", Val.psz); return 1; } break; case 'h': return usage(); case 'V': RTPrintf("%sr%s\n", RTBldCfgVersion(), RTBldCfgRevisionStr()); return 0; case VINF_GETOPT_NOT_OPTION: RTStrmPrintf(g_pStdErr, "SUPLoggerCtl: error: Unexpected argument '%s'.\n", Val.psz); return 1; default: return RTGetOptPrintError(ch, &Val); } } /* * Do the requested job. 
*/ int rc; switch (enmWhat) { case kSupLoggerCtl_Set: rc = SUPR3LoggerSettings(enmWhich, pszFlags, pszGroups, pszDest); break; case kSupLoggerCtl_Create: rc = SUPR3LoggerCreate(enmWhich, pszFlags, pszGroups, pszDest); break; case kSupLoggerCtl_Destroy: rc = SUPR3LoggerDestroy(enmWhich); break; default: rc = VERR_INTERNAL_ERROR; break; } if (RT_SUCCESS(rc)) RTPrintf("SUPLoggerCtl: Success\n"); else RTStrmPrintf(g_pStdErr, "SUPLoggerCtl: error: rc=%Rrc\n", rc); return RT_SUCCESS(rc) ? 0 : 1; }
/**
 * Gets the guest-CPU context suitable for dumping into the core file.
 *
 * Copies the general purpose, segment, control, debug, system and MSR state
 * from the CPUM context into the DBGF core-dump representation, plus the
 * extended (XSAVE) state when available.
 *
 * @param pVM       Pointer to the VM.
 * @param pCtx      Pointer to the guest-CPU context.
 * @param pDbgfCpu  Where to dump the guest-CPU data.
 */
static void dbgfR3GetCoreCpu(PVM pVM, PCPUMCTX pCtx, PDBGFCORECPU pDbgfCpu)
{
/* Copies one hidden segment register (base/limit/attributes/selector). */
#define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
    do { \
        (a_dbgfsel).uBase = (a_cpumselreg).u64Base; \
        (a_dbgfsel).uLimit = (a_cpumselreg).u32Limit; \
        (a_dbgfsel).uAttr = (a_cpumselreg).Attr.u; \
        (a_dbgfsel).uSel = (a_cpumselreg).Sel; \
    } while (0)

    /* General purpose registers. */
    pDbgfCpu->rax = pCtx->rax;
    pDbgfCpu->rbx = pCtx->rbx;
    pDbgfCpu->rcx = pCtx->rcx;
    pDbgfCpu->rdx = pCtx->rdx;
    pDbgfCpu->rsi = pCtx->rsi;
    pDbgfCpu->rdi = pCtx->rdi;
    pDbgfCpu->r8 = pCtx->r8;
    pDbgfCpu->r9 = pCtx->r9;
    pDbgfCpu->r10 = pCtx->r10;
    pDbgfCpu->r11 = pCtx->r11;
    pDbgfCpu->r12 = pCtx->r12;
    pDbgfCpu->r13 = pCtx->r13;
    pDbgfCpu->r14 = pCtx->r14;
    pDbgfCpu->r15 = pCtx->r15;
    pDbgfCpu->rip = pCtx->rip;
    pDbgfCpu->rsp = pCtx->rsp;
    pDbgfCpu->rbp = pCtx->rbp;

    /* Segment registers. */
    DBGFCOPYSEL(pDbgfCpu->cs, pCtx->cs);
    DBGFCOPYSEL(pDbgfCpu->ds, pCtx->ds);
    DBGFCOPYSEL(pDbgfCpu->es, pCtx->es);
    DBGFCOPYSEL(pDbgfCpu->fs, pCtx->fs);
    DBGFCOPYSEL(pDbgfCpu->gs, pCtx->gs);
    DBGFCOPYSEL(pDbgfCpu->ss, pCtx->ss);

    /* Control and debug registers. */
    pDbgfCpu->cr0 = pCtx->cr0;
    pDbgfCpu->cr2 = pCtx->cr2;
    pDbgfCpu->cr3 = pCtx->cr3;
    pDbgfCpu->cr4 = pCtx->cr4;
    AssertCompile(RT_ELEMENTS(pDbgfCpu->dr) == RT_ELEMENTS(pCtx->dr));
    for (unsigned i = 0; i < RT_ELEMENTS(pDbgfCpu->dr); i++)
        pDbgfCpu->dr[i] = pCtx->dr[i];

    /* Descriptor tables and task/LDT registers. */
    pDbgfCpu->gdtr.uAddr = pCtx->gdtr.pGdt;
    pDbgfCpu->gdtr.cb = pCtx->gdtr.cbGdt;
    pDbgfCpu->idtr.uAddr = pCtx->idtr.pIdt;
    pDbgfCpu->idtr.cb = pCtx->idtr.cbIdt;
    DBGFCOPYSEL(pDbgfCpu->ldtr, pCtx->ldtr);
    DBGFCOPYSEL(pDbgfCpu->tr, pCtx->tr);

    /* SYSENTER and model specific registers. */
    pDbgfCpu->sysenter.cs = pCtx->SysEnter.cs;
    pDbgfCpu->sysenter.eip = pCtx->SysEnter.eip;
    pDbgfCpu->sysenter.esp = pCtx->SysEnter.esp;
    pDbgfCpu->msrEFER = pCtx->msrEFER;
    pDbgfCpu->msrSTAR = pCtx->msrSTAR;
    pDbgfCpu->msrPAT = pCtx->msrPAT;
    pDbgfCpu->msrLSTAR = pCtx->msrLSTAR;
    pDbgfCpu->msrCSTAR = pCtx->msrCSTAR;
    pDbgfCpu->msrSFMASK = pCtx->msrSFMASK;
    pDbgfCpu->msrKernelGSBase = pCtx->msrKERNELGSBASE;
    pDbgfCpu->msrApicBase = pCtx->msrApicBase;
    pDbgfCpu->aXcr[0] = pCtx->aXcr[0];
    pDbgfCpu->aXcr[1] = pCtx->aXcr[1];

    /* Extended (XSAVE) state; size comes from the guest feature set and may
     * be zero when no extended state is exposed. */
    AssertCompile(sizeof(pDbgfCpu->ext) == sizeof(*pCtx->pXStateR3));
    pDbgfCpu->cbExt = pVM->cpum.ro.GuestFeatures.cbMaxExtendedState;
    if (RT_LIKELY(pDbgfCpu->cbExt))
        memcpy(&pDbgfCpu->ext, pCtx->pXStateR3, pDbgfCpu->cbExt);

#undef DBGFCOPYSEL
}
/**
 * Initializes the KVM GIM provider.
 *
 * Publishes the hypervisor-present CPUID bit, installs the KVM hypervisor
 * CPUID leaves, registers the KVM MSR ranges and sets up hypercall / \#UD
 * interception.  (Doc fix: the old comment documented a non-existent
 * 'uVersion' parameter.)
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) gimR3KvmInit(PVM pVM)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_KVM, VERR_INTERNAL_ERROR_5);

    int rc;
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;

    /*
     * Determine interface capabilities based on the version.
     */
    if (!pVM->gim.s.u32Version)
    {
        /* Basic features. */
        pKvm->uBaseFeat = 0
                        | GIM_KVM_BASE_FEAT_CLOCK_OLD
                        //| GIM_KVM_BASE_FEAT_NOP_IO_DELAY
                        //| GIM_KVM_BASE_FEAT_MMU_OP
                        | GIM_KVM_BASE_FEAT_CLOCK
                        //| GIM_KVM_BASE_FEAT_ASYNC_PF
                        //| GIM_KVM_BASE_FEAT_STEAL_TIME
                        //| GIM_KVM_BASE_FEAT_PV_EOI
                        | GIM_KVM_BASE_FEAT_PV_UNHALT
                        ;
        /* Rest of the features are determined in gimR3KvmInitCompleted(). */
    }

    /*
     * Expose HVP (Hypervisor Present) bit to the guest.
     */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);

    /*
     * Modify the standard hypervisor leaves for KVM.
     */
    CPUMCPUIDLEAF HyperLeaf;
    RT_ZERO(HyperLeaf);
    HyperLeaf.uLeaf = UINT32_C(0x40000000);
    HyperLeaf.uEax = UINT32_C(0x40000001); /* Minimum value for KVM is 0x40000001. */
    /* The EBX:ECX:EDX triple spells out the "KVMKVMKVM\0\0\0" signature. */
    HyperLeaf.uEbx = 0x4B4D564B; /* 'KVMK' */
    HyperLeaf.uEcx = 0x564B4D56; /* 'VMKV' */
    HyperLeaf.uEdx = 0x0000004D; /* 'M000' */
    rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Add KVM specific leaves.
     */
    HyperLeaf.uLeaf = UINT32_C(0x40000001);
    HyperLeaf.uEax = pKvm->uBaseFeat;
    HyperLeaf.uEbx = 0; /* Reserved */
    HyperLeaf.uEcx = 0; /* Reserved */
    HyperLeaf.uEdx = 0; /* Reserved */
    rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Insert all MSR ranges of KVM.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(g_aMsrRanges_Kvm); i++)
    {
        rc = CPUMR3MsrRangesInsert(pVM, &g_aMsrRanges_Kvm[i]);
        AssertLogRelRCReturn(rc, rc);
    }

    /*
     * Setup hypercall and #UD handling.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        VMMHypercallsEnable(&pVM->aCpus[i]);

    /* AMD uses VMMCALL natively; Intel/VIA use VMCALL.  The "wrong" one
     * raises #UD which is then patched/handled, hence fTrapXcptUD. */
    if (ASMIsAmdCpu())
    {
        pKvm->fTrapXcptUD = true;
        pKvm->uOpCodeNative = OP_VMMCALL;
    }
    else
    {
        Assert(ASMIsIntelCpu() || ASMIsViaCentaurCpu());
        pKvm->fTrapXcptUD = false;
        pKvm->uOpCodeNative = OP_VMCALL;
    }

    /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */
    if (!HMIsEnabled(pVM))
        pKvm->fTrapXcptUD = true;

    return VINF_SUCCESS;
}
/** * Initializes the COM runtime. * * This method must be called on each thread of the client application that * wants to access COM facilities. The initialization must be performed before * calling any other COM method or attempting to instantiate COM objects. * * On platforms using XPCOM, this method uses the following scheme to search for * XPCOM runtime: * * 1. If the VBOX_APP_HOME environment variable is set, the path it specifies * is used to search XPCOM libraries and components. If this method fails to * initialize XPCOM runtime using this path, it will immediately return a * failure and will NOT check for other paths as described below. * * 2. If VBOX_APP_HOME is not set, this methods tries the following paths in the * given order: * * a) Compiled-in application data directory (as returned by * RTPathAppPrivateArch()) * b) "/usr/lib/virtualbox" (Linux only) * c) "/opt/VirtualBox" (Linux only) * * The first path for which the initialization succeeds will be used. * * On MS COM platforms, the COM runtime is provided by the system and does not * need to be searched for. * * Once the COM subsystem is no longer necessary on a given thread, Shutdown() * must be called to free resources allocated for it. Note that a thread may * call Initialize() several times but for each of tese calls there must be a * corresponding Shutdown() call. * * @return S_OK on success and a COM result code in case of failure. */ HRESULT Initialize(bool fGui) { HRESULT rc = E_FAIL; #if !defined(VBOX_WITH_XPCOM) /* * We initialize COM in GUI thread in STA, to be compliant with QT and * OLE requirments (for example to allow D&D), while other threads * initialized in regular MTA. To allow fast proxyless access from * GUI thread to COM objects, we explicitly provide our COM objects * with free threaded marshaller. * !!!!! Please think twice before touching this code !!!!! */ DWORD flags = fGui ? 
COINIT_APARTMENTTHREADED | COINIT_SPEED_OVER_MEMORY : COINIT_MULTITHREADED | COINIT_DISABLE_OLE1DDE | COINIT_SPEED_OVER_MEMORY; rc = CoInitializeEx(NULL, flags); /* the overall result must be either S_OK or S_FALSE (S_FALSE means * "already initialized using the same apartment model") */ AssertMsg(rc == S_OK || rc == S_FALSE, ("rc=%08X\n", rc)); /* To be flow compatible with the XPCOM case, we return here if this isn't * the main thread or if it isn't its first initialization call. * Note! CoInitializeEx and CoUninitialize does it's own reference * counting, so this exercise is entirely for the EventQueue init. */ bool fRc; RTTHREAD hSelf = RTThreadSelf(); if (hSelf != NIL_RTTHREAD) ASMAtomicCmpXchgHandle(&gCOMMainThread, hSelf, NIL_RTTHREAD, fRc); else fRc = false; if (fGui) Assert(RTThreadIsMain(hSelf)); if (!fRc) { if ( gCOMMainThread == hSelf && SUCCEEDED(rc)) gCOMMainInitCount++; AssertComRC(rc); return rc; } Assert(RTThreadIsMain(hSelf)); /* this is the first main thread initialization */ Assert(gCOMMainInitCount == 0); if (SUCCEEDED(rc)) gCOMMainInitCount = 1; #else /* !defined (VBOX_WITH_XPCOM) */ /* Unused here */ NOREF(fGui); if (ASMAtomicXchgBool(&gIsXPCOMInitialized, true) == true) { /* XPCOM is already initialized on the main thread, no special * initialization is necessary on additional threads. Just increase * the init counter if it's a main thread again (to correctly support * nested calls to Initialize()/Shutdown() for compatibility with * Win32). 
*/ nsCOMPtr<nsIEventQueue> eventQ; rc = NS_GetMainEventQ(getter_AddRefs(eventQ)); if (NS_SUCCEEDED(rc)) { PRBool isOnMainThread = PR_FALSE; rc = eventQ->IsOnCurrentThread(&isOnMainThread); if (NS_SUCCEEDED(rc) && isOnMainThread) ++gXPCOMInitCount; } AssertComRC(rc); return rc; } Assert(RTThreadIsMain(RTThreadSelf())); /* this is the first initialization */ gXPCOMInitCount = 1; bool const fInitEventQueues = true; /* prepare paths for registry files */ char szCompReg[RTPATH_MAX]; char szXptiDat[RTPATH_MAX]; int vrc = GetVBoxUserHomeDirectory(szCompReg, sizeof(szCompReg)); AssertRCReturn(vrc, NS_ERROR_FAILURE); strcpy(szXptiDat, szCompReg); vrc = RTPathAppend(szCompReg, sizeof(szCompReg), "compreg.dat"); AssertRCReturn(vrc, NS_ERROR_FAILURE); vrc = RTPathAppend(szXptiDat, sizeof(szXptiDat), "xpti.dat"); AssertRCReturn(vrc, NS_ERROR_FAILURE); LogFlowFunc(("component registry : \"%s\"\n", szCompReg)); LogFlowFunc(("XPTI data file : \"%s\"\n", szXptiDat)); #if defined (XPCOM_GLUE) XPCOMGlueStartup(nsnull); #endif static const char *kAppPathsToProbe[] = { NULL, /* 0: will use VBOX_APP_HOME */ NULL, /* 1: will try RTPathAppPrivateArch() */ #ifdef RT_OS_LINUX "/usr/lib/virtualbox", "/opt/VirtualBox", #elif RT_OS_SOLARIS "/opt/VirtualBox/amd64", "/opt/VirtualBox/i386", #elif RT_OS_DARWIN "/Application/VirtualBox.app/Contents/MacOS", #endif }; /* Find out the directory where VirtualBox binaries are located */ for (size_t i = 0; i < RT_ELEMENTS(kAppPathsToProbe); ++ i) { char szAppHomeDir[RTPATH_MAX]; if (i == 0) { /* Use VBOX_APP_HOME if present */ vrc = RTEnvGetEx(RTENV_DEFAULT, "VBOX_APP_HOME", szAppHomeDir, sizeof(szAppHomeDir), NULL); if (vrc == VERR_ENV_VAR_NOT_FOUND) continue; AssertRC(vrc); } else if (i == 1) { /* Use RTPathAppPrivateArch() first */ vrc = RTPathAppPrivateArch(szAppHomeDir, sizeof(szAppHomeDir)); AssertRC(vrc); } else { /* Iterate over all other paths */ szAppHomeDir[RTPATH_MAX - 1] = '\0'; strncpy(szAppHomeDir, kAppPathsToProbe[i], RTPATH_MAX - 1); vrc 
= VINF_SUCCESS; } if (RT_FAILURE(vrc)) { rc = NS_ERROR_FAILURE; continue; } char szCompDir[RTPATH_MAX]; vrc = RTPathAppend(strcpy(szCompDir, szAppHomeDir), sizeof(szCompDir), "components"); if (RT_FAILURE(vrc)) { rc = NS_ERROR_FAILURE; continue; } LogFlowFunc(("component directory : \"%s\"\n", szCompDir)); nsCOMPtr<DirectoryServiceProvider> dsProv; dsProv = new DirectoryServiceProvider(); if (dsProv) rc = dsProv->init(szCompReg, szXptiDat, szCompDir, szAppHomeDir); else rc = NS_ERROR_OUT_OF_MEMORY; if (NS_FAILED(rc)) break; /* Setup the application path for NS_InitXPCOM2. Note that we properly * answer the NS_XPCOM_CURRENT_PROCESS_DIR query in our directory * service provider but it seems to be activated after the directory * service is used for the first time (see the source NS_InitXPCOM2). So * use the same value here to be on the safe side. */ nsCOMPtr <nsIFile> appDir; { char *appDirCP = NULL; vrc = RTStrUtf8ToCurrentCP(&appDirCP, szAppHomeDir); if (RT_SUCCESS(vrc)) { nsCOMPtr<nsILocalFile> file; rc = NS_NewNativeLocalFile(nsEmbedCString(appDirCP), PR_FALSE, getter_AddRefs(file)); if (NS_SUCCEEDED(rc)) appDir = do_QueryInterface(file, &rc); RTStrFree(appDirCP); } else rc = NS_ERROR_FAILURE; } if (NS_FAILED(rc)) break; /* Set VBOX_XPCOM_HOME to the same app path to make XPCOM sources that * still use it instead of the directory service happy */ vrc = RTEnvSetEx(RTENV_DEFAULT, "VBOX_XPCOM_HOME", szAppHomeDir); AssertRC(vrc); /* Finally, initialize XPCOM */ { nsCOMPtr<nsIServiceManager> serviceManager; rc = NS_InitXPCOM2(getter_AddRefs(serviceManager), appDir, dsProv); if (NS_SUCCEEDED(rc)) { nsCOMPtr<nsIComponentRegistrar> registrar = do_QueryInterface(serviceManager, &rc); if (NS_SUCCEEDED(rc)) { rc = registrar->AutoRegister(nsnull); if (NS_SUCCEEDED(rc)) { /* We succeeded, stop probing paths */ LogFlowFunc(("Succeeded.\n")); break; } } } } /* clean up before the new try */ rc = NS_ShutdownXPCOM(nsnull); if (i == 0) { /* We failed with VBOX_APP_HOME, don't probe 
other paths */ break; } } #endif /* !defined (VBOX_WITH_XPCOM) */ // for both COM and XPCOM, we only get here if this is the main thread; // only then initialize the autolock system (AutoLock.cpp) Assert(RTThreadIsMain(RTThreadSelf())); util::InitAutoLockSystem(); AssertComRC(rc); /* * Init the main event queue (ASSUMES it cannot fail). */ if (SUCCEEDED(rc)) EventQueue::init(); return rc; }
static status_t VBoxGuestHaikuAttach(const pci_info *pDevice) { status_t status; int rc = VINF_SUCCESS; int iResId = 0; struct VBoxGuestDeviceState *pState = &sState; static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES; PRTLOGGER pRelLogger; cUsers = 0; /* * Initialize IPRT R0 driver, which internally calls OS-specific r0 init. */ rc = RTR0Init(0); if (RT_FAILURE(rc)) { LogFunc(("RTR0Init failed.\n")); return ENXIO; } #ifdef DO_LOG /* * Create the release log. * (We do that here instead of common code because we want to log * early failures using the LogRel macro.) */ rc = RTLogCreate(&pRelLogger, 0|RTLOGFLAGS_PREFIX_THREAD /* fFlags */, "all", "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER | RTLOGDEST_USER, NULL); dprintf(MODULE_NAME ": RTLogCreate: %d\n", rc); if (RT_SUCCESS(rc)) { //RTLogGroupSettings(pRelLogger, g_szLogGrp); //RTLogFlags(pRelLogger, g_szLogFlags); //RTLogDestinations(pRelLogger, "/var/log/vboxguest.log"); RTLogRelSetDefaultInstance(pRelLogger); RTLogSetDefaultInstance(pRelLogger);//XXX } #endif Log((MODULE_NAME ": plip!\n")); LogAlways((MODULE_NAME ": plop!\n")); /* * Allocate I/O port resource. */ pState->uIOPortBase = pDevice->u.h0.base_registers[0]; //XXX check flags for IO ? if (pState->uIOPortBase) { /* * Map the MMIO region. */ uint32 phys = pDevice->u.h0.base_registers[1]; //XXX check flags for mem ? pState->VMMDevMemSize = pDevice->u.h0.base_register_sizes[1]; pState->iVMMDevMemAreaId = map_physical_memory("VirtualBox Guest MMIO", phys, pState->VMMDevMemSize, B_ANY_KERNEL_BLOCK_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &pState->pMMIOBase); if (pState->iVMMDevMemAreaId > 0 && pState->pMMIOBase) { /* * Call the common device extension initializer. 
*/ rc = VBoxGuestInitDevExt(&g_DevExt, pState->uIOPortBase, pState->pMMIOBase, pState->VMMDevMemSize, #if ARCH_BITS == 64 VBOXOSTYPE_Haiku_x64, #else VBOXOSTYPE_Haiku, #endif VMMDEV_EVENT_MOUSE_POSITION_CHANGED); if (RT_SUCCESS(rc)) { /* * Add IRQ of VMMDev. */ pState->iIrqResId = pDevice->u.h0.interrupt_line; rc = VBoxGuestHaikuAddIRQ(pState); if (RT_SUCCESS(rc)) { dprintf(MODULE_NAME ": loaded successfully\n"); return 0; } else dprintf((MODULE_NAME ":VBoxGuestInitDevExt failed.\n")); VBoxGuestDeleteDevExt(&g_DevExt); } else dprintf((MODULE_NAME ":VBoxGuestHaikuAddIRQ failed.\n")); } else dprintf((MODULE_NAME ":MMIO region setup failed.\n")); } else dprintf((MODULE_NAME ":IOport setup failed.\n")); RTR0Term(); return ENXIO; }
/**
 * Reads the next directory entry with extended information (Win32 backend).
 *
 * @returns IPRT status code.
 * @retval  VERR_NO_MORE_FILES when the end of the directory is reached.
 * @retval  VERR_BUFFER_OVERFLOW if the caller supplied buffer is too small;
 *          the required size is stored in @a *pcbDirEntry.
 *
 * @param   pDir                 Directory handle opened by RTDirOpen*.
 * @param   pDirEntry            Where to store the entry; variable sized
 *                               (trailing szName).
 * @param   pcbDirEntry          Optional in/out: size of @a pDirEntry on
 *                               input, required size on output.
 * @param   enmAdditionalAttribs Which additional attributes to fill in.
 * @param   fFlags               RTPATH_F_XXX flags (validated only).
 */
RTDECL(int) RTDirReadEx(PRTDIR pDir, PRTDIRENTRYEX pDirEntry, size_t *pcbDirEntry, RTFSOBJATTRADD enmAdditionalAttribs, uint32_t fFlags)
{
    /** @todo Symlinks: Find[First|Next]FileW will return info about the link, so RTPATH_F_FOLLOW_LINK is not handled correctly. */
    /*
     * Validate input.
     */
    if (!pDir || pDir->u32Magic != RTDIR_MAGIC)
    {
        AssertMsgFailed(("Invalid pDir=%p\n", pDir));
        return VERR_INVALID_PARAMETER;
    }
    if (!pDirEntry)
    {
        AssertMsgFailed(("Invalid pDirEntry=%p\n", pDirEntry));
        return VERR_INVALID_PARAMETER;
    }
    if (    enmAdditionalAttribs < RTFSOBJATTRADD_NOTHING
        ||  enmAdditionalAttribs > RTFSOBJATTRADD_LAST)
    {
        AssertMsgFailed(("Invalid enmAdditionalAttribs=%p\n", enmAdditionalAttribs));
        return VERR_INVALID_PARAMETER;
    }
    AssertMsgReturn(RTPATH_F_IS_VALID(fFlags, 0), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
    /* Minimum buffer must hold the fixed part plus a 1-char name + terminator. */
    size_t cbDirEntry = sizeof(*pDirEntry);
    if (pcbDirEntry)
    {
        cbDirEntry = *pcbDirEntry;
        if (cbDirEntry < RT_UOFFSETOF(RTDIRENTRYEX, szName[2]))
        {
            AssertMsgFailed(("Invalid *pcbDirEntry=%d (min %d)\n", *pcbDirEntry, RT_OFFSETOF(RTDIRENTRYEX, szName[2])));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Fetch data?
     * pDir->fDataUnread is set when pDir->Data already holds an entry not yet
     * returned to the caller (e.g. the first entry from FindFirstFileW).
     */
    if (!pDir->fDataUnread)
    {
        RTStrFree(pDir->pszName);
        pDir->pszName = NULL;
        BOOL fRc = FindNextFileW(pDir->hDir, &pDir->Data);
        if (!fRc)
        {
            int iErr = GetLastError();
            /* An invalid handle here means an empty/exhausted enumeration. */
            if (pDir->hDir == INVALID_HANDLE_VALUE || iErr == ERROR_NO_MORE_FILES)
                return VERR_NO_MORE_FILES;
            return RTErrConvertFromWin32(iErr);
        }
    }

    /*
     * Convert the filename to UTF-8 (cached in pDir until the next fetch).
     */
    if (!pDir->pszName)
    {
        int rc = RTUtf16ToUtf8((PCRTUTF16)pDir->Data.cFileName, &pDir->pszName);
        if (RT_FAILURE(rc))
        {
            pDir->pszName = NULL;
            return rc;
        }
        pDir->cchName = strlen(pDir->pszName);
    }

    /*
     * Check if we've got enough space to return the data.
     */
    const char *pszName = pDir->pszName;
    const size_t cchName = pDir->cchName;
    const size_t cbRequired = RT_OFFSETOF(RTDIRENTRYEX, szName[1]) + cchName;
    if (pcbDirEntry)
        *pcbDirEntry = cbRequired;
    if (cbRequired > cbDirEntry)
        return VERR_BUFFER_OVERFLOW; /* Entry stays unread; caller can retry with a larger buffer. */

    /*
     * Setup the returned data.
     */
    pDir->fDataUnread = false;
    pDirEntry->cbName = (uint16_t)cchName;
    Assert(pDirEntry->cbName == cchName);
    memcpy(pDirEntry->szName, pszName, cchName + 1);

    if (pDir->Data.cAlternateFileName[0])
    {
        /* copy and calc length (bounded by wszShortName capacity) */
        PCRTUTF16 pwszSrc = (PCRTUTF16)pDir->Data.cAlternateFileName;
        PRTUTF16 pwszDst = pDirEntry->wszShortName;
        uint32_t off = 0;
        while (pwszSrc[off] && off < RT_ELEMENTS(pDirEntry->wszShortName) - 1U)
        {
            pwszDst[off] = pwszSrc[off];
            off++;
        }
        pDirEntry->cwcShortName = (uint16_t)off;

        /* zero the rest */
        do
            pwszDst[off++] = '\0';
        while (off < RT_ELEMENTS(pDirEntry->wszShortName));
    }
    else
    {
        memset(pDirEntry->wszShortName, 0, sizeof(pDirEntry->wszShortName));
        pDirEntry->cwcShortName = 0;
    }

    pDirEntry->Info.cbObject = ((uint64_t)pDir->Data.nFileSizeHigh << 32)
                             | (uint64_t)pDir->Data.nFileSizeLow;
    pDirEntry->Info.cbAllocated = pDirEntry->Info.cbObject; /* no allocation info from Find*FileW */

    /* FILETIME is a 64-bit NT timestamp; the cast relies on this. */
    Assert(sizeof(uint64_t) == sizeof(pDir->Data.ftCreationTime));
    RTTimeSpecSetNtTime(&pDirEntry->Info.BirthTime, *(uint64_t *)&pDir->Data.ftCreationTime);
    RTTimeSpecSetNtTime(&pDirEntry->Info.AccessTime, *(uint64_t *)&pDir->Data.ftLastAccessTime);
    RTTimeSpecSetNtTime(&pDirEntry->Info.ModificationTime, *(uint64_t *)&pDir->Data.ftLastWriteTime);
    pDirEntry->Info.ChangeTime = pDirEntry->Info.ModificationTime; /* NT has no separate change time here. */

    pDirEntry->Info.Attr.fMode = rtFsModeFromDos((pDir->Data.dwFileAttributes << RTFS_DOS_SHIFT) & RTFS_DOS_MASK_NT,
                                                 pszName, cchName);

    /*
     * Requested attributes (we cannot provide anything actually).
     */
    switch (enmAdditionalAttribs)
    {
        case RTFSOBJATTRADD_EASIZE:
            pDirEntry->Info.Attr.enmAdditional = RTFSOBJATTRADD_EASIZE;
            pDirEntry->Info.Attr.u.EASize.cb = 0;
            break;

        case RTFSOBJATTRADD_UNIX:
            pDirEntry->Info.Attr.enmAdditional = RTFSOBJATTRADD_UNIX;
            pDirEntry->Info.Attr.u.Unix.uid = ~0U;
            pDirEntry->Info.Attr.u.Unix.gid = ~0U;
            pDirEntry->Info.Attr.u.Unix.cHardlinks = 1;
            pDirEntry->Info.Attr.u.Unix.INodeIdDevice = 0; /** @todo Use the volume serial number (see GetFileInformationByHandle). */
            pDirEntry->Info.Attr.u.Unix.INodeId = 0; /** @todo Use the fileid (see GetFileInformationByHandle). */
            pDirEntry->Info.Attr.u.Unix.fFlags = 0;
            pDirEntry->Info.Attr.u.Unix.GenerationId = 0;
            pDirEntry->Info.Attr.u.Unix.Device = 0;
            break;

        case RTFSOBJATTRADD_NOTHING:
            pDirEntry->Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING;
            break;

        case RTFSOBJATTRADD_UNIX_OWNER:
            pDirEntry->Info.Attr.enmAdditional = RTFSOBJATTRADD_UNIX_OWNER;
            pDirEntry->Info.Attr.u.UnixOwner.uid = ~0U;
            pDirEntry->Info.Attr.u.UnixOwner.szName[0] = '\0'; /** @todo return something sensible here. */
            break;

        case RTFSOBJATTRADD_UNIX_GROUP:
            pDirEntry->Info.Attr.enmAdditional = RTFSOBJATTRADD_UNIX_GROUP;
            pDirEntry->Info.Attr.u.UnixGroup.gid = ~0U;
            pDirEntry->Info.Attr.u.UnixGroup.szName[0] = '\0';
            break;

        default:
            AssertMsgFailed(("Impossible!\n"));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
/**
 * Changes the tracing configuration of the VM.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NOT_FOUND if any of the trace point groups mentioned in the
 *          config string cannot be found. (Or if the string cannot be made
 *          sense of.) No change made.
 * @retval  VERR_INVALID_VM_HANDLE
 * @retval  VERR_INVALID_POINTER
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszConfig   The configuration change specification.
 *
 *                      Trace point group names, optionally prefixed by a '-' to
 *                      indicate that the group is being disabled. A special
 *                      group 'all' can be used to enable or disable all trace
 *                      points.
 *
 *                      Drivers, devices and USB devices each have their own
 *                      trace point group which can be accessed by prefixing
 *                      their official PDM name by 'drv', 'dev' or 'usb'
 *                      respectively.
 */
VMMDECL(int) DBGFR3TraceConfig(PVM pVM, const char *pszConfig)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrReturn(pszConfig, VERR_INVALID_POINTER);
    if (pVM->hTraceBufR3 == NIL_RTTRACEBUF)
        return VERR_DBGF_NO_TRACE_BUFFER;

    /*
     * We do this in two passes, the first pass just validates the input string
     * and the second applies the changes.
     *
     * Bugfixes vs. the previous revision:
     *   - the loop ran 'uPass < 1', so the apply pass (guarded by uPass != 0)
     *     never executed and the function was a no-op;
     *   - whitespace was skipped with 'continue' without advancing the cursor,
     *     causing an infinite loop on any space in the input;
     *   - the cursor is now a per-pass local so the second pass rescans the
     *     string from the start.
     */
    for (uint32_t uPass = 0; uPass < 2; uPass++)
    {
        const char *psz = pszConfig; /* fresh cursor for each pass */
        char        ch;
        while ((ch = *psz) != '\0')
        {
            if (RT_C_IS_SPACE(ch))
            {
                psz++;
                continue;
            }

            /*
             * Operation prefix: 'no', '+', '-', '!' and '~' toggle/select
             * enabling vs. disabling; they may be stacked.
             */
            bool fNo = false;
            do
            {
                if (ch == 'n' && psz[1] == 'o')
                {
                    fNo = !fNo;
                    psz++;
                }
                else if (ch == '+')
                    fNo = false;
                else if (ch == '-' || ch == '!' || ch == '~')
                    fNo = !fNo;
                else
                    break;
            } while ((ch = *++psz) != '\0');
            if (ch == '\0')
                break;

            /*
             * Extract the name (runs until whitespace or punctuation).
             */
            const char *pszName = psz;
            while (   ch != '\0'
                   && !RT_C_IS_SPACE(ch)
                   && !RT_C_IS_PUNCT(ch))
                ch = *++psz;
            size_t const cchName = psz - pszName;

            /*
             * 'all' - special group that enables or disables all trace points.
             */
            if (cchName == 3 && !strncmp(pszName, "all", 3))
            {
                if (uPass != 0)
                {
                    uint32_t iCpu = pVM->cCpus;
                    if (!fNo)
                        while (iCpu-- > 0)
                            pVM->aCpus[iCpu].fTraceGroups = UINT32_MAX;
                    else
                        while (iCpu-- > 0)
                            pVM->aCpus[iCpu].fTraceGroups = 0;
                    PDMR3TracingConfig(pVM, NULL, 0, !fNo, uPass > 0);
                }
            }
            else
            {
                /*
                 * A specific group, try the VMM first then PDM.
                 */
                uint32_t i = RT_ELEMENTS(g_aVmmTpGroups);
                while (i-- > 0)
                    if (   g_aVmmTpGroups[i].cchName == cchName
                        && !strncmp(g_aVmmTpGroups[i].pszName, pszName, cchName))
                    {
                        if (uPass != 0)
                        {
                            uint32_t iCpu = pVM->cCpus;
                            if (!fNo)
                                while (iCpu-- > 0)
                                    pVM->aCpus[iCpu].fTraceGroups |= g_aVmmTpGroups[i].fMask;
                            else
                                while (iCpu-- > 0)
                                    pVM->aCpus[iCpu].fTraceGroups &= ~g_aVmmTpGroups[i].fMask;
                        }
                        break;
                    }

                /* i wraps to UINT32_MAX when the VMM table had no match. */
                if (i == UINT32_MAX)
                {
                    int rc = PDMR3TracingConfig(pVM, pszName, cchName, !fNo, uPass > 0);
                    if (RT_FAILURE(rc))
                        return rc;
                }
            }
        }
    }

    return VINF_SUCCESS;
}
/**
 * Verifies that the ring-0 image loaded by the kernel matches the bits the
 * ring-3 loader produced (NT/Windows backend).
 *
 * @returns VINF_SUCCESS if the images match, VERR_LDR_MISMATCH_NATIVE on a
 *          genuine difference, VERR_INTERNAL_ERROR_4 if there is no NT
 *          section object to compare against.
 * @param   pDevExt         Device extension (unused here).
 * @param   pImage          The image record; pvImage points at the kernel's copy.
 * @param   pbImageBits     The ring-3 supplied image bits to compare with.
 * @param   pReq            The load request (unused here).
 */
int VBOXCALL supdrvOSLdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, const uint8_t *pbImageBits, PSUPLDRLOAD pReq)
{
    NOREF(pDevExt); NOREF(pReq);
    if (pImage->pvNtSectionObj)
    {
        /*
         * Usually, the entire image matches exactly.
         */
        if (!memcmp(pImage->pvImage, pbImageBits, pImage->cbImageBits))
            return VINF_SUCCESS;

        /*
         * However, on Windows Server 2003 (sp2 x86) both import thunk tables
         * are fixed up and we typically get a mismatch in the INIT section.
         *
         * So, lets see if everything matches when excluding the
         * OriginalFirstThunk tables. To make life simpler, set the max number
         * of imports to 16 and just record and sort the locations that needs
         * to be excluded from the comparison.
         */
        /* Locate the NT headers; with no MZ stub the NT header is at offset 0. */
        IMAGE_NT_HEADERS const *pNtHdrs;
        pNtHdrs = (IMAGE_NT_HEADERS const *)(pbImageBits
                                             + (  *(uint16_t *)pbImageBits == IMAGE_DOS_SIGNATURE
                                                ? ((IMAGE_DOS_HEADER const *)pbImageBits)->e_lfanew
                                                : 0));
        /* Sanity check the headers and the import directory bounds before trusting them. */
        if (   pNtHdrs->Signature == IMAGE_NT_SIGNATURE
            && pNtHdrs->OptionalHeader.Magic == IMAGE_NT_OPTIONAL_HDR_MAGIC
            && pNtHdrs->OptionalHeader.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_IMPORT
            && pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR)
            && pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress > sizeof(IMAGE_NT_HEADERS)
            && pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress < pImage->cbImageBits
           )
        {
            /* RVA ranges to skip during the comparison, kept sorted by uRva. */
            struct MyRegion
            {
                uint32_t uRva;
                uint32_t cb;
            } aExcludeRgns[16];
            unsigned cExcludeRgns = 0;
            uint32_t cImpsLeft = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].Size
                               / sizeof(IMAGE_IMPORT_DESCRIPTOR);
            IMAGE_IMPORT_DESCRIPTOR const *pImp;
            pImp = (IMAGE_IMPORT_DESCRIPTOR const *)(pbImageBits
                                                     + pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress);
            while (   cImpsLeft-- > 0
                   && cExcludeRgns < RT_ELEMENTS(aExcludeRgns))
            {
                /* Only exclude the OriginalFirstThunk table when it is distinct
                   from FirstThunk and lies within the image. */
                uint32_t uRvaThunk = pImp->OriginalFirstThunk;
                if (    uRvaThunk > sizeof(IMAGE_NT_HEADERS)
                    &&  uRvaThunk <= pImage->cbImageBits - sizeof(IMAGE_THUNK_DATA)
                    &&  uRvaThunk != pImp->FirstThunk)
                {
                    /* Find the size of the thunk table. */
                    IMAGE_THUNK_DATA const *paThunk = (IMAGE_THUNK_DATA const *)(pbImageBits + uRvaThunk);
                    uint32_t cMaxThunks = (pImage->cbImageBits - uRvaThunk) / sizeof(IMAGE_THUNK_DATA);
                    uint32_t cThunks = 0;
                    while (cThunks < cMaxThunks && paThunk[cThunks].u1.Function != 0)
                        cThunks++;

                    /* Ordered table insert. */
                    unsigned i = 0;
                    for (; i < cExcludeRgns; i++)
                        if (uRvaThunk < aExcludeRgns[i].uRva)
                            break;
                    if (i != cExcludeRgns)
                        memmove(&aExcludeRgns[i + 1], &aExcludeRgns[i], (cExcludeRgns - i) * sizeof(aExcludeRgns[0]));
                    aExcludeRgns[i].uRva = uRvaThunk;
                    aExcludeRgns[i].cb = cThunks * sizeof(IMAGE_THUNK_DATA);
                    cExcludeRgns++;
                }

                /* advance */
                pImp++;
            }

            /*
             * Ok, do the comparison: compare the gaps between (and after) the
             * sorted exclusion regions.
             */
            int iDiff = 0;
            uint32_t uRvaNext = 0;
            for (unsigned i = 0; !iDiff && i < cExcludeRgns; i++)
            {
                if (uRvaNext < aExcludeRgns[i].uRva)
                    iDiff = supdrvNtCompare(pImage, pbImageBits, uRvaNext, aExcludeRgns[i].uRva - uRvaNext);
                uRvaNext = aExcludeRgns[i].uRva + aExcludeRgns[i].cb;
            }
            if (!iDiff && uRvaNext < pImage->cbImageBits)
                iDiff = supdrvNtCompare(pImage, pbImageBits, uRvaNext, pImage->cbImageBits - uRvaNext);
            if (!iDiff)
                return VINF_SUCCESS;
        }
        else
            /* Headers look bogus; run the full compare for its logging side effects.
               NOTE(review): presumably supdrvNtCompare logs the difference - confirm. */
            supdrvNtCompare(pImage, pbImageBits, 0, pImage->cbImageBits);
        return VERR_LDR_MISMATCH_NATIVE;
    }
    return VERR_INTERNAL_ERROR_4;
}
/**
 * Parses out the next block from a version string.
 *
 * A block is either a run of digits or a run of non-digit, non-punctuation
 * characters.  Pre-release tags (RC, PRE, GAMMA, BETA, ALPHA) are mapped to
 * negative values so that e.g. "1.0BETA" sorts before "1.0".
 *
 * @returns true if numeric, false if not.
 * @param   ppszVer             The string cursor, IN/OUT.  On return it points
 *                              past the block and any single trailing
 *                              punctuation character.
 * @param   pi32Value           Where to return the value if numeric (0 if not).
 * @param   pcchBlock           Where to return the block length.
 */
static bool rtStrVersionParseBlock(const char **ppszVer, int32_t *pi32Value, size_t *pcchBlock)
{
    const char *psz = *ppszVer;

    /*
     * Check for end-of-string.
     */
    if (!*psz)
    {
        *pi32Value = 0;
        *pcchBlock = 0;
        return false;
    }

    /*
     * Try convert the block to a number the simple way.
     */
    char ch;
    bool fNumeric = RT_C_IS_DIGIT(*psz);
    if (fNumeric)
    {
        /* Advance past the digit run, then convert it. */
        do
            ch = *++psz;
        while (ch && RT_C_IS_DIGIT(ch));

        int rc = RTStrToInt32Ex(*ppszVer, NULL, 10, pi32Value);
        if (RT_FAILURE(rc) || rc == VWRN_NUMBER_TOO_BIG)
        {
            AssertRC(rc);
            fNumeric = false;
            *pi32Value = 0;
        }
    }
    else
    {
        /*
         * Find the end of the current string.  Make a special case for SVN
         * revision numbers that immediately follows a release tag string.
         */
        do
            ch = *++psz;
        while (    ch
               && !RT_C_IS_DIGIT(ch)
               && !RTSTRVER_IS_PUNCTUACTION(ch));

        size_t cchBlock = psz - *ppszVer;
        /* 'r<digits>' straight after a tag: treat the 'r' as part of the next
           (numeric) block, e.g. "RC1r12345". */
        if (   cchBlock > 1
            && psz[-1] == 'r'
            && RT_C_IS_DIGIT(*psz))
        {
            psz--;
            cchBlock--;
        }

        /*
         * Translate standard pre release terms to negative values.
         */
        static const struct
        {
            size_t      cch;
            const char *psz;
            int32_t     iValue;
        } s_aTerms[] =
        {
            { 2, "RC",    -100000 },
            { 3, "PRE",   -200000 },
            { 5, "GAMMA", -300000 },
            { 4, "BETA",  -400000 },
            { 5, "ALPHA", -500000 }
        };

        int32_t iVal1 = 0;
        for (unsigned i = 0; i < RT_ELEMENTS(s_aTerms); i++)
            if (   cchBlock == s_aTerms[i].cch
                && !RTStrNCmp(s_aTerms[i].psz, *ppszVer, cchBlock))
            {
                iVal1 = s_aTerms[i].iValue;
                break;
            }
        if (iVal1 != 0)
        {
            /*
             * Does the prelease term have a trailing number?
             * Add it assuming BETA == BETA1.
             */
            if (RT_C_IS_DIGIT(*psz))
            {
                const char *psz2 = psz;
                do
                    ch = *++psz;
                while (   ch
                       && RT_C_IS_DIGIT(ch)
                       && !RTSTRVER_IS_PUNCTUACTION(ch));

                int rc = RTStrToInt32Ex(psz2, NULL, 10, pi32Value);
                if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG && *pi32Value)
                    iVal1 += *pi32Value - 1; /* BETA1 maps to the plain BETA value */
                else
                {
                    /* Conversion failed: back off and leave the digits for the
                       next block. */
                    AssertRC(rc);
                    psz = psz2;
                }
            }
            fNumeric = true; /* a recognized tag yields a (negative) numeric value */
        }
        *pi32Value = iVal1;
    }
    *pcchBlock = psz - *ppszVer;

    /*
     * Skip trailing punctuation.
     */
    if (RTSTRVER_IS_PUNCTUACTION(*psz))
        psz++;
    *ppszVer = psz;

    return fNumeric;
}
/**
 * Implementation of VbglR3Init and VbglR3InitUser.
 *
 * Opens the VirtualBox guest device (named by @a pszDeviceName) using the
 * platform-appropriate mechanism and stores the handle in a global
 * (g_hFile on Windows, g_File elsewhere).  Only the first call actually
 * opens the device; subsequent calls just verify it is open.
 *
 * @returns IPRT status code.
 * @param   pszDeviceName   Path/name of the guest device to open.
 */
static int vbglR3Init(const char *pszDeviceName)
{
    uint32_t cInits = ASMAtomicIncU32(&g_cInits);
    Assert(cInits > 0);
    if (cInits > 1)
    {
        /*
         * This will fail if two (or more) threads race each other calling VbglR3Init.
         * However it will work fine for single threaded or otherwise serialized
         * processed calling us more than once.
         */
#ifdef RT_OS_WINDOWS
        if (g_hFile == INVALID_HANDLE_VALUE)
#elif !defined (VBOX_VBGLR3_XSERVER)
        if (g_File == NIL_RTFILE)
#else
        if (g_File == -1)
#endif
            return VERR_INTERNAL_ERROR;
        return VINF_SUCCESS;
    }

    /* First caller: the handle must not already be open. */
#if defined(RT_OS_WINDOWS)
    if (g_hFile != INVALID_HANDLE_VALUE)
#elif !defined(VBOX_VBGLR3_XSERVER)
    if (g_File != NIL_RTFILE)
#else
    if (g_File != -1)
#endif
        return VERR_INTERNAL_ERROR;

#if defined(RT_OS_WINDOWS)
    /*
     * Have to use CreateFile here as we want to specify FILE_FLAG_OVERLAPPED
     * and possible some other bits not available thru iprt/file.h.
     */
    HANDLE hFile = CreateFile(pszDeviceName,
                              GENERIC_READ | GENERIC_WRITE,
                              FILE_SHARE_READ | FILE_SHARE_WRITE,
                              NULL,
                              OPEN_EXISTING,
                              FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED,
                              NULL);

    if (hFile == INVALID_HANDLE_VALUE)
        return VERR_OPEN_FAILED;
    g_hFile = hFile;

#elif defined(RT_OS_OS2)
    /*
     * We might wish to compile this with Watcom, so stick to
     * the OS/2 APIs all the way. And in any case we have to use
     * DosDevIOCtl for the requests, why not use Dos* for everything.
     */
    HFILE hf = NULLHANDLE;
    ULONG ulAction = 0;
    APIRET rc = DosOpen((PCSZ)pszDeviceName, &hf, &ulAction, 0, FILE_NORMAL,
                        OPEN_ACTION_OPEN_IF_EXISTS,
                        OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_NOINHERIT | OPEN_SHARE_DENYNONE | OPEN_ACCESS_READWRITE,
                        NULL);
    if (rc)
        return RTErrConvertFromOS2(rc);

    /* Move the handle out of the 0..15 range so it survives code that assumes
       low handles are stdio; duplicate, then close the original. */
    if (hf < 16)
    {
        HFILE ahfs[16];
        unsigned i;
        for (i = 0; i < RT_ELEMENTS(ahfs); i++)
        {
            ahfs[i] = 0xffffffff;
            rc = DosDupHandle(hf, &ahfs[i]);
            if (rc)
                break;
        }

        if (i-- > 1)
        {
            ULONG fulState = 0;
            rc = DosQueryFHState(ahfs[i], &fulState);
            if (!rc)
            {
                fulState |= OPEN_FLAGS_NOINHERIT;
                fulState &= OPEN_FLAGS_WRITE_THROUGH | OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_NO_CACHE | OPEN_FLAGS_NOINHERIT; /* Turn off non-participating bits. */
                rc = DosSetFHState(ahfs[i], fulState);
            }
            if (!rc)
            {
                rc = DosClose(hf);
                AssertMsg(!rc, ("%ld\n", rc));
                hf = ahfs[i];
            }
            else
                i++;
            /* Close the unused intermediate duplicates. */
            while (i-- > 0)
                DosClose(ahfs[i]);
        }
    }
    g_File = (RTFILE)hf;

#elif defined(RT_OS_DARWIN)
    /*
     * Darwin is kind of special we need to engage the device via I/O first
     * before we open it via the BSD device node.
     */
    mach_port_t MasterPort;
    kern_return_t kr = IOMasterPort(MACH_PORT_NULL, &MasterPort);
    if (kr != kIOReturnSuccess)
        return VERR_GENERAL_FAILURE;

    CFDictionaryRef ClassToMatch = IOServiceMatching("org_virtualbox_VBoxGuest");
    if (!ClassToMatch)
        return VERR_GENERAL_FAILURE;

    /* IOServiceGetMatchingService consumes the ClassToMatch reference. */
    io_service_t ServiceObject = IOServiceGetMatchingService(kIOMasterPortDefault, ClassToMatch);
    if (!ServiceObject)
        return VERR_NOT_FOUND;

    io_connect_t uConnection;
    kr = IOServiceOpen(ServiceObject, mach_task_self(), VBOXGUEST_DARWIN_IOSERVICE_COOKIE, &uConnection);
    IOObjectRelease(ServiceObject);
    if (kr != kIOReturnSuccess)
        return VERR_OPEN_FAILED;

    RTFILE hFile;
    int rc = RTFileOpen(&hFile, pszDeviceName, RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_FAILURE(rc))
    {
        IOServiceClose(uConnection);
        return rc;
    }
    g_File = hFile;
    g_uConnection = uConnection;

#elif defined(VBOX_VBGLR3_XSERVER)
    /* X.org server build: use the wrapped xf86 I/O functions. */
    int File = xf86open(pszDeviceName, XF86_O_RDWR);
    if (File == -1)
        return VERR_OPEN_FAILED;
    g_File = File;

#else

    /* The default implementation. (linux, solaris, freebsd, haiku) */
    RTFILE File;
    int rc = RTFileOpen(&File, pszDeviceName, RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_FAILURE(rc))
        return rc;
    g_File = File;

#endif

#ifndef VBOX_VBGLR3_XSERVER
    /*
     * Create release logger
     */
    PRTLOGGER pReleaseLogger;
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    int rc2 = RTLogCreate(&pReleaseLogger, 0, "all", "VBOX_RELEASE_LOG",
                          RT_ELEMENTS(s_apszGroups), &s_apszGroups[0], RTLOGDEST_USER, NULL);
    /* This may legitimately fail if we are using the mini-runtime. */
    if (RT_SUCCESS(rc2))
        RTLogRelSetDefaultInstance(pReleaseLogger);
#endif

    return VINF_SUCCESS;
}
/**
 * Reads a standard-format manifest from @a hVfsIos into @a hManifest.
 *
 * Each line has the form "ATTR (entry-name)= value"; SIZE values must be
 * decimal, and MD5/SHA1/SHA256/SHA512 values must be hex digests of the
 * expected length.  Unrecognized attributes are stored with type
 * RTMANIFEST_ATTR_UNKNOWN.
 *
 * @returns IPRT status code (VINF_SUCCESS at EOF, VERR_PARSE_ERROR and
 *          friends on malformed input).
 * @param   hManifest   The manifest to populate.
 * @param   hVfsIos     The input stream to parse.
 * @param   pszErr      Optional buffer for a human readable error message.
 * @param   cbErr       Size of @a pszErr.
 */
RTDECL(int) RTManifestReadStandardEx(RTMANIFEST hManifest, RTVFSIOSTREAM hVfsIos, char *pszErr, size_t cbErr)
{
    /*
     * Validate input.
     */
    AssertPtrNull(pszErr);
    if (pszErr && cbErr)
        *pszErr = '\0';
    RTMANIFESTINT *pThis = hManifest;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMANIFEST_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Process the stream line by line.
     */
    uint32_t iLine = 0;
    for (;;)
    {
        /*
         * Read a line from the input stream.
         */
        iLine++;
        char szLine[RTPATH_MAX + RTSHA512_DIGEST_LEN + 32];
        int rc = rtManifestReadLine(hVfsIos, szLine, sizeof(szLine));
        if (RT_FAILURE(rc))
        {
            if (rc == VERR_EOF)
                return VINF_SUCCESS;
            /* Bugfix: format string was "line #u" - the '%' was missing, so
               neither iLine nor rc was ever formatted into the message. */
            RTStrPrintf(pszErr, cbErr, "Error reading line #%u: %Rrc", iLine, rc);
            return rc;
        }
        if (rc != VINF_SUCCESS)
        {
            RTStrPrintf(pszErr, cbErr, "Line number %u is too long", iLine);
            return VERR_OUT_OF_RANGE;
        }

        /*
         * Strip it and skip if empty.
         */
        char *psz = RTStrStrip(szLine);
        if (!*psz)
            continue;

        /*
         * Read the attribute name.
         */
        const char * const pszAttr = psz;
        do
            psz++;
        while (!RT_C_IS_BLANK(*psz) && *psz);
        if (*psz)
            *psz++ = '\0';

        /*
         * The entry name is enclosed in parenthesis and followed by a '='.
         */
        psz = RTStrStripL(psz);
        if (*psz != '(')
        {
            RTStrPrintf(pszErr, cbErr, "Expected '(' after %zu on line %u", psz - szLine, iLine);
            return VERR_PARSE_ERROR;
        }
        const char * const pszName = ++psz;
        while (*psz)
        {
            if (*psz == ')')
            {
                /* The ')' only terminates the name if '=' follows (after
                   optional blanks) - parenthesis may occur inside names. */
                char *psz2 = RTStrStripL(psz + 1);
                if (*psz2 == '=')
                {
                    *psz = '\0';
                    psz = psz2;
                    break;
                }
            }
            psz++;
        }

        if (*psz != '=')
        {
            RTStrPrintf(pszErr, cbErr, "Expected ')=' at %zu on line %u", psz - szLine, iLine);
            return VERR_PARSE_ERROR;
        }

        /*
         * The value.
         */
        psz = RTStrStrip(psz + 1);
        const char * const pszValue = psz;
        if (!*psz)
        {
            RTStrPrintf(pszErr, cbErr, "Expected value at %zu on line %u", psz - szLine, iLine);
            return VERR_PARSE_ERROR;
        }

        /*
         * Detect attribute type and sanity check the value.
         */
        uint32_t fType = RTMANIFEST_ATTR_UNKNOWN;
        static const struct
        {
            const char *pszAttr;
            uint32_t    fType;
            unsigned    cBits;
            unsigned    uBase;
        } s_aDecAttrs[] =
        {
            { "SIZE", RTMANIFEST_ATTR_SIZE, 64, 10}
        };
        for (unsigned i = 0; i < RT_ELEMENTS(s_aDecAttrs); i++)
            if (!strcmp(s_aDecAttrs[i].pszAttr, pszAttr))
            {
                fType = s_aDecAttrs[i].fType;
                rc = RTStrToUInt64Full(pszValue, s_aDecAttrs[i].uBase, NULL);
                if (rc != VINF_SUCCESS)
                {
                    RTStrPrintf(pszErr, cbErr, "Malformed value ('%s') at %zu on line %u: %Rrc", pszValue, psz - szLine, iLine, rc);
                    return VERR_PARSE_ERROR;
                }
                break;
            }

        if (fType == RTMANIFEST_ATTR_UNKNOWN)
        {
            static const struct
            {
                const char *pszAttr;
                uint32_t    fType;
                unsigned    cchHex;
            } s_aHexAttrs[] =
            {
                { "MD5",    RTMANIFEST_ATTR_MD5,    RTMD5_DIGEST_LEN    },
                { "SHA1",   RTMANIFEST_ATTR_SHA1,   RTSHA1_DIGEST_LEN   },
                { "SHA256", RTMANIFEST_ATTR_SHA256, RTSHA256_DIGEST_LEN },
                { "SHA512", RTMANIFEST_ATTR_SHA512, RTSHA512_DIGEST_LEN }
            };
            for (unsigned i = 0; i < RT_ELEMENTS(s_aHexAttrs); i++)
                if (!strcmp(s_aHexAttrs[i].pszAttr, pszAttr))
                {
                    fType = s_aHexAttrs[i].fType;
                    for (unsigned off = 0; off < s_aHexAttrs[i].cchHex; off++)
                        if (!RT_C_IS_XDIGIT(pszValue[off]))
                        {
                            RTStrPrintf(pszErr, cbErr, "Expected hex digit at %zu on line %u (value '%s', pos %u)",
                                        pszValue - szLine + off, iLine, pszValue, off);
                            return VERR_PARSE_ERROR;
                        }
                    break;
                }
        }

        /*
         * Finally, add it.
         */
        rc = RTManifestEntrySetAttr(hManifest, pszName, pszAttr, pszValue, fType);
        if (RT_FAILURE(rc))
        {
            RTStrPrintf(pszErr, cbErr, "RTManifestEntrySetAttr(,'%s','%s', '%s', %#x) failed on line %u: %Rrc",
                        pszName, pszAttr, pszValue, fType, iLine, rc);
            return rc;
        }
    }
}
/** * Recycles a requst. * * @returns true if recycled, false if it should be freed. * @param pQueue The queue. * @param pReq The request. */ DECLHIDDEN(bool) rtReqQueueRecycle(PRTREQQUEUEINT pQueue, PRTREQINT pReq) { if ( !pQueue || pQueue->cReqFree >= 128) return false; ASMAtomicIncU32(&pQueue->cReqFree); PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)]; PRTREQ pNext; do { pNext = *ppHead; ASMAtomicWritePtr(&pReq->pNext, pNext); } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext)); return true; }
/** * Initializes the system information object. * * @returns COM result indicator */ HRESULT SystemProperties::init(VirtualBox *aParent) { LogFlowThisFunc(("aParent=%p\n", aParent)); ComAssertRet(aParent, E_FAIL); /* Enclose the state transition NotReady->InInit->Ready */ AutoInitSpan autoInitSpan(this); AssertReturn(autoInitSpan.isOk(), E_FAIL); unconst(mParent) = aParent; i_setDefaultMachineFolder(Utf8Str::Empty); i_setLoggingLevel(Utf8Str::Empty); i_setDefaultHardDiskFormat(Utf8Str::Empty); i_setVRDEAuthLibrary(Utf8Str::Empty); i_setDefaultVRDEExtPack(Utf8Str::Empty); m->ulLogHistoryCount = 3; /* On Windows and OS X, HW virtualization use isn't exclusive by * default so that VT-x or AMD-V can be shared with other * hypervisors without requiring user intervention. * NB: See also SystemProperties constructor in settings.h */ #if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS) m->fExclusiveHwVirt = false; #else m->fExclusiveHwVirt = true; #endif HRESULT rc = S_OK; /* Fetch info of all available hd backends. */ /// @todo NEWMEDIA VDBackendInfo needs to be improved to let us enumerate /// any number of backends VDBACKENDINFO aVDInfo[100]; unsigned cEntries; int vrc = VDBackendInfo(RT_ELEMENTS(aVDInfo), aVDInfo, &cEntries); AssertRC(vrc); if (RT_SUCCESS(vrc)) { for (unsigned i = 0; i < cEntries; ++ i) { ComObjPtr<MediumFormat> hdf; rc = hdf.createObject(); if (FAILED(rc)) break; rc = hdf->init(&aVDInfo[i]); if (FAILED(rc)) break; m_llMediumFormats.push_back(hdf); } } /* Confirm a successful initialization */ if (SUCCEEDED(rc)) autoInitSpan.setSucceeded(); return rc; }
/** * Initializes the iconv handle cache associated with a thread. * * @param pThread The thread in question. */ DECLHIDDEN(void) rtStrIconvCacheInit(PRTTHREADINT pThread) { for (size_t i = 0; i < RT_ELEMENTS(pThread->ahIconvs); i++) pThread->ahIconvs[i] = (iconv_t)-1; }
int main(int argc, char **argv) { RTR3InitExe(argc, &argv, 0); /* * Parse arguments. */ static const RTGETOPTDEF s_aOptions[] = { { "--iterations", 'i', RTGETOPT_REQ_UINT32 }, { "--num-pages", 'n', RTGETOPT_REQ_UINT32 }, { "--page-at-a-time", 'c', RTGETOPT_REQ_UINT32 }, { "--page-file", 'f', RTGETOPT_REQ_STRING }, { "--offset", 'o', RTGETOPT_REQ_UINT64 }, }; const char *pszPageFile = NULL; uint64_t offPageFile = 0; uint32_t cIterations = 1; uint32_t cPagesAtATime = 1; RTGETOPTUNION Val; RTGETOPTSTATE State; int rc = RTGetOptInit(&State, argc, argv, &s_aOptions[0], RT_ELEMENTS(s_aOptions), 1, 0); AssertRCReturn(rc, 1); while ((rc = RTGetOpt(&State, &Val))) { switch (rc) { case 'n': g_cPages = Val.u32; if (g_cPages * PAGE_SIZE * 4 / (PAGE_SIZE * 4) != g_cPages) return Error("The specified page count is too high: %#x (%#llx bytes)\n", g_cPages, (uint64_t)g_cPages * PAGE_SHIFT); if (g_cPages < 1) return Error("The specified page count is too low: %#x\n", g_cPages); break; case 'i': cIterations = Val.u32; if (cIterations < 1) return Error("The number of iterations must be 1 or higher\n"); break; case 'c': cPagesAtATime = Val.u32; if (cPagesAtATime < 1 || cPagesAtATime > 10240) return Error("The specified pages-at-a-time count is out of range: %#x\n", cPagesAtATime); break; case 'f': pszPageFile = Val.psz; break; case 'o': offPageFile = Val.u64; break; case 'O': offPageFile = Val.u64 * PAGE_SIZE; break; case 'h': RTPrintf("syntax: tstCompressionBenchmark [options]\n" "\n" "Options:\n" " -h, --help\n" " Show this help page\n" " -i, --iterations <num>\n" " The number of iterations.\n" " -n, --num-pages <pages>\n" " The number of pages.\n" " -c, --pages-at-a-time <pages>\n" " Number of pages at a time.\n" " -f, --page-file <filename>\n" " File or device to read the page from. 
The default\n" " is to generate some garbage.\n" " -o, --offset <file-offset>\n" " Offset into the page file to start reading at.\n"); return 0; case 'V': RTPrintf("%sr%s\n", RTBldCfgVersion(), RTBldCfgRevisionStr()); return 0; default: return RTGetOptPrintError(rc, &Val); } } g_cbPages = g_cPages * PAGE_SIZE; uint64_t cbTotal = (uint64_t)g_cPages * PAGE_SIZE * cIterations; uint64_t cbTotalKB = cbTotal / _1K; if (cbTotal / cIterations != g_cbPages) return Error("cPages * cIterations -> overflow\n"); /* * Gather the test memory. */ if (pszPageFile) { size_t cbFile; rc = RTFileReadAllEx(pszPageFile, offPageFile, g_cbPages, RTFILE_RDALL_O_DENY_NONE, (void **)&g_pabSrc, &cbFile); if (RT_FAILURE(rc)) return Error("Error reading %zu bytes from %s at %llu: %Rrc\n", g_cbPages, pszPageFile, offPageFile, rc); if (cbFile != g_cbPages) return Error("Error reading %zu bytes from %s at %llu: got %zu bytes\n", g_cbPages, pszPageFile, offPageFile, cbFile); } else { g_pabSrc = (uint8_t *)RTMemAlloc(g_cbPages); if (g_pabSrc) { /* Just fill it with something - warn about the low quality of the something. */ RTPrintf("tstCompressionBenchmark: WARNING! No input file was specified so the source\n" "buffer will be filled with generated data of questionable quality.\n"); #ifdef RT_OS_LINUX RTPrintf("To get real RAM on linux: sudo dd if=/dev/mem ... 
\n"); #endif uint8_t *pb = g_pabSrc; uint8_t *pbEnd = &g_pabSrc[g_cbPages]; for (; pb != pbEnd; pb += 16) { char szTmp[17]; RTStrPrintf(szTmp, sizeof(szTmp), "aaaa%08Xzzzz", (uint32_t)(uintptr_t)pb); memcpy(pb, szTmp, 16); } } } g_pabDecompr = (uint8_t *)RTMemAlloc(g_cbPages); g_cbComprAlloc = RT_MAX(g_cbPages * 2, 256 * PAGE_SIZE); g_pabCompr = (uint8_t *)RTMemAlloc(g_cbComprAlloc); if (!g_pabSrc || !g_pabDecompr || !g_pabCompr) return Error("failed to allocate memory buffers (g_cPages=%#x)\n", g_cPages); /* * Double loop compressing and uncompressing the data, where the outer does * the specified number of iterations while the inner applies the different * compression algorithms. */ struct { /** The time spent decompressing. */ uint64_t cNanoDecompr; /** The time spent compressing. */ uint64_t cNanoCompr; /** The size of the compressed data. */ uint64_t cbCompr; /** First error. */ int rc; /** The compression style: block or stream. */ bool fBlock; /** Compression type. */ RTZIPTYPE enmType; /** Compression level. */ RTZIPLEVEL enmLevel; /** Method name. */ const char *pszName; } aTests[] = { { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZip/Store" }, { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_LZF, RTZIPLEVEL_DEFAULT, "RTZip/LZF" }, /* { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT, "RTZip/zlib" }, - slow plus it randomly hits VERR_GENERAL_FAILURE atm. 
*/ { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZipBlock/Store" }, { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZF, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZF" }, { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZJB, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZJB" }, { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZO, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZO" }, }; RTPrintf("tstCompressionBenchmark: TESTING.."); for (uint32_t i = 0; i < cIterations; i++) { for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++) { if (RT_FAILURE(aTests[j].rc)) continue; memset(g_pabCompr, 0xaa, g_cbComprAlloc); memset(g_pabDecompr, 0xcc, g_cbPages); g_cbCompr = 0; g_offComprIn = 0; RTPrintf("."); RTStrmFlush(g_pStdOut); /* * Compress it. */ uint64_t NanoTS = RTTimeNanoTS(); if (aTests[j].fBlock) { size_t cbLeft = g_cbComprAlloc; uint8_t const *pbSrcPage = g_pabSrc; uint8_t *pbDstPage = g_pabCompr; for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime) { AssertBreakStmt(cbLeft > PAGE_SIZE * 4, aTests[j].rc = rc = VERR_BUFFER_OVERFLOW); uint32_t *pcb = (uint32_t *)pbDstPage; pbDstPage += sizeof(uint32_t); cbLeft -= sizeof(uint32_t); size_t cbSrc = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE; size_t cbDst; rc = RTZipBlockCompress(aTests[j].enmType, aTests[j].enmLevel, 0 /*fFlags*/, pbSrcPage, cbSrc, pbDstPage, cbLeft, &cbDst); if (RT_FAILURE(rc)) { Error("RTZipBlockCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; break; } *pcb = (uint32_t)cbDst; cbLeft -= cbDst; pbDstPage += cbDst; pbSrcPage += cbSrc; } if (RT_FAILURE(rc)) continue; g_cbCompr = pbDstPage - g_pabCompr; } else { PRTZIPCOMP pZipComp; rc = RTZipCompCreate(&pZipComp, NULL, ComprOutCallback, aTests[j].enmType, aTests[j].enmLevel); if (RT_FAILURE(rc)) { Error("Failed to create the compressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; continue; } uint8_t const *pbSrcPage = g_pabSrc; for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime) { size_t cb = 
RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE; rc = RTZipCompress(pZipComp, pbSrcPage, cb); if (RT_FAILURE(rc)) { Error("RTZipCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; break; } pbSrcPage += cb; } if (RT_FAILURE(rc)) continue; rc = RTZipCompFinish(pZipComp); if (RT_FAILURE(rc)) { Error("RTZipCompFinish failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; break; } RTZipCompDestroy(pZipComp); } NanoTS = RTTimeNanoTS() - NanoTS; aTests[j].cbCompr += g_cbCompr; aTests[j].cNanoCompr += NanoTS; /* * Decompress it. */ NanoTS = RTTimeNanoTS(); if (aTests[j].fBlock) { uint8_t const *pbSrcPage = g_pabCompr; size_t cbLeft = g_cbCompr; uint8_t *pbDstPage = g_pabDecompr; for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime) { size_t cbDst = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE; size_t cbSrc = *(uint32_t *)pbSrcPage; pbSrcPage += sizeof(uint32_t); cbLeft -= sizeof(uint32_t); rc = RTZipBlockDecompress(aTests[j].enmType, 0 /*fFlags*/, pbSrcPage, cbSrc, &cbSrc, pbDstPage, cbDst, &cbDst); if (RT_FAILURE(rc)) { Error("RTZipBlockDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; break; } pbDstPage += cbDst; cbLeft -= cbSrc; pbSrcPage += cbSrc; } if (RT_FAILURE(rc)) continue; } else { PRTZIPDECOMP pZipDecomp; rc = RTZipDecompCreate(&pZipDecomp, NULL, DecomprInCallback); if (RT_FAILURE(rc)) { Error("Failed to create the decompressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; continue; } uint8_t *pbDstPage = g_pabDecompr; for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime) { size_t cb = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE; rc = RTZipDecompress(pZipDecomp, pbDstPage, cb, NULL); if (RT_FAILURE(rc)) { Error("RTZipDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc); aTests[j].rc = rc; break; } pbDstPage += cb; } RTZipDecompDestroy(pZipDecomp); if (RT_FAILURE(rc)) continue; } NanoTS 
= RTTimeNanoTS() - NanoTS; aTests[j].cNanoDecompr += NanoTS; if (memcmp(g_pabDecompr, g_pabSrc, g_cbPages)) { Error("The compressed data doesn't match the source for '%s' (%#u)\n", aTests[j].pszName, j); aTests[j].rc = VERR_BAD_EXE_FORMAT; continue; } } } if (RT_SUCCESS(rc)) RTPrintf("\n"); /* * Report the results. */ rc = 0; RTPrintf("tstCompressionBenchmark: BEGIN RESULTS\n"); RTPrintf("%-20s Compression Decompression\n", ""); RTPrintf("%-20s In Out Ratio Size In Out\n", "Method"); RTPrintf("%.20s-----------------------------------------------------------------------------------------\n", "---------------------------------------------"); for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++) { if (RT_SUCCESS(aTests[j].rc)) { unsigned uComprSpeedIn = (unsigned)(cbTotalKB / (long double)aTests[j].cNanoCompr * 1000000000.0); unsigned uComprSpeedOut = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoCompr * 1000000000.0 / 1024); unsigned uRatio = (unsigned)(aTests[j].cbCompr / cIterations * 100 / g_cbPages); unsigned uDecomprSpeedIn = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoDecompr * 1000000000.0 / 1024); unsigned uDecomprSpeedOut = (unsigned)(cbTotalKB / (long double)aTests[j].cNanoDecompr * 1000000000.0); RTPrintf("%-20s %'9u KB/s %'9u KB/s %3u%% %'11llu bytes %'9u KB/s %'9u KB/s", aTests[j].pszName, uComprSpeedIn, uComprSpeedOut, uRatio, aTests[j].cbCompr / cIterations, uDecomprSpeedIn, uDecomprSpeedOut); #if 0 RTPrintf(" [%'14llu / %'14llu ns]\n", aTests[j].cNanoCompr / cIterations, aTests[j].cNanoDecompr / cIterations); #else RTPrintf("\n"); #endif } else { RTPrintf("%-20s: %Rrc\n", aTests[j].pszName, aTests[j].rc); rc = 1; } } if (pszPageFile) RTPrintf("Input: %'10zu pages from '%s' starting at offset %'lld (%#llx)\n" " %'11zu bytes\n", g_cPages, pszPageFile, offPageFile, offPageFile, g_cbPages); else RTPrintf("Input: %'10zu pages of generated rubbish %'11zu bytes\n", g_cPages, g_cbPages); /* * Count zero pages in the data set. 
*/ size_t cZeroPages = 0; for (size_t iPage = 0; iPage < g_cPages; iPage++) { if (!ASMMemIsAllU32(&g_pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, 0)) cZeroPages++; } RTPrintf(" %'10zu zero pages (%u %%)\n", cZeroPages, cZeroPages * 100 / g_cPages); /* * A little extension to the test, benchmark relevant CRCs. */ RTPrintf("\n" "tstCompressionBenchmark: Hash/CRC - All In One\n"); tstBenchmarkCRCsAllInOne(g_pabSrc, g_cbPages); RTPrintf("\n" "tstCompressionBenchmark: Hash/CRC - Page by Page\n"); tstBenchmarkCRCsPageByPage(g_pabSrc, g_cbPages); RTPrintf("\n" "tstCompressionBenchmark: Hash/CRC - Zero Page Digest\n"); static uint8_t s_abZeroPg[PAGE_SIZE]; RT_ZERO(s_abZeroPg); tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE); RTPrintf("\n" "tstCompressionBenchmark: Hash/CRC - Zero Half Page Digest\n"); tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE / 2); RTPrintf("tstCompressionBenchmark: END RESULTS\n"); return rc; }
int main(int argc, char **argv) { /* * Init. */ int rc = RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB); if (RT_FAILURE(rc)) return RTMsgInitFailure(rc); /* * Process arguments. */ static const RTGETOPTDEF s_aOptions[] = { { "--keep", 'k', RTGETOPT_REQ_NOTHING }, { "--no-keep", 'n', RTGETOPT_REQ_NOTHING }, }; bool fKeepLoaded = false; int ch; RTGETOPTUNION ValueUnion; RTGETOPTSTATE GetState; RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0); while ((ch = RTGetOpt(&GetState, &ValueUnion))) { switch (ch) { case VINF_GETOPT_NOT_OPTION: { void *pvImageBase; RTERRINFOSTATIC ErrInfo; RTErrInfoInitStatic(&ErrInfo); rc = SUPR3LoadModule(ValueUnion.psz, RTPathFilename(ValueUnion.psz), &pvImageBase, &ErrInfo.Core); if (RT_FAILURE(rc)) { RTMsgError("%Rrc when attempting to load '%s': %s\n", rc, ValueUnion.psz, ErrInfo.Core.pszMsg); return 1; } RTPrintf("Loaded '%s' at %p\n", ValueUnion.psz, pvImageBase); if (!fKeepLoaded) { rc = SUPR3FreeModule(pvImageBase); if (RT_FAILURE(rc)) { RTMsgError("%Rrc when attempting to load '%s'\n", rc, ValueUnion.psz); return 1; } } break; } case 'k': fKeepLoaded = true; break; case 'n': fKeepLoaded = false; break; case 'h': RTPrintf("%s [mod1 [mod2...]]\n", argv[0]); return 1; case 'V': RTPrintf("$Revision: 102121 $\n"); return 0; default: return RTGetOptPrintError(ch, &ValueUnion); } } return 0; }
/**
 * Entry point for the tstRTPrfIO I/O profiling testcase.
 *
 * Parses options (-d selects the test directory), builds the test path names,
 * verifies none of the test files/dirs pre-exist, runs the enabled benchmarks
 * and cleans up after itself.
 *
 * @returns Test status code via RTTestSummaryAndDestroy.
 */
int main(int argc, char **argv)
{
    RTEXITCODE rcExit = RTTestInitAndCreate("tstRTPrfIO", &g_hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(g_hTest);

    /*
     * Parse arguments
     */
    static const RTGETOPTDEF s_aOptions[] =
    {
        { "--test-dir", 'd', RTGETOPT_REQ_STRING },
    };
    bool fFileOpenCloseTest = true;
    bool fFileWriteByteTest = true;
    bool fPathQueryInfoTest = true;
    //bool fFileTests = true;
    //bool fDirTests = true;
    int ch;
    RTGETOPTUNION ValueUnion;
    RTGETOPTSTATE GetState;
    RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0);
    while ((ch = RTGetOpt(&GetState, &ValueUnion)))
    {
        switch (ch)
        {
            case 'd':
                g_pszTestDir = ValueUnion.psz;
                break;

            case 'V':
                RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "$Revision: 100874 $\n");
                return RTTestSummaryAndDestroy(g_hTest);

            case 'h':
                RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "usage: testname [-d <testdir>]\n");
                return RTTestSummaryAndDestroy(g_hTest);

            default:
                RTTestFailed(g_hTest, "invalid argument");
                RTGetOptPrintError(ch, &ValueUnion);
                return RTTestSummaryAndDestroy(g_hTest);
        }
    }

    /*
     * Set up and check the prerequisites.
     */
    /* Compose all the path names under the test directory up front. */
    RTTESTI_CHECK_RC(RTPathJoin(g_szTestFile1, sizeof(g_szTestFile1), g_pszTestDir, "tstRTPrfIO-TestFile1"), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTPathJoin(g_szTestDir1, sizeof(g_szTestDir1), g_pszTestDir, "tstRTPrfIO-TestDir1"), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTPathJoin(g_szNotExitingFile, sizeof(g_szNotExitingFile), g_pszTestDir, "tstRTPrfIO-nonexistent-file"), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTPathJoin(g_szNotExitingDir, sizeof(g_szNotExitingDir), g_pszTestDir, "tstRTPrfIO-nonexistent-dir"), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTPathJoin(g_szNotExitingDirFile, sizeof(g_szNotExitingDirFile), g_szNotExitingDir, "nonexistent-file"), VINF_SUCCESS);
    RTTESTI_CHECK(RTDirExists(g_pszTestDir));
    /* Refuse to run if any of the test objects already exist, so we cannot
       clobber user data during cleanup. */
    if (RTPathExists(g_szTestDir1))
        RTTestFailed(g_hTest, "The primary test directory (%s) already exist, please remove it", g_szTestDir1);
    if (RTPathExists(g_szTestFile1))
        RTTestFailed(g_hTest, "The primary test file (%s) already exist, please remove it", g_szTestFile1);
    if (RTPathExists(g_szNotExitingFile))
        RTTestFailed(g_hTest, "'%s' exists, remove it", g_szNotExitingFile);
    if (RTPathExists(g_szNotExitingDir))
        RTTestFailed(g_hTest, "'%s' exists, remove it", g_szNotExitingDir);
    if (RTPathExists(g_szNotExitingDirFile))
        RTTestFailed(g_hTest, "'%s' exists, remove it", g_szNotExitingDirFile);

    /*
     * Do the testing.
     */
    /* Only run the benchmarks if the prerequisites all checked out. */
    if (RTTestIErrorCount() == 0)
    {
#if 1
        if (fPathQueryInfoTest)
            benchmarkPathQueryInfo();
        if (fFileOpenCloseTest)
            benchmarkFileOpenClose();
#endif
        if (fFileWriteByteTest)
            benchmarkFileWriteByte();
        //if (fFileTests)
        //    benchmarkFile();
        //if (fDirTests)
        //    benchmarkDir();

        /*
         * Cleanup.
         */
        RTFileDelete(g_szTestFile1);
        RTDirRemoveRecursive(g_szTestDir1, 0);
        RTTESTI_CHECK(RTDirExists(g_pszTestDir));
        RTTESTI_CHECK(!RTPathExists(g_szTestDir1));
        RTTESTI_CHECK(!RTPathExists(g_szTestFile1));
    }

    return RTTestSummaryAndDestroy(g_hTest);
}
/** Dummy delete function for vector-of-void pointer elements */
static void deletePVoid(void **ppv)
{
    /* Capture the argument of the first few invocations for later inspection,
       then count the call regardless. */
    void *pvArg = *ppv;
    if (RT_ELEMENTS(s_apvDeleteArg) > s_cDeleteCalls)
        s_apvDeleteArg[s_cDeleteCalls] = pvArg;
    ++s_cDeleteCalls;
}
/**
 * Disassembler testcase/tool entry point.
 *
 * Parses the options, then disassembles either a hex byte string supplied as
 * the remaining arguments (--hex-bytes) or the contents of the named files.
 *
 * @returns 0 on success, 1 on failure.
 */
int main(int argc, char **argv)
{
    RTR3InitExe(argc, &argv, 0);
    const char * const argv0 = RTPathFilename(argv[0]);

    /* options */
    uint64_t uAddress = 0;                      /* base address for the listing */
    uint64_t uHighlightAddr = UINT64_MAX;       /* address marked with '<' in hex input */
    ASMSTYLE enmStyle = kAsmStyle_Default;
    UNDEFOPHANDLING enmUndefOp = kUndefOp_Fail;
    bool fListing = true;
    DISCPUMODE enmCpuMode = CPUMODE_32BIT;
    RTFOFF off = 0;
    RTFOFF cbMax = _1G;
    bool fHexBytes = false;

    /*
     * Parse arguments.
     */
    static const RTGETOPTDEF g_aOptions[] =
    {
        { "--address",      'a', RTGETOPT_REQ_UINT64 },
        { "--cpumode",      'c', RTGETOPT_REQ_UINT32 },
        { "--bytes",        'b', RTGETOPT_REQ_INT64 },
        { "--listing",      'l', RTGETOPT_REQ_NOTHING },
        { "--no-listing",   'L', RTGETOPT_REQ_NOTHING },
        { "--offset",       'o', RTGETOPT_REQ_INT64 },
        { "--style",        's', RTGETOPT_REQ_STRING },
        { "--undef-op",     'u', RTGETOPT_REQ_STRING },
        { "--hex-bytes",    'x', RTGETOPT_REQ_NOTHING },
    };

    int ch;
    RTGETOPTUNION ValueUnion;
    RTGETOPTSTATE GetState;
    RTGetOptInit(&GetState, argc, argv, g_aOptions, RT_ELEMENTS(g_aOptions), 1, RTGETOPTINIT_FLAGS_OPTS_FIRST);
    while (   (ch = RTGetOpt(&GetState, &ValueUnion))
           && ch != VINF_GETOPT_NOT_OPTION)
    {
        switch (ch)
        {
            case 'a':
                uAddress = ValueUnion.u64;
                break;

            case 'b':
                cbMax = ValueUnion.i64;
                break;

            case 'c':
                if (ValueUnion.u32 == 16)
                    enmCpuMode = CPUMODE_16BIT;
                else if (ValueUnion.u32 == 32)
                    enmCpuMode = CPUMODE_32BIT;
                else if (ValueUnion.u32 == 64)
                    enmCpuMode = CPUMODE_64BIT;
                else
                {
                    RTStrmPrintf(g_pStdErr, "%s: Invalid CPU mode value %RU32\n", argv0, ValueUnion.u32);
                    return 1;
                }
                break;

            case 'h':
                return Usage(argv0);

            case 'l':
                fListing = true;
                break;

            case 'L':
                fListing = false;
                break;

            case 'o':
                off = ValueUnion.i64;
                break;

            case 's':
                if (!strcmp(ValueUnion.psz, "default"))
                    enmStyle = kAsmStyle_Default;
                else if (!strcmp(ValueUnion.psz, "yasm"))
                    enmStyle = kAsmStyle_yasm;
                else if (!strcmp(ValueUnion.psz, "masm"))
                {
                    enmStyle = kAsmStyle_masm;
                    RTStrmPrintf(g_pStdErr, "%s: masm style isn't implemented yet\n", argv0);
                    return 1;
                }
                else
                {
                    RTStrmPrintf(g_pStdErr, "%s: unknown assembly style: %s\n", argv0, ValueUnion.psz);
                    return 1;
                }
                break;

            case 'u':
                if (!strcmp(ValueUnion.psz, "fail"))
                    enmUndefOp = kUndefOp_Fail;
                else if (!strcmp(ValueUnion.psz, "all"))
                    enmUndefOp = kUndefOp_All;
                else if (!strcmp(ValueUnion.psz, "db"))
                    enmUndefOp = kUndefOp_DefineByte;
                else
                {
                    RTStrmPrintf(g_pStdErr, "%s: unknown undefined opcode handling method: %s\n", argv0, ValueUnion.psz);
                    return 1;
                }
                break;

            case 'x':
                fHexBytes = true;
                break;

            case 'V':
                RTPrintf("$Revision: $\n");
                return 0;

            default:
                return RTGetOptPrintError(ch, &ValueUnion);
        }
    }
    int iArg = GetState.iNext - 1; /** @todo Not pretty, add RTGetOptInit flag for this. */
    if (iArg >= argc)
        return Usage(argv0);

    int rc = VINF_SUCCESS;
    if (fHexBytes)
    {
        /*
         * Convert the remaining arguments from a hex byte string into
         * a buffer that we disassemble.
         */
        size_t      cb = 0;
        uint8_t    *pb = NULL;
        for ( ; iArg < argc; iArg++)
        {
            char ch2;
            const char *psz = argv[iArg];
            while (*psz)
            {
                /** @todo this stuff belongs in IPRT, same stuff as mac address reading. Could be reused for IPv6 with a different item size.*/
                /* skip white space, and for the benefit of linux panics '<' and '>'. */
                while (RT_C_IS_SPACE(ch2 = *psz) || ch2 == '<' || ch2 == '>')
                {
                    /* '<' records where the highlight marker sits in the byte stream. */
                    if (ch2 == '<')
                        uHighlightAddr = uAddress + cb;
                    psz++;
                }
                if (!ch2)
                    break;

                /* one digit followed by a space or EOS, or two digits. */
                int iNum = HexDigitToNum(*psz++);
                if (iNum == -1)
                    return 1;
                if (!RT_C_IS_SPACE(ch2 = *psz) && ch2 != '\0' && ch2 != '>')
                {
                    int iDigit = HexDigitToNum(*psz++);
                    if (iDigit == -1)
                        return 1;
                    iNum = iNum * 16 + iDigit;
                }

                /* add the byte */
                /* NOTE(review): grows in 64-byte chunks but reallocs every 4 bytes
                   (the %4 looks like a leftover from debugging the %64 version);
                   harmless since the buffer is always at least cb+64 bytes. */
                if (!(cb % 4 /*64*/))
                {
                    pb = (uint8_t *)RTMemRealloc(pb, cb + 64);
                    if (!pb)
                    {
                        RTPrintf("%s: error: RTMemRealloc failed\n", argv[0]);
                        return 1;
                    }
                }
                pb[cb++] = (uint8_t)iNum;
            }
        }

        /*
         * Disassemble it.
         */
        rc = MyDisasmBlock(argv0, enmCpuMode, uAddress, uHighlightAddr, pb, cb, enmStyle, fListing, enmUndefOp);
    }
    else
    {
        /*
         * Process the files.
         */
        for ( ; iArg < argc; iArg++)
        {
            /*
             * Read the file into memory.
             */
            void   *pvFile;
            size_t  cbFile;
            rc = RTFileReadAllEx(argv[iArg], off, cbMax, RTFILE_RDALL_O_DENY_NONE, &pvFile, &cbFile);
            if (RT_FAILURE(rc))
            {
                RTStrmPrintf(g_pStdErr, "%s: %s: %Rrc\n", argv0, argv[iArg], rc);
                break;
            }

            /*
             * Disassemble it.
             */
            rc = MyDisasmBlock(argv0, enmCpuMode, uAddress, uHighlightAddr, (uint8_t *)pvFile, cbFile, enmStyle, fListing, enmUndefOp);
            if (RT_FAILURE(rc))
                break;
        }
    }

    return RT_SUCCESS(rc) ? 0 : 1;
}
/** Dummy delete by value function for vector-of-void pointer elements */ static void deletePVoidValue(void *pv) { if (s_cDeleteCalls < RT_ELEMENTS(s_apvDeleteArg)) s_apvDeleteArg[s_cDeleteCalls] = pv; ++s_cDeleteCalls; }
/**
 * Match the specified device against the filters.
 * Unlike the VBoxUSBFilterMatch, returns Owner also if exclude filter is matched
 *
 * @returns Owner on if matched, VBOXUSBFILTER_CONTEXT_NIL it not matched.
 * @param   pDevice             The device data as a filter structure.
 *                              See USBFilterMatch for how to construct this.
 * @param   puId                Where to store the filter id (optional).
 * @param   fRemoveFltIfOneShot Whether a matching one-shot filter shall be
 *                              unlinked and freed as part of the match.
 * @param   pfFilter            Where to store whether the device must be filtered or not
 * @param   pfIsOneShot         Where to store whether the matching filter was a
 *                              one-shot filter (optional).
 */
VBOXUSBFILTER_CONTEXT VBoxUSBFilterMatchEx(PCUSBFILTER pDevice, uintptr_t *puId,
                                           bool fRemoveFltIfOneShot, bool *pfFilter, bool *pfIsOneShot)
{
    /*
     * Validate input.
     */
    int rc = USBFilterValidate(pDevice);
    AssertRCReturn(rc, VBOXUSBFILTER_CONTEXT_NIL);

    *pfFilter = false;
    if (puId)
        *puId = 0;

    /*
     * Search the lists for a match.
     * (The lists are ordered by priority.)
     */
    VBOXUSBFILTERMGR_LOCK();

    for (unsigned i = USBFILTERTYPE_FIRST; i < RT_ELEMENTS(g_aLists); i++)
    {
        PVBOXUSBFILTER pPrev = NULL;
        PVBOXUSBFILTER pCur = g_aLists[i].pHead;
        while (pCur)
        {
            if (USBFilterMatch(&pCur->Core, pDevice))
            {
                /*
                 * Take list specific actions and return.
                 *
                 * The code does NOT implement the case where there are two or more
                 * filter clients, and one of them is releasing a device that's
                 * requested by some of the others. It's just too much work for a
                 * situation that noone will encounter.
                 */
                if (puId)
                    *puId = pCur->uId;
                VBOXUSBFILTER_CONTEXT Owner = pCur->Owner;
                /* Ignore-type lists (incl. one-shot ignore) do not mean "filter". */
                *pfFilter = !!(i != USBFILTERTYPE_IGNORE
                            && i != USBFILTERTYPE_ONESHOT_IGNORE);

                if (    i == USBFILTERTYPE_ONESHOT_IGNORE
                    ||  i == USBFILTERTYPE_ONESHOT_CAPTURE)
                {
                    if (fRemoveFltIfOneShot)
                    {
                        /* unlink */
                        PVBOXUSBFILTER pNext = pCur->pNext;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            g_aLists[i].pHead = pNext;
                        if (!pNext)
                            g_aLists[i].pTail = pPrev;
                    }
                }

                VBOXUSBFILTERMGR_UNLOCK();

                if (    i == USBFILTERTYPE_ONESHOT_IGNORE
                    ||  i == USBFILTERTYPE_ONESHOT_CAPTURE)
                {
                    if (fRemoveFltIfOneShot)
                    {
                        /* The node was unlinked above while holding the lock;
                           free it outside the lock. */
                        vboxUSBFilterFree(pCur);
                    }
                    if (pfIsOneShot)
                        *pfIsOneShot = true;
                }
                else
                {
                    if (pfIsOneShot)
                        *pfIsOneShot = false;
                }
                return Owner;
            }

            pPrev = pCur;
            pCur = pCur->pNext;
        }
    }

    VBOXUSBFILTERMGR_UNLOCK();
    return VBOXUSBFILTER_CONTEXT_NIL;
}
/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & (X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    /* Expand the byte count to cover whole pages and round the address down. */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            /* The whole range must fall inside this one mapping. */
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    /* The PAE tables are kept in sync: same AND/OR applied, but with the
                       PAE physical-address and MBZ/NX masks excluded from fFlags. */
                    PPGMSHWPTEPAE pPtePae = &pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];
                    PGMSHWPTEPAE_SET(*pPtePae,
                                     (  PGMSHWPTEPAE_GET_U(*pPtePae)
                                      & (fMask | X86_PTE_PAE_PG_MASK))
                                     | (fFlags & ~(X86_PTE_PAE_PG_MASK | X86_PTE_PAE_MBZ_MASK_NX)));

                    /* invalidate tlb */
                    PGM_INVL_PG(VMMGetCpu(pVM), (RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }
            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }
    /* NOTE(review): the chunk ends here; the mapping-not-found return path of
       this function lies outside the visible source - confirm against the
       full file before relying on the fall-through behavior. */
/**
 * EGL -> GLX implementation of eglChooseConfig.
 *
 * Translates the EGL attribute list into a GLX attribute list, queries
 * matching framebuffer configurations via glXChooseFBConfig and returns
 * them as EGLConfig handles.
 *
 * @returns EGL_TRUE on success, EGL_FALSE with the EGL error set otherwise.
 * @param   hDisplay    The EGL display (an X11 Display pointer).
 * @param   paAttribs   EGL_NONE-terminated attribute/value pairs, or NULL.
 * @param   paConfigs   Where to return up to @a caConfigs configurations.
 * @param   caConfigs   Size of @a paConfigs in elements.
 * @param   pcConfigs   Where to return the number of matching configurations.
 */
DECLEXPORT(EGLBoolean) eglChooseConfig (EGLDisplay hDisplay, const EGLint *paAttribs, EGLConfig *paConfigs, EGLint caConfigs, EGLint *pcConfigs)
{
    Display *pDisplay = (Display *)hDisplay;
    int aAttribList[256]; /* The list cannot be this long. */
    unsigned cAttribs = 0, i;
    const EGLint *pAttrib, *pAttrib2;
    EGLint cRenderableType = EGL_OPENGL_ES_BIT;
    int cConfigCaveat = GLX_DONT_CARE, cConformant = GLX_DONT_CARE;
    GLXFBConfig *paFBConfigs;
    int caFBConfigs;

    if (!VALID_PTR(hDisplay))
        return setEGLError(EGL_NOT_INITIALIZED);
    if (!VALID_PTR(pcConfigs))
        return setEGLError(EGL_BAD_PARAMETER);
    if (caConfigs > 0 && !VALID_PTR(paConfigs))
        return setEGLError(EGL_BAD_PARAMETER);
    for (pAttrib = paAttribs; pAttrib != NULL && *pAttrib != EGL_NONE; pAttrib += 2)
    {
        bool fSkip = false;
        int cGLXAttrib;

        /* Check for illegal values. */
        if (   (*pAttrib == EGL_LEVEL || *pAttrib == EGL_MATCH_NATIVE_PIXMAP)
            && pAttrib[1] == EGL_DONT_CARE)
            return setEGLError(EGL_BAD_ATTRIBUTE);
        /* Check for values we can't handle. */
        if (   (*pAttrib == EGL_ALPHA_MASK_SIZE)
            && pAttrib[1] != EGL_DONT_CARE && pAttrib[1] != 0)
            return setEGLError(EGL_BAD_ACCESS);
        /** @todo try creating a pixmap from a native one with the configurations returned. */
        if (*pAttrib == EGL_MATCH_NATIVE_PIXMAP)
            return setEGLError(EGL_BAD_ACCESS);
        if (   (   *pAttrib == EGL_MIN_SWAP_INTERVAL || *pAttrib == EGL_MAX_SWAP_INTERVAL
                || *pAttrib == EGL_BIND_TO_TEXTURE_RGB || *pAttrib == EGL_BIND_TO_TEXTURE_RGBA)
            && pAttrib[1] != EGL_DONT_CARE)
            return setEGLError(EGL_BAD_ACCESS);
        /* Ignore attributes which are repeated later. */
        for (pAttrib2 = pAttrib + 2; *pAttrib2 != EGL_NONE; pAttrib2 += 2)
            if (*pAttrib2 == *pAttrib)
                fSkip = true; /* Fixed: was "fSkip == true", a no-op comparison
                                 which defeated the duplicate-attribute skip. */
        if (fSkip)
            continue;
        cGLXAttrib = convertEGLAttribToGLX(*pAttrib);
        if (cGLXAttrib != None)
        {
            aAttribList[cAttribs] = cGLXAttrib;
            if (pAttrib[1] == EGL_DONT_CARE)
                aAttribList[cAttribs + 1] = GLX_DONT_CARE;
            else
                aAttribList[cAttribs + 1] = pAttrib[1];
            cAttribs += 2;
        }
        else
        {
            /* Attributes needing special translation.  Note that cAttribs is
               unconditionally incremented by two after the switch, so cases
               which do not emit a GLX pair must compensate with "cAttribs -= 2". */
            switch (*pAttrib)
            {
                /* Fixed: an unreachable validity check for EGL_COLOR_BUFFER_TYPE
                   sat here *before* the first case label (dead code inside a
                   switch); it also contradicted the case body below, which
                   accepts EGL_LUMINANCE_BUFFER.  Removed. */
                case EGL_COLOR_BUFFER_TYPE:
                    aAttribList[cAttribs] = GLX_X_VISUAL_TYPE;
                    aAttribList[cAttribs + 1] =   pAttrib[1] == EGL_DONT_CARE ? GLX_DONT_CARE
                                                : pAttrib[1] == EGL_RGB_BUFFER ? GLX_TRUE_COLOR
                                                : pAttrib[1] == EGL_LUMINANCE_BUFFER ? GLX_GRAY_SCALE
                                                : GL_FALSE;
                    break;
                case EGL_CONFIG_CAVEAT:
                    cConfigCaveat =   pAttrib[1] == EGL_DONT_CARE ? GLX_DONT_CARE
                                    : pAttrib[1] == EGL_NONE ? GLX_NONE
                                    : pAttrib[1] == EGL_SLOW_CONFIG ? GLX_SLOW_CONFIG
                                    : pAttrib[1] == EGL_NON_CONFORMANT_CONFIG ? GLX_NON_CONFORMANT_CONFIG
                                    : GL_FALSE;
                    if (!cConfigCaveat)
                        return setEGLError(EGL_BAD_ATTRIBUTE);
                    cAttribs -= 2; /* handled via cConfigCaveat below, no pair emitted */
                    break;
                case EGL_CONFORMANT:
                    if (pAttrib[1] != EGL_OPENGL_BIT && pAttrib[1] != 0)
                        return setEGLError(EGL_BAD_ACCESS);
                    cConformant = pAttrib[1] == EGL_OPENGL_BIT ? GL_TRUE : GL_FALSE;
                    cAttribs -= 2; /* handled via cConformant below, no pair emitted */
                    break;
                case EGL_NATIVE_VISUAL_TYPE:
                    aAttribList[cAttribs] = GLX_X_VISUAL_TYPE;
                    aAttribList[cAttribs + 1] =   pAttrib[1] == EGL_DONT_CARE ? GLX_DONT_CARE
                                                : pAttrib[1] == StaticGray ? GLX_STATIC_GRAY
                                                : pAttrib[1] == StaticColor ? GLX_STATIC_COLOR
                                                : pAttrib[1] == TrueColor ? GLX_TRUE_COLOR
                                                : pAttrib[1] == GrayScale ? GLX_GRAY_SCALE
                                                : pAttrib[1] == PseudoColor ? GLX_PSEUDO_COLOR
                                                : pAttrib[1] == DirectColor ? GLX_DIRECT_COLOR
                                                : GL_FALSE;
                    break;
                case EGL_RENDERABLE_TYPE:
                    cRenderableType = pAttrib[1];
                    cAttribs -= 2; /* Fixed: without this, the unconditional
                                      "cAttribs += 2" below left two
                                      uninitialised slots in the GLX list. */
                    break;
                case EGL_SURFACE_TYPE:
                    if (pAttrib[1] & ~(EGL_PBUFFER_BIT | EGL_PIXMAP_BIT | EGL_WINDOW_BIT))
                        return setEGLError(EGL_BAD_ACCESS);
                    aAttribList[cAttribs] = GLX_DRAWABLE_TYPE;
                    aAttribList[cAttribs + 1] =   (pAttrib[1] & EGL_PBUFFER_BIT ? GLX_PBUFFER_BIT : 0)
                                                | (pAttrib[1] & EGL_PIXMAP_BIT ? GLX_PIXMAP_BIT : 0)
                                                | (pAttrib[1] & EGL_WINDOW_BIT ? GLX_WINDOW_BIT : 0);
                    break;
                case EGL_TRANSPARENT_TYPE:
                    aAttribList[cAttribs] = GLX_TRANSPARENT_TYPE;
                    aAttribList[cAttribs + 1] =   pAttrib[1] == EGL_DONT_CARE ? GLX_DONT_CARE
                                                : pAttrib[1] == EGL_NONE ? GLX_NONE
                                                : pAttrib[1] == EGL_TRANSPARENT_RGB ? GLX_TRANSPARENT_RGB
                                                : GL_FALSE;
                    break;
                default:
                    return setEGLError(EGL_BAD_ATTRIBUTE);
            }
            cAttribs += 2;
        }
    }
    if (cConfigCaveat != GLX_DONT_CARE || cConformant != GLX_DONT_CARE)
    {
        /* NOTE(review): cConfigCaveat holds a GLX_* value at this point, yet it
           is compared against EGL_SLOW_CONFIG - looks suspicious, confirm
           intent before changing (behavior preserved here). */
        aAttribList[cAttribs] = GLX_CONFIG_CAVEAT;
        aAttribList[cAttribs + 1] =   cConformant == GL_FALSE ? GLX_NON_CONFORMANT_CONFIG
                                    : cConfigCaveat == EGL_SLOW_CONFIG ? GLX_SLOW_CONFIG
                                    : GLX_NONE;
        cAttribs += 2;
    }
    /* We only do RGBA rendering through GLX. */
    aAttribList[cAttribs] = GLX_RENDER_TYPE;
    aAttribList[cAttribs + 1] = GLX_RGBA_BIT;
    cAttribs += 2;
    if (paAttribs != NULL)
    {
        /* Fixed: assert the bound *before* writing the terminator, not after. */
        AssertRelease(cAttribs < RT_ELEMENTS(aAttribList));
        aAttribList[cAttribs] = None;
        if (!(cRenderableType & EGL_OPENGL_BIT))
            return setEGLError(EGL_BAD_ACCESS);
    }
    paFBConfigs = glXChooseFBConfig(pDisplay, DefaultScreen(pDisplay), paAttribs != NULL ? aAttribList : NULL, &caFBConfigs);
    if (paFBConfigs == NULL)
        return setEGLError(EGL_BAD_ACCESS);
    *pcConfigs = caFBConfigs;
    for (i = 0; i < caConfigs && i < caFBConfigs; ++i)
        paConfigs[i] = (EGLConfig)paFBConfigs[i];
    XFree(paFBConfigs);
    return clearEGLError();
}
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect    The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    /* Only drop one nesting level; the section stays owned by this thread. */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
# if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        /* Clear ownership before dropping the locker count so contenders see a
           consistent state. */
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */

#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /* Ring-0 with interrupts disabled, or raw-mode context: we cannot
           signal the event semaphore here, so either leave without contention
           or defer the unlock to ring-3. */
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            /* Re-take ownership and fall back to the queued-leave path. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        /* The actual leave will be performed by ring-3 when it processes the
           VMCPU_FF_PDM_CRITSECT force-action flag. */
        PVM pVM = pCritSect->s.CTX_SUFF(pVM);
        AssertPtr(pVM);
        PVMCPU pVCpu = VMMGetCpu(pVM);
        AssertPtr(pVCpu);
        uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
/** * Handler for the 'remove-trusted-publisher' command. */ static RTEXITCODE cmdRemoveTrustedPublisher(int argc, char **argv) { /* * Parse arguments. */ static const RTGETOPTDEF s_aOptions[] = { { "--root", 'r', RTGETOPT_REQ_STRING }, }; const char *pszRootCert = NULL; const char *pszTrustedCert = NULL; int rc; RTGETOPTUNION ValueUnion; RTGETOPTSTATE GetState; RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0); while ((rc = RTGetOpt(&GetState, &ValueUnion))) { switch (rc) { case 'h': RTPrintf("Usage: VBoxCertUtil remove-trusted-publisher [--root <root-cert>] <trusted-cert>\n"); break; case 'V': RTPrintf("%sr%d\n", RTBldCfgVersion(), RTBldCfgRevision()); return RTEXITCODE_SUCCESS; case 'r': if (pszRootCert) return RTMsgErrorExit(RTEXITCODE_SUCCESS, "You've already specified '%s' as root certificate.", pszRootCert); pszRootCert = ValueUnion.psz; break; case VINF_GETOPT_NOT_OPTION: if (pszTrustedCert) return RTMsgErrorExit(RTEXITCODE_SUCCESS, "You've already specified '%s' as trusted certificate.", pszRootCert); pszTrustedCert = ValueUnion.psz; break; default: return RTGetOptPrintError(rc, &ValueUnion); } } if (!pszTrustedCert) return RTMsgErrorExit(RTEXITCODE_SUCCESS, "No trusted certificate specified."); /* * Do the job. */ if ( pszRootCert && !removeCertFromStoreByFile(CERT_SYSTEM_STORE_LOCAL_MACHINE, "Root", pszRootCert)) return RTEXITCODE_FAILURE; if (!removeCertFromStoreByFile(CERT_SYSTEM_STORE_LOCAL_MACHINE, "TrustedPublisher", pszTrustedCert)) return RTEXITCODE_FAILURE; if (g_cVerbosityLevel > 0) { if (pszRootCert) RTMsgInfo("Successfully removed '%s' as root and '%s' as trusted publisher", pszRootCert, pszTrustedCert); else RTMsgInfo("Successfully removed '%s' as trusted publisher", pszTrustedCert); } return RTEXITCODE_SUCCESS; }
/**
 * This is called on each EMT and will beat TM.
 *
 * Creates five timers alternating between the virtual and virtual-sync clocks,
 * then loops, arming/stopping them in a rotating pattern and periodically
 * running the timer queues, to stress the TM timer code from multiple EMTs.
 *
 * @returns VINF_SUCCESS, test failure is reported via RTTEST.
 * @param   pVM     Pointer to the VM.
 * @param   hTest   The test handle.
 */
DECLCALLBACK(int) tstTMWorker(PVM pVM, RTTEST hTest)
{
    VMCPUID idCpu = VMMGetCpuId(pVM);
    RTTestPrintfNl(hTest, RTTESTLVL_ALWAYS, "idCpu=%d STARTING\n", idCpu);

    /*
     * Create the test set.
     */
    int rc;
    PTMTIMER apTimers[5];
    for (size_t i = 0; i < RT_ELEMENTS(apTimers); i++)
    {
        /* Odd slots use the virtual clock, even slots the virtual-sync clock. */
        rc = TMR3TimerCreateInternal(pVM, i & 1 ? TMCLOCK_VIRTUAL : TMCLOCK_VIRTUAL_SYNC,
                                     tstTMDummyCallback, NULL, "test timer", &apTimers[i]);
        RTTEST_CHECK_RET(hTest, RT_SUCCESS(rc), rc);
    }

    /*
     * The run loop.
     */
    unsigned uPrevPct = 0;
    uint32_t const cLoops = 100000;
    for (uint32_t iLoop = 0; iLoop < cLoops; iLoop++)
    {
        /* Visit every timer once per iteration, starting at a rotating index. */
        size_t cLeft = RT_ELEMENTS(apTimers);
        unsigned i = iLoop % RT_ELEMENTS(apTimers);
        while (cLeft-- > 0)
        {
            PTMTIMER pTimer = apTimers[i];
            /* Stop the timer visited at the halfway point if it is active,
               otherwise (re-)arm with a small, varying microsecond expiry. */
            if (   cLeft == RT_ELEMENTS(apTimers) / 2
                && TMTimerIsActive(pTimer))
            {
                rc = TMTimerStop(pTimer);
                RTTEST_CHECK_MSG(hTest, RT_SUCCESS(rc), (hTest, "TMTimerStop: %Rrc\n", rc));
            }
            else
            {
                rc = TMTimerSetMicro(pTimer, 50 + cLeft);
                RTTEST_CHECK_MSG(hTest, RT_SUCCESS(rc), (hTest, "TMTimerSetMicro: %Rrc\n", rc));
            }

            /* next */
            i = (i + 1) % RT_ELEMENTS(apTimers);
        }
        /* Run the timer queues on most iterations (i has wrapped back to the
           starting index here). */
        if (i % 3)
            TMR3TimerQueuesDo(pVM);

        /* Progress report. */
        unsigned uPct = (unsigned)(100.0 * iLoop / cLoops);
        if (uPct != uPrevPct)
        {
            uPrevPct = uPct;
            if (!(uPct % 10))
                RTTestPrintfNl(hTest, RTTESTLVL_ALWAYS, "idCpu=%d - %3u%%\n", idCpu, uPct);
        }
    }

    RTTestPrintfNl(hTest, RTTESTLVL_ALWAYS, "idCpu=%d DONE\n", idCpu);
    return 0;
}