static void vboxApplyPatch(const char *psFuncName, void *pDst, const void *pSrc, unsigned long size)
{
    void *alPatch;
    int rv;

    /* Get the aligned start address of the page we're going to patch. */
    alPatch = (void *)((uintptr_t)pDst & ~(uintptr_t)(PAGESIZE - 1));

#ifndef VBOX_NO_MESA_PATCH_REPORTS
    crDebug("MProtecting: %p, %li", alPatch, (long)((char *)pDst - (char *)alPatch + size));
#endif

    /* Get write access to the mesa functions. */
    rv = RTMemProtect(alPatch, (char *)pDst - (char *)alPatch + size,
                      RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    if (RT_FAILURE(rv))
    {
        crError("mprotect failed with %x (%s)", rv, psFuncName);
    }

#ifndef VBOX_NO_MESA_PATCH_REPORTS
    crDebug("Writing %li bytes to %p from %p", size, pDst, pSrc);
#endif

    crMemcpy(pDst, pSrc, size);

    /** @todo Restore the protection; probably have to check what it was before us... */
    rv = RTMemProtect(alPatch, (char *)pDst - (char *)alPatch + size, RTMEM_PROT_READ | RTMEM_PROT_EXEC);
    if (RT_FAILURE(rv))
    {
        crError("mprotect2 failed with %x (%s)", rv, psFuncName);
    }
}
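/*
 * A minimal usage sketch, not from the original source: building a 5-byte
 * x86 "jmp rel32" and handing it to vboxApplyPatch() to redirect a function.
 * The names pfnMesaFunc and pfnHook are hypothetical placeholders, and the
 * rel32 form assumes the displacement fits in 32 bits (e.g. a 32-bit
 * process); int32_t comes from <stdint.h> or iprt/types.h.
 */
static void vboxHookExampleSketch(void *pfnMesaFunc, void *pfnHook)
{
    unsigned char aPatch[5];

    /* jmp rel32: opcode 0xe9 followed by a 32-bit displacement relative
       to the first byte after the 5-byte jump instruction. */
    aPatch[0] = 0xe9;
    *(int32_t *)&aPatch[1] = (int32_t)((uintptr_t)pfnHook - ((uintptr_t)pfnMesaFunc + 5));

    vboxApplyPatch("exampleSketch", pfnMesaFunc, aPatch, sizeof(aPatch));
}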
/** Generic implementation of krdrRTFileProtect. */
static int krdrRTFileGenericProtect(PKRDR pRdr, PKRDRFILEPREP pPrep, KU32 cSegments,
                                    PCKLDRSEG paSegments, KBOOL fUnprotectOrProtect)
{
    KU32 i;

    /*
     * Iterate the segments and apply memory protection changes.
     */
    for (i = 0; i < cSegments; i++)
    {
        int rc;
        void *pv;
        KPROT enmProt;

        if (paSegments[i].RVA == NIL_KLDRADDR)
            continue;

        /* Calc new protection. */
        enmProt = (KPROT)paSegments[i].enmProt; /** @todo drop cast */
        if (fUnprotectOrProtect)
        {
            switch (enmProt)
            {
                case KPROT_NOACCESS:
                case KPROT_READONLY:
                case KPROT_READWRITE:
                case KPROT_WRITECOPY:
                    enmProt = KPROT_READWRITE;
                    break;
                case KPROT_EXECUTE:
                case KPROT_EXECUTE_READ:
                case KPROT_EXECUTE_READWRITE:
                case KPROT_EXECUTE_WRITECOPY:
                    enmProt = KPROT_EXECUTE_READWRITE;
                    break;
                default:
                    AssertFailed();
                    return -1;
            }
        }
        else
        {
            /* Copy on write -> normal write. */
            if (enmProt == KPROT_EXECUTE_WRITECOPY)
                enmProt = KPROT_EXECUTE_READWRITE;
            else if (enmProt == KPROT_WRITECOPY)
                enmProt = KPROT_READWRITE;
        }

        pv = (KU8 *)pPrep->pv + paSegments[i].RVA;

        rc = RTMemProtect(pv, paSegments[i].cbMapped, krdrRTFileConvertProt(enmProt));
        if (rc)
            return rc; /* propagate the failure instead of silently returning success */
    }

    return 0;
}
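/*
 * The helper krdrRTFileConvertProt() is referenced above but not shown in
 * this excerpt. A plausible sketch of the KPROT -> RTMEM_PROT_* mapping it
 * has to perform, inferred from the flag names only; this is an assumption,
 * not the actual implementation:
 */
static unsigned krdrRTFileConvertProtSketch(KPROT enmProt)
{
    switch (enmProt)
    {
        case KPROT_NOACCESS:            return RTMEM_PROT_NONE;
        case KPROT_READONLY:            return RTMEM_PROT_READ;
        case KPROT_READWRITE:
        case KPROT_WRITECOPY:           return RTMEM_PROT_READ | RTMEM_PROT_WRITE;
        case KPROT_EXECUTE:             return RTMEM_PROT_EXEC;
        case KPROT_EXECUTE_READ:        return RTMEM_PROT_EXEC | RTMEM_PROT_READ;
        case KPROT_EXECUTE_READWRITE:
        case KPROT_EXECUTE_WRITECOPY:   return RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE;
        default:                        return RTMEM_PROT_NONE;
    }
}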
/**
 * Internal free.
 */
RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Simple case.
     */
    if (!pv)
        return;

    /*
     * Check watch points.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
        if (gapvRTMemFreeWatch[i] == pv)
            RTAssertDoPanic();

#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Find the block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
    if (pBlock)
    {
        if (gfRTMemFreeLog)
            RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);

# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        /*
         * Check whether the no man's land is untouched.
         */
# ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvWrong = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
                                     RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
                                     RTALLOC_EFENCE_NOMAN_FILLER);
# else
        /* Alignment must match allocation alignment in rtMemAlloc(). */
        void *pvWrong = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
                                     pBlock->cbAligned - pBlock->cbUnaligned,
                                     RTALLOC_EFENCE_NOMAN_FILLER);
        if (pvWrong)
            RTAssertDoPanic();
        pvWrong = ASMMemIsAll8((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK),
                               RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
                               RTALLOC_EFENCE_NOMAN_FILLER);
# endif
        if (pvWrong)
            RTAssertDoPanic();
# endif

# ifdef RTALLOC_EFENCE_FREE_FILL
        /*
         * Fill the user part of the block.
         */
        memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
# endif

# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
        /*
         * We're doing delayed freeing.
         * That means we'll expand the E-fence to cover the entire block.
         */
        int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            /*
             * Insert it into the free list and process pending frees.
             */
            rtmemBlockDelayInsert(pBlock);
            while ((pBlock = rtmemBlockDelayRemove()) != NULL)
            {
                pv = pBlock->Core.Key;
# ifdef RTALLOC_EFENCE_IN_FRONT
                void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
# else
                void *pvBlock = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
# endif
                size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
                rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                    RTMemPageFree(pvBlock, cbBlock);
                else
                    rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
                rtmemBlockFree(pBlock);
            }
        }
        else
            rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%#x, rc=%d.\n", pv, pBlock->cbAligned, rc);

# else  /* !RTALLOC_EFENCE_FREE_DELAYED */
        /*
         * Turn off the E-fence and free it.
         */
# ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvBlock  = (char *)pv - RTALLOC_EFENCE_SIZE;
        void *pvEFence = pvBlock;
# else
        void *pvBlock  = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
        void *pvEFence = (char *)pv + pBlock->cbAligned;
# endif
        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
        else
            rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
        rtmemBlockFree(pBlock);
# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
    }
    else
        rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);

#else  /* !RTALLOC_EFENCE_TRACE */
    /*
     * We have no size tracking, so we're not doing any freeing because
     * we cannot tell if the E-fence is after the block.
     * Let's just expand the E-fence to the first page of the user bit
     * since we know that it's around.
     */
    int rc = RTMemProtect((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
    if (RT_FAILURE(rc))
        rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n",
                      (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), rc);
#endif /* !RTALLOC_EFENCE_TRACE */
}
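/*
 * Why delayed freeing keeps the whole block RTMEM_PROT_NONE: any touch of a
 * quarantined block faults immediately instead of silently corrupting a
 * recycled allocation. A minimal sketch of the effect, assuming the e-fence
 * variant of RTMemAlloc/RTMemFree above is active; the names here are
 * illustrative only:
 */
static void rtMemEfenceUseAfterFreeSketch(void)
{
    char *pb = (char *)RTMemAlloc(64);
    pb[0] = 'x';            /* fine: the block is live */
    RTMemFree(pb);
    /* pb[0] = 'y'; */      /* with RTALLOC_EFENCE_FREE_DELAYED > 0 this traps:
                               the page stays RTMEM_PROT_NONE until the block
                               leaves the delay list and is really freed. */
}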
/**
 * Internal allocator.
 */
RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Sanity.
     */
    if (    RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
        ||  RTALLOC_EFENCE_SIZE <= 0)
    {
        rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
        return NULL;
    }
    if (!cbUnaligned)
    {
#if 0
        rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
        return NULL;
#else
        cbAligned = cbUnaligned = 1;
#endif
    }

#ifndef RTALLOC_EFENCE_IN_FRONT
    /* Alignment decreases fence accuracy, but this is at least partially
     * counteracted by filling and checking the alignment padding. When the
     * fence is in front, no extra alignment is needed. */
    cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
#endif

#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Allocate the trace block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!pBlock)
    {
        rtmemComplain(pszOp, "Failed to allocate trace block!\n");
        return NULL;
    }
#endif

    /*
     * Allocate a block with page alignment space + the size of the E-fence.
     */
    size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
    void *pvBlock = RTMemPageAlloc(cbBlock);
    if (pvBlock)
    {
        /*
         * Calc the start of the fence and the user block
         * and then change the page protection of the fence.
         */
#ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvEFence = pvBlock;
        void *pv       = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
# endif
#else
        void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
        void *pv       = (char *)pvEFence - cbAligned;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
# endif
#endif

#ifdef RTALLOC_EFENCE_FENCE_FILLER
        memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
#endif
        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
#ifdef RTALLOC_EFENCE_TRACE
            rtmemBlockInsert(pBlock, pv);
#endif
            if (enmType == RTMEMTYPE_RTMEMALLOCZ)
                memset(pv, 0, cbUnaligned);
#ifdef RTALLOC_EFENCE_FILLER
            else
                memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
#endif

            rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n",
                     pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
            return pv;
        }
        rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
        RTMemPageFree(pvBlock, cbBlock);
    }
    else
        rtmemComplain(pszOp, "Failed to allocate %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);

#ifdef RTALLOC_EFENCE_TRACE
    rtmemBlockFree(pBlock);
#endif
    return NULL;
}
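/*
 * Block layout for the fence-after-block case (RTALLOC_EFENCE_IN_FRONT not
 * defined), restated from the pointer math above. The sketch below simply
 * recomputes the addresses and asserts the key invariant: the user block
 * ends exactly where the PROT_NONE fence begins, so the first byte past the
 * block faults. The cbAligned value is illustrative only.
 */
static void rtMemEfenceLayoutSketch(void)
{
    size_t const cbAligned = 0x30;
    size_t const cbBlock   = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
    char  *pvBlock  = (char *)RTMemPageAlloc(cbBlock);
    char  *pvEFence = pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);  /* fence at the end */
    char  *pv       = pvEFence - cbAligned;                       /* user block just before it */

    /* pv + cbAligned == pvEFence: an off-by-one write at pv[cbAligned] traps. */
    Assert(pv + cbAligned == pvEFence);

    RTMemPageFree(pvBlock, cbBlock);
}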
static void rtMemReplaceMallocAndFriends(void)
{
    struct
    {
        const char *pszName;
        PFNRT       pfnReplacement;
        PFNRT       pfnOrg;
        PFNRT      *ppfnJumpBack;
    } aApis[] =
    {
        { "free",    (PFNRT)rtMemReplacementFree,    (PFNRT)free,    (PFNRT *)&g_pfnOrgFree },
        { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
        { "calloc",  (PFNRT)rtMemReplacementCalloc,  (PFNRT)calloc,  (PFNRT *)&g_pfnOrgCalloc },
        { "malloc",  (PFNRT)rtMemReplacementMalloc,  (PFNRT)malloc,  (PFNRT *)&g_pfnOrgMalloc },
#ifdef RT_OS_DARWIN
        { "malloc_size", (PFNRT)rtMemReplacementMallocSize, (PFNRT)malloc_size, (PFNRT *)&g_pfnOrgMallocSize },
#endif
    };

    /*
     * Initialize the jump backs to avoid recursively entering this function.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
        *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;

    /*
     * Give the user an option to skip replacing malloc.
     */
    if (getenv("IPRT_DONT_REPLACE_MALLOC"))
        return;

    /*
     * Allocate a page for jump back code (we leak it).
     */
    uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(PAGE_SIZE);
    AssertFatal(pbExecPage);
    int rc = RTMemProtect(pbExecPage, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    AssertFatalRC(rc);

    /*
     * Do the ground work.
     */
    uint8_t *pb = pbExecPage;
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        /* Resolve it. */
        PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
        if (pfnOrg)
            aApis[i].pfnOrg = pfnOrg;
        else
            pfnOrg = aApis[i].pfnOrg;

        /* Figure out what we can replace and how much to duplicate in the jump back code. */
# ifdef RT_ARCH_AMD64
        uint32_t const   cbNeeded   = 12;
        DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
# elif defined(RT_ARCH_X86)
        uint32_t const   cbNeeded   = 5;
        DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
# else
# error "Port me"
# endif
        uint32_t offJmpBack = 0;
        uint32_t cbCopy     = 0;
        while (offJmpBack < cbNeeded)
        {
            DISCPUSTATE Dis;
            uint32_t cbInstr = 1;
            rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr);
            AssertFatalRC(rc);
            AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
# ifdef RT_ARCH_AMD64
#  ifdef RT_OS_DARWIN
            /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
            DISQPVPARAMVAL Parm;
            if (   Dis.ModRM.Bits.Mod == 0
                && Dis.ModRM.Bits.Rm  == 5 /* wrt RIP */
                && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
                && Dis.Param2.uValue == 1
                && Dis.pCurInstr->uOpcode == OP_CMP)
            {
                cbCopy = offJmpBack;

                offJmpBack += cbInstr;
                rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr);
                AssertFatalRC(rc);
                if (   Dis.pCurInstr->uOpcode == OP_JNBE
                    && Dis.Param1.uDisp.i8 == 5)
                {
                    offJmpBack += cbInstr + 5;
                    AssertFatal(offJmpBack >= cbNeeded);
                    break;
                }
            }
#  endif
            AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
# endif
            offJmpBack += cbInstr;
        }
        if (!cbCopy)
            cbCopy = offJmpBack;

        /* Assemble the jump back. */
        memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
        uint32_t off = cbCopy;
# ifdef RT_ARCH_AMD64
        pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
        pb[off++] = 0x25;
        *(uint32_t *)&pb[off] = 0;
        off += 4;
        *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
        off += 8;
        off = RT_ALIGN_32(off, 16);
# elif defined(RT_ARCH_X86)
        pb[off++] = 0xe9; /* jmp rel32 */
        *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[off + 4];
        off += 4;
        off = RT_ALIGN_32(off, 8);
# else
# error "Port me"
# endif
        *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
        pb += off;
    }

    /*
     * Modify the APIs.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
        rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
        AssertFatalRC(rc);

# ifdef RT_ARCH_AMD64
        /* Assemble the patch: mov rax, imm64; jmp rax. */
        *pb++ = 0x48; /* mov rax, qword */
        *pb++ = 0xb8;
        *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
        pb += 8;
        *pb++ = 0xff; /* jmp rax */
        *pb++ = 0xe0;
# elif defined(RT_ARCH_X86)
        *pb++ = 0xe9; /* jmp rel32 */
        *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
# else
# error "Port me"
# endif
    }
}
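/*
 * A minimal sketch, not part of the original file, of the amd64 patch the
 * loop above writes: mov rax, imm64 / jmp rax is exactly 12 bytes, which is
 * why cbNeeded is 12 on RT_ARCH_AMD64. The caller must have made pbCode
 * writable and executable first (e.g. via RTMemProtect as above).
 */
static void rtMemWriteAbsJmpSketch(uint8_t *pbCode, uintptr_t uTarget)
{
    pbCode[0] = 0x48;                   /* REX.W prefix */
    pbCode[1] = 0xb8;                   /* mov rax, imm64 */
    *(uint64_t *)&pbCode[2] = uTarget;  /* the 8-byte absolute target */
    pbCode[10] = 0xff;                  /* jmp rax */
    pbCode[11] = 0xe0;
}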
int main()
{
    /*
     * Set up the test environment.
     */
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitAndCreate("tstX86-1", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    g_pbEfPage = (uint8_t *)RTTestGuardedAllocTail(hTest, PAGE_SIZE);
    RTTESTI_CHECK(g_pbEfPage != NULL);

    g_pbEfExecPage = (uint8_t *)RTMemExecAlloc(PAGE_SIZE * 2);
    RTTESTI_CHECK(g_pbEfExecPage != NULL);
    RTTESTI_CHECK(!((uintptr_t)g_pbEfExecPage & PAGE_OFFSET_MASK));
    RTTESTI_CHECK_RC(RTMemProtect(g_pbEfExecPage + PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE), VINF_SUCCESS);

#ifdef USE_SIGNAL
    static int const s_aiSigs[] = { SIGBUS, SIGSEGV, SIGFPE, SIGILL };
    for (unsigned i = 0; i < RT_ELEMENTS(s_aiSigs); i++)
    {
        struct sigaction SigAct;
        RTTESTI_CHECK_BREAK(sigaction(s_aiSigs[i], NULL, &SigAct) == 0);
        SigAct.sa_sigaction = sigHandler;
        SigAct.sa_flags    |= SA_SIGINFO;
        RTTESTI_CHECK(sigaction(s_aiSigs[i], &SigAct, NULL) == 0);
    }
#else
    /** @todo implement me. */
#endif

    if (!RTTestErrorCount(hTest))
    {
        /*
         * Do the testing.
         */
        int32_t rc;

#if 0
        RTTestSub(hTest, "Misc 1");
        rc = x861_Test1();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test1 -> %d", rc);

        RTTestSub(hTest, "Prefixes and groups");
        rc = x861_Test2();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test2 -> %d", rc);

        RTTestSub(hTest, "fxsave / fxrstor and #PFs");
        rc = x861_Test3();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test3 -> %d", rc);

        RTTestSub(hTest, "Multibyte NOPs");
        rc = x861_Test4();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test4 -> %d", rc);
//#endif

        RTTestSub(hTest, "Odd encodings and odd ends");
        rc = x861_Test5();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test5 -> %d", rc);

//#if 0
        RTTestSub(hTest, "Odd floating point encodings");
        rc = x861_Test6();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test6 -> %d", rc);

        RTTestSub(hTest, "Floating point exceptions ++");
        rc = x861_Test7();
        if (rc != 0)
            RTTestFailed(hTest, "x861_Test7 -> %d", rc);
#endif

        rc = x861_TestFPUInstr1();
        if (rc != 0)
            RTTestFailed(hTest, "x861_TestFPUInstr1 -> %d", rc);
    }

    return RTTestSummaryAndDestroy(hTest);
}
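/*
 * sigHandler() is installed above but not shown in this excerpt. A plausible
 * minimal sketch of what such a handler does (an assumption, not the test's
 * actual handler): record the signal and siglongjmp back into the test so a
 * deliberate trap on the guard page does not kill the process. The globals
 * g_JmpBuf and g_iLastSignal are hypothetical names for this sketch.
 */
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf            g_JmpBuf;      /* hypothetical: armed via sigsetjmp() by the test */
static volatile sig_atomic_t g_iLastSignal; /* hypothetical: last signal observed */

static void sigHandlerSketch(int iSig, siginfo_t *pInfo, void *pvCtx)
{
    (void)pInfo; (void)pvCtx;
    g_iLastSignal = iSig;
    siglongjmp(g_JmpBuf, 1); /* resume at the sigsetjmp() in the test code */
}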