/**
 * Worker that resolves a debugger address to a volatile ring-3 pointer.
 *
 * Called on the EMT for the VCpu.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   idCpu       The ID of the CPU context.
 * @param   pAddress    The address.
 * @param   fReadOnly   Whether returning a read-only page is fine or not.
 * @param   ppvR3Ptr    Where to return the address.
 */
static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly,
                                                         void **ppvR3Ptr)
{
    Assert(idCpu == VMMGetCpuId(pVM));

    /*
     * Hypervisor memory area addresses are translated via MM.
     */
    if (pAddress->fFlags & DBGFADDRESS_FLAGS_HMA)
    {
        /** @todo create some dedicated errors for this stuff. */
        /** @todo this may assert, create a debug version of this which doesn't. */
        if (MMHyperIsInsideArea(pVM, pAddress->FlatPtr))
        {
            void *pvHyper = MMHyperRCToCC(pVM, (RTRCPTR)pAddress->FlatPtr);
            if (pvHyper)
            {
                *ppvR3Ptr = pvHyper;
                return VINF_SUCCESS;
            }
        }
        return VERR_NOT_SUPPORTED;
    }

    /*
     * Guest physical or virtual address: map the page and immediately drop
     * the lock again.  A tad ugly, but it gets the job done — the returned
     * pointer is only valid until the page mapping is recycled ("volatile").
     */
    PGMPAGEMAPLOCK PgMpLck;
    int            rc;
    if (pAddress->fFlags & DBGFADDRESS_FLAGS_PHYS)
        rc = fReadOnly
           ? PGMPhysGCPhys2CCPtrReadOnly(pVM, pAddress->FlatPtr, (void const **)ppvR3Ptr, &PgMpLck)
           : PGMPhysGCPhys2CCPtr(pVM, pAddress->FlatPtr, ppvR3Ptr, &PgMpLck);
    else
    {
        PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
        rc = fReadOnly
           ? PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pAddress->FlatPtr, (void const **)ppvR3Ptr, &PgMpLck)
           : PGMPhysGCPtr2CCPtr(pVCpu, pAddress->FlatPtr, ppvR3Ptr, &PgMpLck);
    }
    if (RT_SUCCESS(rc))
        PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    return rc;
}
/**
 * Check Windows XP sysenter heuristics and install patch
 *
 * Verifies that the code around @a pInstrGC matches the known
 * KiFastSystemCall epilogue, locates the nearby KiIntSystemCall stub, and
 * overwrites the sysenter instruction with a near jump to it.
 *
 * @returns VBox status code (VERR_PATCHING_REFUSED if the heuristics do not
 *          match or the guest memory cannot be read/written).
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    GC Instruction pointer for sysenter
 * @param   pPatchRec   Patch structure
 *
 */
int PATMPatchSysenterXP(PVM pVM, RTGCPTR32 pInstrGC, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO  pPatch = &pPatchRec->patch;
    uint8_t     uTemp[16];
    RTGCPTR32   lpfnKiFastSystemCall, lpfnKiIntSystemCall = 0; /* (initializing it to shut up warning.) */
    int         rc, i;
    PVMCPU      pVCpu = VMMGetCpu0(pVM);

    Assert(sizeof(uTemp) > sizeof(uFnKiIntSystemCall));
    Assert(sizeof(uTemp) > sizeof(uFnKiFastSystemCall));

    /* Guest OS specific patch; check heuristics first */

    /* check the epilog of KiFastSystemCall */
    lpfnKiFastSystemCall = pInstrGC - 2;
    rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, lpfnKiFastSystemCall, sizeof(uFnKiFastSystemCall));
    if (    RT_FAILURE(rc)
        ||  memcmp(uFnKiFastSystemCall, uTemp, sizeof(uFnKiFastSystemCall)))
    {
        return VERR_PATCHING_REFUSED;
    }

    /* Now search for KiIntSystemCall */
    for (i = 0; i < 64; i++)
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, pInstrGC + i, sizeof(uFnKiIntSystemCall));
        if (RT_FAILURE(rc))
        {
            break;
        }
        if (!memcmp(uFnKiIntSystemCall, uTemp, sizeof(uFnKiIntSystemCall)))
        {
            lpfnKiIntSystemCall = pInstrGC + i;
            /* Found it! */
            break;
        }
    }
    /* Note: also refuse when the loop broke out early on a read failure; in
       that case i < 64 but lpfnKiIntSystemCall is still 0 and we must not
       proceed with a bogus target address. */
    if (    i == 64
        ||  lpfnKiIntSystemCall == 0)
    {
        Log(("KiIntSystemCall not found!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    /* Both stubs must share a page so a single jump displacement stays valid. */
    if (PAGE_ADDRESS(lpfnKiFastSystemCall) != PAGE_ADDRESS(lpfnKiIntSystemCall))
    {
        Log(("KiFastSystemCall and KiIntSystemCall not in the same page!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    // make a copy of the guest code bytes that will be overwritten
    rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aPrivInstr, pPatch->pPrivInstrGC, SIZEOF_NEARJUMP32);
    AssertRC(rc);

    /* Now we simply jump from the fast version to the 'old and slow' system call */
    uTemp[0] = 0xE9;    /* jmp rel32 opcode */
    *(RTGCPTR32 *)&uTemp[1] = lpfnKiIntSystemCall - (pInstrGC + SIZEOF_NEARJUMP32);
    rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, pInstrGC, uTemp, SIZEOF_NEARJUMP32);
    if (RT_FAILURE(rc))
    {
        Log(("PGMPhysSimpleDirtyWriteGCPtr failed with rc=%Rrc!!\n", rc));
        return VERR_PATCHING_REFUSED;
    }

#ifdef LOG_ENABLED
    Log(("Sysenter Patch code ----------------------------------------------------------\n"));
    PATMP2GLOOKUPREC cacheRec;
    RT_ZERO(cacheRec);
    cacheRec.pPatch = pPatch;

    patmr3DisasmCodeStream(pVM, pInstrGC, pInstrGC, patmr3DisasmCallback, &cacheRec);
    /* Free leftover lock if any. */
    if (cacheRec.Lock.pvMap)
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
    Log(("Sysenter Patch code ends -----------------------------------------------------\n"));
#endif

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;
}