/**
 * Converts an address to a guest physical address.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_INVALID_PARAMETER if the address is invalid.
 * @retval  VERR_INVALID_STATE if the VM is being terminated or if the virtual
 *          CPU handle is invalid.
 * @retval  VERR_NOT_SUPPORTED is the type of address cannot be converted.
 * @retval  VERR_PAGE_NOT_PRESENT
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT
 * @retval  VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
 * @retval  VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
 *
 * @param   pVM         The VM handle.
 * @param   idCpu       The ID of the CPU context to convert virtual
 *                      addresses.
 * @param   pAddress    The address.
 * @param   pGCPhys     Where to return the physical address.
 */
VMMR3DECL(int) DBGFR3AddrToPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
{
    /*
     * Validate the input.  The output is set to NIL up front so callers get a
     * defined value even on the failure paths below.
     */
    AssertPtr(pGCPhys);
    *pGCPhys = NIL_RTGCPHYS;
    AssertPtr(pAddress);
    AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);

    /*
     * Dispatch on the address type, taking the cheap cases first.
     */
    /* Hypervisor memory area addresses cannot be translated to guest physical. */
    if (pAddress->fFlags & DBGFADDRESS_FLAGS_HMA)
        return VERR_NOT_SUPPORTED;

    /* Already a physical address - just copy it out. */
    if (pAddress->fFlags & DBGFADDRESS_FLAGS_PHYS)
    {
        *pGCPhys = pAddress->FlatPtr;
        return VINF_SUCCESS;
    }

    /*
     * Virtual address: the page table walk must run on the owning EMT, so
     * either do it directly or hand it off via a synchronous request.
     */
    PVMCPU pVCpuDst = VMMGetCpuById(pVM, idCpu);
    if (VMCPU_IS_EMT(pVCpuDst))
        return dbgfR3AddrToPhysOnVCpu(pVCpuDst, pAddress, pGCPhys);
    return VMR3ReqCallWait(pVCpuDst->pVMR3, pVCpuDst->idCpu,
                           (PFNRT)dbgfR3AddrToPhysOnVCpu, 3, pVCpuDst, pAddress, pGCPhys);
}
/*
 * Access handler callback for writes to CSAM-monitored guest code pages.
 *
 * NOTE(review): the storage-class/return-type declarator for this function
 * precedes this chunk and is not visible here; going by the VINF_* returns it
 * presumably yields a VBOXSTRICTRC-compatible status - confirm against the
 * declaration.
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The virtual CPU doing the write (asserted EMT).
 * @param   GCPtr           Guest virtual address being written.
 * @param   pvPtr           Host mapping of the written guest memory.
 * @param   pvBuf           The data being written.
 * @param   cbBuf           Number of bytes being written.
 * @param   enmAccessType   Access type; asserted to be PGMACCESSTYPE_WRITE.
 * @param   enmOrigin       Origin of the access (unused here).
 * @param   pvUser          User argument (unused here).
 */
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     * (Only valid when the write does not straddle a page boundary, hence the
     * PAGE_ADDRESS check on the first and last byte.)
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl        = CPUMGetGuestCPL(pVCpu);
    bool const     fPatchCode = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE   pPATMGCState = PATMGetGCState(pVM);
    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        /* Ring 0-2: let PATM try to handle the write to a patched page inline. */
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        /* Anything else means PATM didn't know the page; fall through to scheduling. */
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases. So,
     * take down both alternatives.
     * (NOTE(review): both slots are filled with the same GCPtr here; the
     * alias-capable variant presumably lives in the #PF handler - confirm.)
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    /* If the dirty-page table is now full, force a trip to ring-3 to flush it. */
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}