/** * Raises a generic debug event if enabled and not being ignored. * * @returns Strict VBox status code. * @retval VINF_EM_DBG_EVENT if the event was raised and the caller should * return ASAP to the debugger (via EM). * @retval VINF_SUCCESS if the event was disabled or ignored. * * @param pVM The cross context VM structure. * @param pVCpu The cross context virtual CPU structure. * @param enmEvent The generic event being raised. * @param uEventArg The argument of that event. * @param enmCtx The context in which this event is being raised. * * @thread EMT(pVCpu) */ VMM_INT_DECL(VBOXSTRICTRC) DBGFEventGenericWithArg(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent, uint64_t uEventArg, DBGFEVENTCTX enmCtx) { /* * Is it enabled. */ if (dbgfEventIsGenericWithArgEnabled(pVM, enmEvent, uEventArg)) { /* * Any events on the stack. Should the incoming event be ignored? */ uint64_t const rip = CPUMGetGuestRIP(pVCpu); uint32_t i = pVCpu->dbgf.s.cEvents; if (i > 0) { while (i-- > 0) { if ( pVCpu->dbgf.s.aEvents[i].Event.enmType == enmEvent && pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE && pVCpu->dbgf.s.aEvents[i].rip == rip) { pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_RESTORABLE; return VINF_SUCCESS; } Assert(pVCpu->dbgf.s.aEvents[i].enmState != DBGFEVENTSTATE_CURRENT); } /* * Trim the event stack. */ i = pVCpu->dbgf.s.cEvents; while (i-- > 0) { if ( pVCpu->dbgf.s.aEvents[i].rip == rip && ( pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_RESTORABLE || pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE) ) pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_IGNORE; else { if (i + 1 != pVCpu->dbgf.s.cEvents) memmove(&pVCpu->dbgf.s.aEvents[i], &pVCpu->dbgf.s.aEvents[i + 1], (pVCpu->dbgf.s.cEvents - i) * sizeof(pVCpu->dbgf.s.aEvents)); pVCpu->dbgf.s.cEvents--; } } i = pVCpu->dbgf.s.cEvents; AssertStmt(i < RT_ELEMENTS(pVCpu->dbgf.s.aEvents), i = RT_ELEMENTS(pVCpu->dbgf.s.aEvents) - 1); } /* * Push the event. 
*/ pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_CURRENT; pVCpu->dbgf.s.aEvents[i].rip = rip; pVCpu->dbgf.s.aEvents[i].Event.enmType = enmEvent; pVCpu->dbgf.s.aEvents[i].Event.enmCtx = enmCtx; pVCpu->dbgf.s.aEvents[i].Event.u.Generic.uArg = uEventArg; pVCpu->dbgf.s.cEvents = i + 1; return VINF_EM_DBG_EVENT; } return VINF_SUCCESS; }
/**
 * Write access handler for monitored guest code pages.
 *
 * NOTE(review): the declaration specifier / return-type line is not visible in
 * this chunk (the definition here begins with the function name); presumably a
 * PGM access-handler callback returning VBOXSTRICTRC -- confirm against the
 * preceding line in the full file.
 *
 * Ignores writes that change nothing, hands the write to PATM in ring-3, and
 * in raw-mode context tries to resolve patch-page writes locally, otherwise
 * queues the page as dirty and schedules ring-3 action.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   GCPtr           Guest address being written (may be an alias).
 * @param   pvPtr           Host mapping of the written location.
 * @param   pvBuf           The data being written.
 * @param   cbBuf           Number of bytes being written.
 * @param   enmAccessType   Access type; asserted to be PGMACCESSTYPE_WRITE.
 * @param   enmOrigin       Origin of the access (unused here).
 * @param   pvUser          User argument (unused here).
 */
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    /* Only safe to memcmp when the write doesn't straddle a page boundary
       (pvPtr maps a single page). */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
    bool const fPatchCode = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        /* Supervisor-mode write: let PATM try to handle a write to a patch page
           without going to ring-3. */
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases. So,
     * take down both alternatives.
     */
    /* NOTE(review): both slots are filled with GCPtr here despite the alias
       comment above -- presumably the base/fault addresses coincide on this
       path; verify against csamRCCodePageWritePfHandler. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    /* Dirty-page queue full: force a trip to ring-3 to flush it. */
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}