/**
 * Scan guest memory for an exact byte string.
 *
 * EMT worker for DBGFR3MemScan; runs on the EMT of @a idCpu.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to search in.
 * @param   pAddress    The address to start scanning at.
 * @param   pcbRange    The number of bytes to scan.  Passed as a pointer because
 *                      it may be 64-bit.
 * @param   puAlign     The alignment restriction imposed on the search result.
 *                      Passed as a pointer for the same reason as pcbRange.
 * @param   pabNeedle   What to search for - exact search.
 * @param   cbNeedle    Size of the search byte string.
 * @param   pHitAddress Where to put the address of the first hit.
 */
static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
                                       RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle,
                                       PDBGFADDRESS pHitAddress)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    Assert(idCpu == VMMGetCpuId(pVM));

    /*
     * Validate the input we use, PGM does the rest.
     */
    RTGCUINTPTR cbRange = *pcbRange;
    if (!DBGFR3AddrIsValid(pUVM, pAddress))
        return VERR_INVALID_POINTER;
    if (!VALID_PTR(pHitAddress))
        return VERR_INVALID_POINTER;
    if (DBGFADDRESS_IS_HMA(pAddress))
        return VERR_INVALID_POINTER;

    /*
     * Select DBGF worker by addressing mode.
     * Physical scan is used for explicit physical addresses and for unpaged
     * guest modes (real / non-paged protected); everything else goes through
     * the virtual (paging-aware) scanner.
     */
    int     rc;
    PVMCPU  pVCpu   = VMMGetCpuById(pVM, idCpu);
    PGMMODE enmMode = PGMGetGuestMode(pVCpu);
    if (    enmMode == PGMMODE_REAL
        ||  enmMode == PGMMODE_PROTECTED
        ||  DBGFADDRESS_IS_PHYS(pAddress)
        )
    {
        /* The alignment must survive the narrowing to RTGCPHYS. */
        RTGCPHYS GCPhysAlign = *puAlign;
        if (GCPhysAlign != *puAlign)
            return VERR_OUT_OF_RANGE;
        RTGCPHYS PhysHit;
        rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
    }
    else
    {
#if GC_ARCH_BITS > 32
        /* Ranges reaching above 4G make no sense unless the guest is in long mode. */
        if (    (   pAddress->FlatPtr >= _4G
                 || pAddress->FlatPtr + cbRange > _4G)
            &&  enmMode != PGMMODE_AMD64
            &&  enmMode != PGMMODE_AMD64_NX)
            return VERR_DBGF_MEM_NOT_FOUND;
#endif
        RTGCUINTPTR GCPtrHit;
        rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
    }

    return rc;
}
/**
 * @interface_method_impl{DBGCCMDHLP,pfnVarToDbgfAddr}
 */
static DECLCALLBACK(int) dbgcHlpVarToDbgfAddr(PDBGCCMDHLP pCmdHlp, PCDBGCVAR pVar, PDBGFADDRESS pAddress)
{
    PDBGC pDbgc = DBGC_CMDHLP2DBGC(pCmdHlp);
    AssertPtr(pVar);
    AssertPtr(pAddress);

    switch (pVar->enmType)
    {
        /* Flat guest addresses and plain numbers convert directly. */
        case DBGCVAR_TYPE_GC_FLAT:
            DBGFR3AddrFromFlat(pDbgc->pUVM, pAddress, pVar->u.GCFlat);
            return VINF_SUCCESS;

        case DBGCVAR_TYPE_NUMBER:
            DBGFR3AddrFromFlat(pDbgc->pUVM, pAddress, (RTGCUINTPTR)pVar->u.u64Number);
            return VINF_SUCCESS;

        /* Far addresses require a selector lookup on the current CPU. */
        case DBGCVAR_TYPE_GC_FAR:
            return DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, pAddress, pVar->u.GCFar.sel, pVar->u.GCFar.off);

        case DBGCVAR_TYPE_GC_PHYS:
            DBGFR3AddrFromPhys(pDbgc->pUVM, pAddress, pVar->u.GCPhys);
            return VINF_SUCCESS;

        /* Symbols: evaluate to an address variable, then convert that. */
        case DBGCVAR_TYPE_SYMBOL:
        {
            DBGCVAR Var;
            int rc = DBGCCmdHlpEval(&pDbgc->CmdHlp, &Var, "%%(%DV)", pVar);
            if (RT_FAILURE(rc))
                return rc;
            return dbgcHlpVarToDbgfAddr(pCmdHlp, &Var, pAddress);
        }

        /* Host addresses and strings have no guest-address representation. */
        case DBGCVAR_TYPE_STRING:
        case DBGCVAR_TYPE_HC_FLAT:
        case DBGCVAR_TYPE_HC_PHYS:
        default:
            return VERR_DBGC_PARSE_CONVERSION_FAILED;
    }
}
/**
 * Disarms an int 3 breakpoint.
 * This is used to implement both DBGFR3BpClear() and DBGFR3BpDisable().
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pBp     The breakpoint.
 */
static int dbgfR3BpInt3Disarm(PVM pVM, PDBGFBP pBp)
{
    /** @todo SMP support! */
    VMCPUID idCpu = 0;

    /*
     * Check that the current byte is the int3 instruction, and restore the original one.
     * We currently ignore invalid bytes.
     */
    DBGFADDRESS Addr;
    DBGFR3AddrFromFlat(pVM, &Addr, pBp->GCPtr);
    uint8_t bCurrent = 0;
    int rc = DBGFR3MemRead(pVM, idCpu, &Addr, &bCurrent, 1);
    /* Only restore if the read succeeded and the int3 opcode is still in place;
       previously bCurrent was inspected even when the read failed, which read
       an uninitialized byte (undefined behavior). */
    if (RT_SUCCESS(rc) && bCurrent == 0xcc)
        rc = DBGFR3MemWrite(pVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
    return rc;
}
/**
 * @copydoc DBGFOSREG::pfnProbe
 */
static DECLCALLBACK(bool) dbgDiggerLinuxProbe(PUVM pUVM, void *pvData)
{
    PDBGDIGGERLINUX pThis = (PDBGDIGGERLINUX)pvData;

    /*
     * Look for "Linux version " at the start of the rodata segment.
     * Hope that this comes before any message buffer or other similar string.
     */
    static const uint8_t s_abNeedle[] = "Linux version ";
    for (unsigned iAddr = 0; iAddr < RT_ELEMENTS(g_au64LnxKernelAddresses); iAddr++)
    {
        DBGFADDRESS AddrBase;
        DBGFR3AddrFromFlat(pUVM, &AddrBase, g_au64LnxKernelAddresses[iAddr]);

        DBGFADDRESS AddrHit;
        int rc = DBGFR3MemScan(pUVM, 0, &AddrBase, LNX_MAX_KERNEL_SIZE, 1,
                               s_abNeedle, sizeof(s_abNeedle) - 1, &AddrHit);
        if (RT_FAILURE(rc))
            continue;

        /* Read the banner and sanity check the version number following the needle. */
        char szBanner[128];
        rc = DBGFR3MemReadString(pUVM, 0, &AddrHit, szBanner, sizeof(szBanner));
        if (RT_FAILURE(rc))
            continue;

        char const *pszVer = &szBanner[sizeof(s_abNeedle) - 1];
        bool const fVer2x  =    pszVer[0] == '2'                       /* 2.x.y with x in {0..6} */
                             && pszVer[1] == '.'
                             && pszVer[2] >= '0' && pszVer[2] <= '6';
        bool const fVer3up =    pszVer[0] >= '3' && pszVer[0] <= '9'   /* 3.x, 4.x, ... 9.x */
                             && pszVer[1] == '.'
                             && pszVer[2] >= '0' && pszVer[2] <= '9';
        if (fVer2x || fVer3up)
        {
            pThis->AddrKernelBase  = AddrBase;
            pThis->AddrLinuxBanner = AddrHit;
            return true;
        }
    }
    return false;
}
/**
 * Arms an int 3 breakpoint.
 * This is used to implement both DBGFR3BpSetReg() and DBGFR3BpEnable().
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pBp     The breakpoint.
 */
static int dbgfR3BpInt3Arm(PVM pVM, PDBGFBP pBp)
{
    /** @todo should actually use physical address here! */
    /** @todo SMP support! */
    VMCPUID idCpu = 0;

    /*
     * Save the original byte, then plant the int3 (0xcc) opcode in its place.
     */
    DBGFADDRESS Addr;
    DBGFR3AddrFromFlat(pVM, &Addr, pBp->GCPtr);
    int rc = DBGFR3MemRead(pVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
    if (RT_FAILURE(rc))
        return rc;
    static const uint8_t s_bInt3 = 0xcc;
    return DBGFR3MemWrite(pVM, idCpu, &Addr, &s_bInt3, 1);
}
/**
 * Called when a new patch is added or when first populating the address space.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPatchRec   The patch record.
 */
void patmR3DbgAddPatch(PVM pVM, PPATMPATCHREC pPatchRec)
{
    /* Nothing to do without a debug module, an unplaced patch, or a global-functions patch. */
    if (   pVM->patm.s.hDbgModPatchMem == NIL_RTDBGMOD
        || pPatchRec->patch.pPatchBlockOffset <= 0
        || (pPatchRec->patch.flags & PATMFL_GLOBAL_FUNCTIONS))
        return;

    /** @todo find a cheap way of checking whether we've already added the patch.
     *        Using a flag would be nice, except I don't want to consider saved
     *        state considerations right now (I don't recall if we're still
     *        depending on structure layout there or not). */
    char   szSymbol[256];
    size_t cchSymbol = patmR3DbgDescribePatchAsSymbol(pPatchRec, szSymbol, sizeof(szSymbol));

    /* If we have a symbol near the guest address, append that. */
    if (cchSymbol + 8 <= sizeof(szSymbol))
    {
        RTDBGSYMBOL Symbol;
        RTGCINTPTR  offDisp;
        DBGFADDRESS PrivAddr;
        int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL,
                                      DBGFR3AddrFromFlat(pVM->pUVM, &PrivAddr, pPatchRec->patch.pPrivInstrGC),
                                      RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                      &offDisp, &Symbol, NULL /*phMod*/);
        if (RT_SUCCESS(rc))
        {
            szSymbol[cchSymbol++] = '_';
            szSymbol[cchSymbol++] = '_';
            RTStrCopy(&szSymbol[cchSymbol], sizeof(szSymbol) - cchSymbol, Symbol.szName);
        }
    }

    /* Add it (may fail due to enable/disable patches). */
    RTDbgModSymbolAdd(pVM->patm.s.hDbgModPatchMem, szSymbol, 0 /*iSeg*/,
                      pPatchRec->patch.pPatchBlockOffset,
                      pPatchRec->patch.cbPatchBlockSize, 0 /*fFlags*/, NULL /*piOrdinal*/);
}
/**
 * @interface_method_impl{DBGCCMDHLP,pfnVarConvert}
 *
 * Converts @a pInVar to the type @a enmToType, writing the result to
 * @a pResult.  The two pointers may alias (the input is copied first).
 * Symbols/strings are only resolved when @a fConvSyms is true.
 */
static DECLCALLBACK(int) dbgcHlpVarConvert(PDBGCCMDHLP pCmdHlp, PCDBGCVAR pInVar, DBGCVARTYPE enmToType, bool fConvSyms,
                                           PDBGCVAR pResult)
{
    PDBGC           pDbgc = DBGC_CMDHLP2DBGC(pCmdHlp);
    DBGCVAR const   InVar = *pInVar;    /* local copy: pInVar may equal pResult */
    PCDBGCVAR       pArg = &InVar;      /* lazy bird, clean up later */
    DBGFADDRESS     Address;
    int             rc;

    Assert(pDbgc->pUVM);

    /* Start from a copy of the input; the cases below only patch enmType and the value union. */
    *pResult = InVar;
    switch (InVar.enmType)
    {
        case DBGCVAR_TYPE_GC_FLAT:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_GC_FAR:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_GC_PHYS;
                    rc = DBGFR3AddrToPhys(pDbgc->pUVM, pDbgc->idCpu,
                                          DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, pArg->u.GCFlat),
                                          &pResult->u.GCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_FLAT:
                    pResult->enmType = DBGCVAR_TYPE_HC_FLAT;
                    rc = DBGFR3AddrToVolatileR3Ptr(pDbgc->pUVM, pDbgc->idCpu,
                                                   DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, pArg->u.GCFlat),
                                                   false /*fReadOnly*/,
                                                   &pResult->u.pvHCFlat);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_HC_PHYS;
                    /* NOTE(review): writes via u.GCPhys rather than u.HCPhys; both are 64-bit
                       union members so the stored value is the same — confirm intent. */
                    rc = DBGFR3AddrToHostPhys(pDbgc->pUVM, pDbgc->idCpu,
                                              DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, pArg->u.GCFlat),
                                              &pResult->u.GCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_NUMBER:
                    pResult->enmType = enmToType;
                    pResult->u.u64Number = InVar.u.GCFlat;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_GC_FAR:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, pArg->u.GCFar.sel, pArg->u.GCFar.off);
                    if (RT_SUCCESS(rc))
                    {
                        pResult->enmType = DBGCVAR_TYPE_GC_FLAT;
                        pResult->u.GCFlat = Address.FlatPtr;
                        return VINF_SUCCESS;
                    }
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_GC_FAR:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_GC_PHYS:
                    /* far -> flat -> physical */
                    rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, pArg->u.GCFar.sel, pArg->u.GCFar.off);
                    if (RT_SUCCESS(rc))
                    {
                        pResult->enmType = DBGCVAR_TYPE_GC_PHYS;
                        rc = DBGFR3AddrToPhys(pDbgc->pUVM, pDbgc->idCpu, &Address, &pResult->u.GCPhys);
                        if (RT_SUCCESS(rc))
                            return VINF_SUCCESS;
                    }
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_FLAT:
                    rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, pArg->u.GCFar.sel, pArg->u.GCFar.off);
                    if (RT_SUCCESS(rc))
                    {
                        pResult->enmType = DBGCVAR_TYPE_HC_FLAT;
                        rc = DBGFR3AddrToVolatileR3Ptr(pDbgc->pUVM, pDbgc->idCpu, &Address,
                                                       false /*fReadOnly*/, &pResult->u.pvHCFlat);
                        if (RT_SUCCESS(rc))
                            return VINF_SUCCESS;
                    }
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_PHYS:
                    rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, pArg->u.GCFar.sel, pArg->u.GCFar.off);
                    if (RT_SUCCESS(rc))
                    {
                        pResult->enmType = DBGCVAR_TYPE_HC_PHYS;
                        /* NOTE(review): writes via u.GCPhys — same 64-bit slot as u.HCPhys; confirm intent. */
                        rc = DBGFR3AddrToHostPhys(pDbgc->pUVM, pDbgc->idCpu, &Address, &pResult->u.GCPhys);
                        if (RT_SUCCESS(rc))
                            return VINF_SUCCESS;
                    }
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_NUMBER:
                    /* Only the offset part is kept when numbering a far address. */
                    pResult->enmType = enmToType;
                    pResult->u.u64Number = InVar.u.GCFar.off;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_GC_PHYS:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    //rc = MMR3PhysGCPhys2GCVirtEx(pDbgc->pVM, pResult->u.GCPhys, ..., &pResult->u.GCFlat); - yea, sure.
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_FAR:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_PHYS:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_HC_FLAT:
                    pResult->enmType = DBGCVAR_TYPE_HC_FLAT;
                    rc = DBGFR3AddrToVolatileR3Ptr(pDbgc->pUVM, pDbgc->idCpu,
                                                   DBGFR3AddrFromPhys(pDbgc->pUVM, &Address, pArg->u.GCPhys),
                                                   false /*fReadOnly*/,
                                                   &pResult->u.pvHCFlat);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_HC_PHYS;
                    rc = DBGFR3AddrToHostPhys(pDbgc->pUVM, pDbgc->idCpu,
                                              DBGFR3AddrFromPhys(pDbgc->pUVM, &Address, pArg->u.GCPhys),
                                              &pResult->u.HCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_NUMBER:
                    pResult->enmType = enmToType;
                    pResult->u.u64Number = InVar.u.GCPhys;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_HC_FLAT:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_FAR:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_GC_PHYS;
                    rc = PGMR3DbgR3Ptr2GCPhys(pDbgc->pUVM, pArg->u.pvHCFlat, &pResult->u.GCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    /** @todo more memory types! */
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_FLAT:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_HC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_HC_PHYS;
                    rc = PGMR3DbgR3Ptr2HCPhys(pDbgc->pUVM, pArg->u.pvHCFlat, &pResult->u.HCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    /** @todo more memory types! */
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_NUMBER:
                    pResult->enmType = enmToType;
                    pResult->u.u64Number = (uintptr_t)InVar.u.pvHCFlat;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_HC_PHYS:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_FAR:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_GC_PHYS;
                    rc = PGMR3DbgHCPhys2GCPhys(pDbgc->pUVM, pArg->u.HCPhys, &pResult->u.GCPhys);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    return VERR_DBGC_PARSE_CONVERSION_FAILED;

                case DBGCVAR_TYPE_HC_FLAT:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_HC_PHYS:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_NUMBER:
                    pResult->enmType = enmToType;
                    pResult->u.u64Number = InVar.u.HCPhys;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_NUMBER:
            /* Numbers reinterpret freely as any single-value address type. */
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                    pResult->enmType = DBGCVAR_TYPE_GC_FLAT;
                    pResult->u.GCFlat = (RTGCPTR)InVar.u.u64Number;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_GC_FAR:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_GC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_GC_PHYS;
                    pResult->u.GCPhys = (RTGCPHYS)InVar.u.u64Number;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_HC_FLAT:
                    pResult->enmType = DBGCVAR_TYPE_HC_FLAT;
                    pResult->u.pvHCFlat = (void *)(uintptr_t)InVar.u.u64Number;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_HC_PHYS:
                    pResult->enmType = DBGCVAR_TYPE_HC_PHYS;
                    pResult->u.HCPhys = (RTHCPHYS)InVar.u.u64Number;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_NUMBER:
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_SYMBOL:
        case DBGCVAR_TYPE_STRING:
            switch (enmToType)
            {
                case DBGCVAR_TYPE_GC_FLAT:
                case DBGCVAR_TYPE_GC_FAR:
                case DBGCVAR_TYPE_GC_PHYS:
                case DBGCVAR_TYPE_HC_FLAT:
                case DBGCVAR_TYPE_HC_PHYS:
                case DBGCVAR_TYPE_NUMBER:
                    /* Symbol resolution is opt-in via fConvSyms. */
                    if (fConvSyms)
                    {
                        rc = dbgcSymbolGet(pDbgc, InVar.u.pszString, enmToType, pResult);
                        if (RT_SUCCESS(rc))
                            return VINF_SUCCESS;
                    }
                    return VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;

                case DBGCVAR_TYPE_STRING:
                case DBGCVAR_TYPE_SYMBOL:
                    /* string <-> symbol is just a type relabel; the payload is shared. */
                    pResult->enmType = enmToType;
                    return VINF_SUCCESS;

                case DBGCVAR_TYPE_UNKNOWN:
                case DBGCVAR_TYPE_ANY:
                    break;
            }
            break;

        case DBGCVAR_TYPE_UNKNOWN:
        case DBGCVAR_TYPE_ANY:
            break;
    }

    AssertMsgFailed(("f=%d t=%d\n", InVar.enmType, enmToType));
    return VERR_INVALID_PARAMETER;
}
/**
 * @interface_method_impl{DBGCCMDHLP,pfnMemWrite}
 *
 * Writes @a cbWrite bytes from @a pvBuffer to the location described by
 * @a pVarPointer.  Far addresses are converted to flat and bounds-checked
 * against the selector limit (the write may be truncated when @a pcbWritten
 * is supplied); host addresses are copied page by page.
 *
 * @returns VBox status code.  A negated VERR_INVALID_POINTER (positive value)
 *          indicates a partial host-memory write.
 * @param   pCmdHlp     The command helper callback table.
 * @param   pvBuffer    The bytes to write.
 * @param   cbWrite     Number of bytes to write.
 * @param   pVarPointer The destination address variable.
 * @param   pcbWritten  Where to return the actual byte count.  Optional.
 */
static DECLCALLBACK(int) dbgcHlpMemWrite(PDBGCCMDHLP pCmdHlp, const void *pvBuffer, size_t cbWrite, PCDBGCVAR pVarPointer,
                                         size_t *pcbWritten)
{
    PDBGC       pDbgc = DBGC_CMDHLP2DBGC(pCmdHlp);
    DBGFADDRESS Address;
    int         rc;

    /*
     * Dummy check.
     */
    if (cbWrite == 0)
    {
        if (pcbWritten)  /* Fixed: was 'if (*pcbWritten)' - dereferenced the optional
                            pointer (NULL crash) and tested the pointee by mistake. */
            *pcbWritten = 0;
        return VINF_SUCCESS;
    }

    /*
     * Convert Far addresses getting size and the correct base address.
     * Getting and checking the size is what makes this messy and slow.
     */
    DBGCVAR Var = *pVarPointer;
    switch (pVarPointer->enmType)
    {
        case DBGCVAR_TYPE_GC_FAR:
        {
            /* Use DBGFR3AddrFromSelOff for the conversion. */
            Assert(pDbgc->pUVM);
            rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, Var.u.GCFar.sel, Var.u.GCFar.off);
            if (RT_FAILURE(rc))
                return rc;

            /* don't bother with flat selectors (for now). */
            if (!DBGFADDRESS_IS_FLAT(&Address))
            {
                DBGFSELINFO SelInfo;
                rc = DBGFR3SelQueryInfo(pDbgc->pUVM, pDbgc->idCpu, Address.Sel,
                                        DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
                if (RT_SUCCESS(rc))
                {
                    RTGCUINTPTR cb; /* -1 byte */
                    if (DBGFSelInfoIsExpandDown(&SelInfo))
                    {
                        /* Expand-down: valid offsets lie above the limit. */
                        if (    !SelInfo.u.Raw.Gen.u1Granularity
                            &&  Address.off > UINT16_C(0xffff))
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        if (Address.off <= SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = (SelInfo.u.Raw.Gen.u1Granularity ? UINT32_C(0xffffffff) : UINT32_C(0xffff)) - Address.off;
                    }
                    else
                    {
                        if (Address.off > SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = SelInfo.cbLimit - Address.off;
                    }
                    if (cbWrite - 1 > cb)
                    {
                        /* Truncate only when the caller can learn the actual count. */
                        if (!pcbWritten)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cbWrite = cb + 1;
                    }
                }
            }
            Var.enmType = DBGCVAR_TYPE_GC_FLAT;
            Var.u.GCFlat = Address.FlatPtr;
        }
        /* fall thru */
        case DBGCVAR_TYPE_GC_FLAT:
            rc = DBGFR3MemWrite(pDbgc->pUVM, pDbgc->idCpu,
                                DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, Var.u.GCFlat),
                                pvBuffer, cbWrite);
            if (pcbWritten && RT_SUCCESS(rc))
                *pcbWritten = cbWrite;
            return rc;

        case DBGCVAR_TYPE_GC_PHYS:
            rc = DBGFR3MemWrite(pDbgc->pUVM, pDbgc->idCpu,
                                DBGFR3AddrFromPhys(pDbgc->pUVM, &Address, Var.u.GCPhys),
                                pvBuffer, cbWrite);
            if (pcbWritten && RT_SUCCESS(rc))
                *pcbWritten = cbWrite;
            return rc;

        case DBGCVAR_TYPE_HC_FLAT:
        case DBGCVAR_TYPE_HC_PHYS:
        {
            /*
             * Copy HC memory page by page.
             */
            if (pcbWritten)
                *pcbWritten = 0;
            while (cbWrite > 0)
            {
                /* convert to flat address */
                DBGCVAR Var2;
                rc = dbgcOpAddrFlat(pDbgc, &Var, DBGCVAR_CAT_ANY, &Var2);
                if (RT_FAILURE(rc))
                {
                    /* Negated status (positive) flags a partial write. */
                    if (pcbWritten && *pcbWritten)
                        return -VERR_INVALID_POINTER;
                    return VERR_INVALID_POINTER;
                }

                /* calc size - stay within the current page. */
                size_t cbChunk = PAGE_SIZE;
                cbChunk -= (uintptr_t)Var.u.pvHCFlat & PAGE_OFFSET_MASK;
                if (cbChunk > cbWrite)
                    cbChunk = cbWrite;

                /** @todo protect this!!! */
                memcpy(Var2.u.pvHCFlat, pvBuffer, cbChunk);

                /* advance */
                if (Var.enmType == DBGCVAR_TYPE_HC_FLAT)
                    Var.u.pvHCFlat = (uint8_t *)Var.u.pvHCFlat + cbChunk;
                else
                    Var.u.HCPhys += cbChunk;
                pvBuffer = (uint8_t const *)pvBuffer + cbChunk;
                if (pcbWritten)
                    *pcbWritten += cbChunk;
                cbWrite -= cbChunk;
            }
            return VINF_SUCCESS;
        }

        default:
            return VERR_NOT_IMPLEMENTED;
    }
}
/**
 * @interface_method_impl{DBGCCMDHLP,pfnMemRead}
 *
 * Reads @a cbRead bytes from the location described by @a pVarPointer into
 * @a pvBuffer.  Far addresses are converted to flat and bounds-checked against
 * the selector limit (the read may be truncated when @a pcbRead is supplied);
 * all address types are then read page by page.
 *
 * @returns VBox status code.  A partial read with @a pcbRead supplied is
 *          reported as VINF_SUCCESS with *pcbRead set to the bytes read.
 * @param   pCmdHlp     The command helper callback table.
 * @param   pvBuffer    Where to store the bytes read.
 * @param   cbRead      Number of bytes to read.
 * @param   pVarPointer The source address variable.
 * @param   pcbRead     Where to return the actual byte count.  Optional.
 */
static DECLCALLBACK(int) dbgcHlpMemRead(PDBGCCMDHLP pCmdHlp, void *pvBuffer, size_t cbRead, PCDBGCVAR pVarPointer, size_t *pcbRead)
{
    PDBGC       pDbgc = DBGC_CMDHLP2DBGC(pCmdHlp);
    DBGFADDRESS Address;
    int         rc;

    /*
     * Dummy check.
     */
    if (cbRead == 0)
    {
        if (pcbRead)  /* Fixed: was 'if (*pcbRead)' - dereferenced the optional
                         pointer (NULL crash) and tested the pointee by mistake. */
            *pcbRead = 0;
        return VINF_SUCCESS;
    }

    /*
     * Convert Far addresses getting size and the correct base address.
     * Getting and checking the size is what makes this messy and slow.
     */
    DBGCVAR Var = *pVarPointer;
    switch (pVarPointer->enmType)
    {
        case DBGCVAR_TYPE_GC_FAR:
            /* Use DBGFR3AddrFromSelOff for the conversion. */
            Assert(pDbgc->pUVM);
            rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, Var.u.GCFar.sel, Var.u.GCFar.off);
            if (RT_FAILURE(rc))
                return rc;

            /* don't bother with flat selectors (for now). */
            if (!DBGFADDRESS_IS_FLAT(&Address))
            {
                DBGFSELINFO SelInfo;
                rc = DBGFR3SelQueryInfo(pDbgc->pUVM, pDbgc->idCpu, Address.Sel,
                                        DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
                if (RT_SUCCESS(rc))
                {
                    RTGCUINTPTR cb; /* -1 byte */
                    if (DBGFSelInfoIsExpandDown(&SelInfo))
                    {
                        /* Expand-down: valid offsets lie above the limit. */
                        if (    !SelInfo.u.Raw.Gen.u1Granularity
                            &&  Address.off > UINT16_C(0xffff))
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        if (Address.off <= SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = (SelInfo.u.Raw.Gen.u1Granularity ? UINT32_C(0xffffffff) : UINT32_C(0xffff)) - Address.off;
                    }
                    else
                    {
                        if (Address.off > SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = SelInfo.cbLimit - Address.off;
                    }
                    if (cbRead - 1 > cb)
                    {
                        /* Truncate only when the caller can learn the actual count. */
                        if (!pcbRead)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cbRead = cb + 1;
                    }
                }
            }
            Var.enmType = DBGCVAR_TYPE_GC_FLAT;
            Var.u.GCFlat = Address.FlatPtr;
            break;

        case DBGCVAR_TYPE_GC_FLAT:
        case DBGCVAR_TYPE_GC_PHYS:
        case DBGCVAR_TYPE_HC_FLAT:
        case DBGCVAR_TYPE_HC_PHYS:
            break;

        default:
            return VERR_NOT_IMPLEMENTED;
    }

    /*
     * Copy page by page.
     */
    size_t cbLeft = cbRead;
    for (;;)
    {
        /*
         * Calc read size - don't cross a page boundary in one chunk.
         */
        size_t cb = RT_MIN(PAGE_SIZE, cbLeft);
        switch (pVarPointer->enmType)
        {
            case DBGCVAR_TYPE_GC_FLAT: cb = RT_MIN(cb, PAGE_SIZE - (Var.u.GCFlat & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_GC_PHYS: cb = RT_MIN(cb, PAGE_SIZE - (Var.u.GCPhys & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_HC_FLAT: cb = RT_MIN(cb, PAGE_SIZE - ((uintptr_t)Var.u.pvHCFlat & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_HC_PHYS: cb = RT_MIN(cb, PAGE_SIZE - ((size_t)Var.u.HCPhys & PAGE_OFFSET_MASK)); break; /* size_t: MSC has braindead loss of data warnings! */
            default: break;
        }

        /*
         * Perform read.
         */
        switch (Var.enmType)
        {
            case DBGCVAR_TYPE_GC_FLAT:
                rc = DBGFR3MemRead(pDbgc->pUVM, pDbgc->idCpu,
                                   DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, Var.u.GCFlat),
                                   pvBuffer, cb);
                break;

            case DBGCVAR_TYPE_GC_PHYS:
                rc = DBGFR3MemRead(pDbgc->pUVM, pDbgc->idCpu,
                                   DBGFR3AddrFromPhys(pDbgc->pUVM, &Address, Var.u.GCPhys),
                                   pvBuffer, cb);
                break;

            case DBGCVAR_TYPE_HC_PHYS:
            case DBGCVAR_TYPE_HC_FLAT:
            {
                DBGCVAR Var2;
                rc = dbgcOpAddrFlat(pDbgc, &Var, DBGCVAR_CAT_ANY, &Var2);
                if (RT_SUCCESS(rc))
                {
                    /** @todo protect this!!! */
                    memcpy(pvBuffer, Var2.u.pvHCFlat, cb);
                    rc = 0;
                }
                else
                    rc = VERR_INVALID_POINTER;
                break;
            }

            default:
                rc = VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;
        }

        /*
         * Check for failure - a partial read counts as success when reported.
         */
        if (RT_FAILURE(rc))
        {
            if (pcbRead && (*pcbRead = cbRead - cbLeft) > 0)
                return VINF_SUCCESS;
            return rc;
        }

        /*
         * Next.
         */
        cbLeft -= cb;
        if (!cbLeft)
            break;
        pvBuffer = (char *)pvBuffer + cb;
        rc = DBGCCmdHlpEval(pCmdHlp, &Var, "%DV + %d", &Var, cb);
        if (RT_FAILURE(rc))
        {
            if (pcbRead && (*pcbRead = cbRead - cbLeft) > 0)
                return VINF_SUCCESS;
            return rc;
        }
    }

    /*
     * Done
     */
    if (pcbRead)
        *pcbRead = cbRead;
    return 0;
}
/**
 * Internal worker routine.
 *
 * Unwinds one stack frame: reads the saved frame pointer, return address and
 * argument bytes at the current frame address, then fills in the AddrReturn*
 * members and symbol/line info of @a pFrame for the next iteration.
 *
 * On x86 the typical stack frame layout is like this:
 *     ..  ..
 *     16  parameter 2
 *     12  parameter 1
 *      8  parameter 0
 *      4  return address
 *      0  old ebp; current ebp points here
 *
 * @returns VBox status code.  VERR_NO_MORE_FILES signals that the previous
 *          step already hit a read error (DBGFSTACKFRAME_FLAGS_LAST).
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   The CPU to walk.
 * @param   hAs     The address space to resolve symbols/lines in.
 * @param   pFrame  The frame to advance (in/out).
 *
 * @todo Add AMD64 support (needs teaming up with the module management for
 *       unwind tables).
 */
static int dbgfR3StackWalk(PUVM pUVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME pFrame)
{
    /*
     * Stop if we got a read error in the previous run.
     */
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
        return VERR_NO_MORE_FILES;

    /*
     * Read the raw frame data.
     * The stack item size is derived from the PC address type, falling back
     * on the return type when the address type doesn't pin it down.
     */
    const DBGFADDRESS AddrOldPC = pFrame->AddrPC;
    const unsigned cbRetAddr = DBGFReturnTypeSize(pFrame->enmReturnType);
    unsigned cbStackItem;
    switch (AddrOldPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
    {
        case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
        case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
        case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
        case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
        default:
            switch (pFrame->enmReturnType)
            {
                case DBGFRETURNTYPE_FAR16:
                case DBGFRETURNTYPE_IRET16:
                case DBGFRETURNTYPE_IRET32_V86:
                case DBGFRETURNTYPE_NEAR16: cbStackItem = 2; break;

                case DBGFRETURNTYPE_FAR32:
                case DBGFRETURNTYPE_IRET32:
                case DBGFRETURNTYPE_IRET32_PRIV:
                case DBGFRETURNTYPE_NEAR32: cbStackItem = 4; break;

                case DBGFRETURNTYPE_FAR64:
                case DBGFRETURNTYPE_IRET64:
                case DBGFRETURNTYPE_NEAR64: cbStackItem = 8; break;

                default:
                    AssertMsgFailed(("%d\n", pFrame->enmReturnType));
                    cbStackItem = 4;
                    break;
            }
    }

    /* Stack-allocated scratch buffer: [saved BP][return addr][argument bytes]. */
    union
    {
        uint64_t *pu64;
        uint32_t *pu32;
        uint16_t *pu16;
        uint8_t  *pb;
        void     *pv;
    } u, uRet, uArgs, uBp;
    size_t cbRead = cbRetAddr + cbStackItem + sizeof(pFrame->Args);
    u.pv = alloca(cbRead);
    uBp = u;
    uRet.pb = u.pb + cbStackItem;
    uArgs.pb = u.pb + cbStackItem + cbRetAddr;

    Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
    int rc = dbgfR3Read(pUVM, idCpu, u.pv,
                        pFrame->fFlags & DBGFSTACKFRAME_FLAGS_ALL_VALID
                        ? &pFrame->AddrReturnFrame
                        : &pFrame->AddrFrame,
                        cbRead, &cbRead);
    if (    RT_FAILURE(rc)
        ||  cbRead < cbRetAddr + cbStackItem)
        pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;  /* next call will stop */

    /*
     * The first step is taken in a different way than the others.
     */
    if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_ALL_VALID))
    {
        pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_ALL_VALID;
        pFrame->iFrame = 0;

        /* Current PC - set by caller, just find symbol & line. */
        if (DBGFADDRESS_IS_VALID(&pFrame->AddrPC))
        {
            pFrame->pSymPC  = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pFrame->AddrPC, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
                                                    NULL /*poffDisp*/, NULL /*phMod*/);
            pFrame->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pFrame->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
        }
    }
    else /* 2nd and subsequent steps */
    {
        /* frame, pc and stack is taken from the existing frames return members. */
        pFrame->AddrFrame = pFrame->AddrReturnFrame;
        pFrame->AddrPC    = pFrame->AddrReturnPC;
        pFrame->pSymPC    = pFrame->pSymReturnPC;
        pFrame->pLinePC   = pFrame->pLineReturnPC;

        /* increment the frame number. */
        pFrame->iFrame++;
    }

    /*
     * Return Frame address - the saved BP at offset 0 of the buffer.
     */
    pFrame->AddrReturnFrame = pFrame->AddrFrame;
    switch (cbStackItem)
    {
        case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
        case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
        case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
        default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
    }
    /* Keep FlatPtr consistent with the new offset (same segment base). */
    pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;

    /*
     * Return PC and Stack Addresses.
     */
    /** @todo AddrReturnStack is not correct for stdcall and pascal. (requires scope info) */
    pFrame->AddrReturnStack          = pFrame->AddrFrame;
    pFrame->AddrReturnStack.off     += cbStackItem + cbRetAddr;
    pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;

    pFrame->AddrReturnPC = pFrame->AddrPC;
    switch (pFrame->enmReturnType)
    {
        /* Near returns reuse the current code segment; far/iret returns carry sel:off. */
        case DBGFRETURNTYPE_NEAR16:
            if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
            {
                pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
                pFrame->AddrReturnPC.off      = *uRet.pu16;
            }
            else
                DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
            break;
        case DBGFRETURNTYPE_NEAR32:
            if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
            {
                pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
                pFrame->AddrReturnPC.off      = *uRet.pu32;
            }
            else
                DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
            break;
        case DBGFRETURNTYPE_NEAR64:
            if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
            {
                pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
                pFrame->AddrReturnPC.off      = *uRet.pu64;
            }
            else
                DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
            break;
        case DBGFRETURNTYPE_FAR16:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
            break;
        case DBGFRETURNTYPE_FAR32:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
            break;
        case DBGFRETURNTYPE_FAR64:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
            break;
        case DBGFRETURNTYPE_IRET16:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
            break;
        case DBGFRETURNTYPE_IRET32:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
            break;
        case DBGFRETURNTYPE_IRET32_PRIV:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
            break;
        case DBGFRETURNTYPE_IRET32_V86:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
            break;
        case DBGFRETURNTYPE_IRET64:
            DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
            break;
        default:
            AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
            return VERR_INVALID_PARAMETER;
    }

    pFrame->pSymReturnPC  = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pFrame->AddrReturnPC, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
                                                  NULL /*poffDisp*/, NULL /*phMod*/);
    pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUVM, hAs, &pFrame->AddrReturnPC, NULL /*poffDisp*/, NULL /*phMod*/);

    /*
     * Frame bitness flag.
     */
    switch (cbStackItem)
    {
        case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
        case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
        case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
        default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
    }

    /*
     * The arguments.
     */
    memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));

    return VINF_SUCCESS;
}
/**
 * Walks the entire stack allocating memory as we walk.
 *
 * Seeds the first frame from the supplied addresses (or the CPU context when
 * they are NULL), then repeatedly calls dbgfR3StackWalk() to unwind, chaining
 * the frames on a heap-allocated linked list returned via @a ppFirstFrame.
 * Walking stops on the LAST, MAX_DEPTH or LOOP flags.
 */
static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTXCORE pCtxCore, RTDBGAS hAs,
                                                DBGFCODETYPE enmCodeType,
                                                PCDBGFADDRESS pAddrFrame,
                                                PCDBGFADDRESS pAddrStack,
                                                PCDBGFADDRESS pAddrPC,
                                                DBGFRETURNTYPE enmReturnType,
                                                PCDBGFSTACKFRAME *ppFirstFrame)
{
    /* alloc first frame. */
    PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
    if (!pCur)
        return VERR_NO_MEMORY;

    /*
     * Initialize the frame.
     */
    pCur->pNextInternal = NULL;
    pCur->pFirstInternal = pCur;

    /* PC: explicit address, flat RIP (non-guest code), or cs:rip for guest code. */
    int rc = VINF_SUCCESS;
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
    else if (enmCodeType != DBGFCODETYPE_GUEST)
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtxCore->rip);
    else
        rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtxCore->cs.Sel, pCtxCore->rip);
    if (RT_SUCCESS(rc))
    {
        /* Derive a default return type from the PC address type when not given. */
        if (enmReturnType == DBGFRETURNTYPE_INVALID)
            switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
            {
                case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = DBGFRETURNTYPE_NEAR16; break;
                case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = DBGFRETURNTYPE_NEAR32; break;
                case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = DBGFRETURNTYPE_NEAR64; break;
                case DBGFADDRESS_FLAGS_RING0:
                    pCur->enmReturnType = HC_ARCH_BITS == 64 ? DBGFRETURNTYPE_NEAR64 : DBGFRETURNTYPE_NEAR32;
                    break;
                default:
                    pCur->enmReturnType = DBGFRETURNTYPE_NEAR32;
                    break; /// @todo 64-bit guests
            }

        /* Address mask for truncating rsp/rbp to the effective address width. */
        uint64_t fAddrMask;
        if (enmCodeType == DBGFCODETYPE_RING0)
            fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
        else if (enmCodeType == DBGFCODETYPE_HYPER)
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
            fAddrMask = UINT16_MAX;
        else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
            fAddrMask = UINT64_MAX;
        else
        {
            /* Flat address: ask CPUM for the current CPU mode. */
            PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
            CPUMMODE CpuMode = CPUMGetGuestMode(pVCpu);
            if (CpuMode == CPUMMODE_REAL)
                fAddrMask = UINT16_MAX;
            else if (   CpuMode == CPUMMODE_PROTECTED
                     || !CPUMIsGuestIn64BitCode(pVCpu))
                fAddrMask = UINT32_MAX;
            else
                fAddrMask = UINT64_MAX;
        }

        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtxCore->rsp & fAddrMask);
        else
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtxCore->ss.Sel, pCtxCore->rsp & fAddrMask);

        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtxCore->rbp & fAddrMask);
        else if (RT_SUCCESS(rc))
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtxCore->ss.Sel, pCtxCore->rbp & fAddrMask);
    }
    else
        /* NOTE(review): this else attaches to 'if (RT_SUCCESS(rc))' as written,
           i.e. the caller's return type is only copied when the PC conversion
           failed - verify against upstream intent. */
        pCur->enmReturnType = enmReturnType;

    /*
     * The first frame.
     */
    if (RT_SUCCESS(rc))
        rc = dbgfR3StackWalk(pUVM, idCpu, hAs, pCur);
    if (RT_FAILURE(rc))
    {
        DBGFR3StackWalkEnd(pCur);
        return rc;
    }

    /*
     * The other frames.
     */
    DBGFSTACKFRAME Next = *pCur;
    while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
    {
        /* try walk. */
        rc = dbgfR3StackWalk(pUVM, idCpu, hAs, &Next);
        if (RT_FAILURE(rc))
            break;

        /* add the next frame to the chain. */
        PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
        if (!pNext)
        {
            DBGFR3StackWalkEnd(pCur);
            return VERR_NO_MEMORY;
        }
        *pNext = Next;
        pCur->pNextInternal = pNext;
        pCur = pNext;
        Assert(pCur->pNextInternal == NULL);

        /* check for loop - identical frame address seen earlier in the chain. */
        for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
             pLoop && pLoop != pCur;
             pLoop = pLoop->pNextInternal)
            if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
            {
                pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
                break;
            }

        /* check for insane recursion */
        if (pCur->iFrame >= 2048)
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
    }

    *ppFirstFrame = pCur->pFirstInternal;
    return rc;
}
/**
 * Raw-mode context (RC) smoke test driver.
 *
 * Resolves VMMGCEntry() in the RC module and then runs a battery of tests:
 * recoverable trap injection (int3, \#DF, \#GP, \#PF), hardware breakpoints
 * with resume and single-stepping, interrupt masking timing, interrupt
 * forwarding through the world switcher, and finally a switcher profiling
 * loop.  Each test returns early with the failing status code.
 *
 * @returns VBox status code; VINF_SUCCESS when all tests pass, otherwise
 *          the status of the first failing test.
 * @param   pVM     The VM handle.  Only CPU 0 is exercised.
 */
VMMR3DECL(int) VMMDoTest(PVM pVM)
{
#if 1 /* quick way to disable the whole body during bring-up/debugging */
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef NO_SUPCALLR0VMM
    /* Build without ring-0 calls: nothing can be tested, report success. */
    RTPrintf("NO_SUPCALLR0VMM\n");
    return VINF_SUCCESS;
#endif

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        /*
         * Test various crashes which we must be able to recover from.
         */
        vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
        vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");

#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
        vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
        SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        bool f;
        rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
        /* NOTE(review): the inner !defined(DEBUG_bird) can never be true here
           (we are inside a defined(DEBUG_bird) section), so the condition is
           compiled out and the WP double-fault test always runs for bird. */
#if !defined(DEBUG_bird)
        if (RT_SUCCESS(rc) && f)
#endif
        {
            /* see triple fault warnings in SELM and VMMGC.cpp. */
            vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
            SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        }
#endif

        vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
        ///@todo find a better \#GP case, on intel ltr will \#PF (busy update?) and not \#GP.
        //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");

        vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
        vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
        vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler");
        /* This test is no longer relevant as fs and gs are loaded with NULL
           selectors and we will always return to HC if a #GP occurs while
           returning to guest code.
        vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler and bad fs");
        */

        /*
         * Set a debug register and perform a context switch.
         */
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
            return rc;
        }

        /* a harmless breakpoint (DR0 on an address the NOP test never executes) */
        RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
        DBGFADDRESS Addr;
        DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
        RTUINT iBp0;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
            return rc;
        }

        /* a bad one at VMMGCEntry - this one must be hit */
        RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
        DBGFR3AddrFromFlat(pVM, &Addr, RCPtrEP);
        RTUINT iBp1;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
            return rc;
        }

        /* resume the breakpoint - RF suppresses re-triggering on the same instruction */
        RTPrintf("VMM: resuming hyper after breakpoint\n");
        CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
        rc = VMMR3ResumeHyper(pVM, pVCpu);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
            return rc;
        }

        /* engage the breakpoint again and try single stepping. */
        RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n");
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
            return rc;
        }

        /* Step 8 instructions, verifying EIP actually advances each time. */
        RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
        RTPrintf("%RGr=>", OldPc);
        unsigned i;
        for (i = 0; i < 8; i++)
        {
            /* TF requests the single step, RF skips the still-armed breakpoint. */
            CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
            rc = VMMR3ResumeHyper(pVM, pVCpu);
            if (rc != VINF_EM_DBG_HYPER_STEPPED)
            {
                RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
                return rc;
            }
            RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
            RTPrintf("%RGr=>", Pc);
            if (Pc == OldPc)
            {
                RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
                return VERR_GENERAL_FAILURE;
            }
            OldPc = Pc;
        }
        RTPrintf("ok\n");

        /* done, clear it */
        if (    RT_FAILURE(DBGFR3BpClear(pVM, iBp0))
            ||  RT_FAILURE(DBGFR3BpClear(pVM, iBp1)))
        {
            RTPrintf("VMM: Failed to clear breakpoints!\n");
            return VERR_GENERAL_FAILURE;
        }
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
            return rc;
        }

        /*
         * Interrupt masking.  Warn if an iteration takes less than 1/10000th
         * of a second worth of TSC ticks (suspiciously fast).
         */
        RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        for (i = 0; i < 10000; i++)
        {
            uint64_t StartTick = ASMReadTSC();
            rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 0);
            if (rc != VINF_SUCCESS)
            {
                RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
                return rc;
            }
            uint64_t Ticks = ASMReadTSC() - StartTick;
            if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
                RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
        }

        /*
         * Interrupt forwarding.  Build the hypervisor stack frame for the
         * call trampoline by hand (arguments pushed in reverse order).
         */
        CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
        CPUMPushHyper(pVCpu, 0);
        CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
        CPUMPushHyper(pVCpu, pVM->pVMRC);
        CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
        CPUMPushHyper(pVCpu, RCPtrEP);             /* what to call */
        Log(("trampoline=%x\n", pVM->vmm.s.pfnCallTrampolineRC));

        /*
         * Switch and do da thing.  Loop while the RC side keeps reporting
         * hypervisor interrupts; anything else ends the test.
         */
        RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        i = 0;
        uint64_t    tsBegin = RTTimeNanoTS();
        uint64_t    TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        do
        {
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            i++;
            if (!(i % 32))
                Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
                     i, CPUMGetHyperESI(pVCpu), CPUMGetHyperEDI(pVCpu), CPUMGetHyperEBX(pVCpu)));
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd = RTTimeNanoTS();

        uint64_t Elapsed = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
        RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
        Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));

        /*
         * These forced actions are not necessary for the test and trigger breakpoints too.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

        /*
         * Profile switching.  One million NOP round trips, tracking both the
         * overall average and the cheapest single world switch (TickMin).
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = ~0; /* ~0 == all bits set == UINT64_MAX after conversion */
        tsBegin = RTTimeNanoTS();
        TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);             /* what to call */

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        TickEnd = ASMReadTSC();
        tsEnd = RTTimeNanoTS();

        Elapsed = tsEnd - tsBegin;
        PerIteration = Elapsed / (uint64_t)i;
        cTicksElapsed = TickEnd - TickStart;
        cTicksPerIteration = cTicksElapsed / (uint64_t)i;
        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolved VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));
#endif
    return rc;
}
/**
 * Loads the kernel symbols from the kallsyms tables.
 *
 * Reads the four in-guest kallsyms tables (addresses, compressed names,
 * token table, token index) into temporary host buffers, derives the kernel
 * image bounds from the symbol addresses, then creates a "vmlinux" debug
 * module, decodes every symbol name and links the module into the kernel
 * address space.
 *
 * Fix vs. original: the four RTMemAllocZ() results were passed straight to
 * DBGFR3MemRead() without a NULL check; each allocation now fails over to
 * VERR_NO_MEMORY.  (RTMemFree(NULL) is a no-op, so the cleanup path is
 * unchanged.)
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   pThis   The Linux digger data.
 */
static int dbgDiggerLinuxLoadKernelSymbols(PUVM pUVM, PDBGDIGGERLINUX pThis)
{
    /*
     * Allocate memory for temporary table copies, reading the tables as we go.
     */
    uint32_t const cbGuestAddr = pThis->f64Bit ? sizeof(uint64_t) : sizeof(uint32_t);
    void *pvAddresses = RTMemAllocZ(pThis->cKernelSymbols * cbGuestAddr);
    int rc = pvAddresses
           ? DBGFR3MemRead(pUVM, 0 /*idCpu*/, &pThis->AddrKernelAddresses,
                           pvAddresses, pThis->cKernelSymbols * cbGuestAddr)
           : VERR_NO_MEMORY;
    if (RT_SUCCESS(rc))
    {
        uint8_t *pbNames = (uint8_t *)RTMemAllocZ(pThis->cbKernelNames);
        rc = pbNames
           ? DBGFR3MemRead(pUVM, 0 /*idCpu*/, &pThis->AddrKernelNames, pbNames, pThis->cbKernelNames)
           : VERR_NO_MEMORY;
        if (RT_SUCCESS(rc))
        {
            char *pszzTokens = (char *)RTMemAllocZ(pThis->cbKernelTokenTable);
            rc = pszzTokens
               ? DBGFR3MemRead(pUVM, 0 /*idCpu*/, &pThis->AddrKernelTokenTable, pszzTokens, pThis->cbKernelTokenTable)
               : VERR_NO_MEMORY;
            if (RT_SUCCESS(rc))
            {
                uint16_t *paoffTokens = (uint16_t *)RTMemAllocZ(256 * sizeof(uint16_t));
                rc = paoffTokens
                   ? DBGFR3MemRead(pUVM, 0 /*idCpu*/, &pThis->AddrKernelTokenIndex, paoffTokens, 256 * sizeof(uint16_t))
                   : VERR_NO_MEMORY;
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Figure out the kernel start and end:  widen the initial
                     * [addresses-table, token-index-end] range to cover any
                     * valid symbol address within LNX_MAX_KERNEL_SIZE of it.
                     * NOTE(review): the reverse loop assumes cKernelSymbols > 0
                     * (cKernelSymbols - 1 would wrap otherwise) - presumably
                     * guaranteed by the caller that located the tables; verify.
                     */
                    RTGCUINTPTR uKernelStart = pThis->AddrKernelAddresses.FlatPtr;
                    RTGCUINTPTR uKernelEnd   = pThis->AddrKernelTokenIndex.FlatPtr + 256 * sizeof(uint16_t);
                    uint32_t    i;
                    if (cbGuestAddr == sizeof(uint64_t))
                    {
                        uint64_t *pauAddrs = (uint64_t *)pvAddresses;
                        for (i = 0; i < pThis->cKernelSymbols; i++)
                            if (   pauAddrs[i] < uKernelStart
                                && LNX64_VALID_ADDRESS(pauAddrs[i])
                                && uKernelStart - pauAddrs[i] < LNX_MAX_KERNEL_SIZE)
                                uKernelStart = pauAddrs[i];

                        for (i = pThis->cKernelSymbols - 1; i > 0; i--)
                            if (   pauAddrs[i] > uKernelEnd
                                && LNX64_VALID_ADDRESS(pauAddrs[i])
                                && pauAddrs[i] - uKernelEnd < LNX_MAX_KERNEL_SIZE)
                                uKernelEnd = pauAddrs[i];
                    }
                    else
                    {
                        uint32_t *pauAddrs = (uint32_t *)pvAddresses;
                        for (i = 0; i < pThis->cKernelSymbols; i++)
                            if (   pauAddrs[i] < uKernelStart
                                && LNX32_VALID_ADDRESS(pauAddrs[i])
                                && uKernelStart - pauAddrs[i] < LNX_MAX_KERNEL_SIZE)
                                uKernelStart = pauAddrs[i];

                        for (i = pThis->cKernelSymbols - 1; i > 0; i--)
                            if (   pauAddrs[i] > uKernelEnd
                                && LNX32_VALID_ADDRESS(pauAddrs[i])
                                && pauAddrs[i] - uKernelEnd < LNX_MAX_KERNEL_SIZE)
                                uKernelEnd = pauAddrs[i];
                    }

                    RTGCUINTPTR cbKernel = uKernelEnd - uKernelStart;
                    pThis->cbKernel = (uint32_t)cbKernel;
                    DBGFR3AddrFromFlat(pUVM, &pThis->AddrKernelBase, uKernelStart);
                    Log(("dbgDiggerLinuxLoadKernelSymbols: uKernelStart=%RGv cbKernel=%#x\n", uKernelStart, cbKernel));

                    /*
                     * Create a module for the kernel.
                     */
                    RTDBGMOD hMod;
                    rc = RTDbgModCreate(&hMod, "vmlinux", cbKernel, 0 /*fFlags*/);
                    if (RT_SUCCESS(rc))
                    {
                        rc = RTDbgModSetTag(hMod, DIG_LNX_MOD_TAG); AssertRC(rc);
                        rc = VINF_SUCCESS;

                        /*
                         * Enumerate the symbols, decompressing each name via
                         * the token table (kallsyms prefix compression).
                         */
                        uint8_t const *pbCurAddr = (uint8_t const *)pvAddresses;
                        uint32_t       offName   = 0;
                        uint32_t       cLeft     = pThis->cKernelSymbols;
                        while (cLeft-- > 0 && RT_SUCCESS(rc))
                        {
                            /* Decode the symbol name first. */
                            if (RT_LIKELY(offName < pThis->cbKernelNames))
                            {
                                uint8_t cbName = pbNames[offName++];
                                if (RT_LIKELY(offName + cbName <= pThis->cbKernelNames))
                                {
                                    char     szSymbol[4096];
                                    uint32_t offSymbol = 0;
                                    while (cbName-- > 0)
                                    {
                                        /* Each encoded byte indexes a token string. */
                                        uint8_t  bEnc     = pbNames[offName++];
                                        uint16_t offToken = paoffTokens[bEnc];
                                        if (RT_LIKELY(offToken < pThis->cbKernelTokenTable))
                                        {
                                            const char *pszToken = &pszzTokens[offToken];
                                            char ch;
                                            while ((ch = *pszToken++) != '\0')
                                                if (offSymbol < sizeof(szSymbol) - 1)
                                                    szSymbol[offSymbol++] = ch;
                                        }
                                        else
                                        {
                                            rc = VERR_INVALID_UTF8_ENCODING;
                                            break;
                                        }
                                    }
                                    szSymbol[offSymbol < sizeof(szSymbol) ? offSymbol : sizeof(szSymbol) - 1] = '\0';

                                    /* The address. */
                                    RTGCUINTPTR uSymAddr = cbGuestAddr == sizeof(uint64_t)
                                                         ? *(uint64_t *)pbCurAddr : *(uint32_t *)pbCurAddr;
                                    pbCurAddr += cbGuestAddr;

                                    /* Add it without the type char (szSymbol[0] is the kallsyms type). */
                                    if (uSymAddr - uKernelStart <= cbKernel)
                                    {
                                        rc = RTDbgModSymbolAdd(hMod, &szSymbol[1], RTDBGSEGIDX_RVA,
                                                               uSymAddr - uKernelStart, 0 /*cb*/, 0 /*fFlags*/, NULL);
                                        if (RT_FAILURE(rc))
                                        {
                                            /* Name/address clashes are expected with kallsyms; ignore them. */
                                            if (   rc == VERR_DBG_SYMBOL_NAME_OUT_OF_RANGE
                                                || rc == VERR_DBG_INVALID_RVA
                                                || rc == VERR_DBG_ADDRESS_CONFLICT
                                                || rc == VERR_DBG_DUPLICATE_SYMBOL)
                                            {
                                                Log2(("dbgDiggerLinuxLoadKernelSymbols: RTDbgModSymbolAdd(,%s,) failed %Rrc (ignored)\n", szSymbol, rc));
                                                rc = VINF_SUCCESS;
                                            }
                                            else
                                                Log(("dbgDiggerLinuxLoadKernelSymbols: RTDbgModSymbolAdd(,%s,) failed %Rrc\n", szSymbol, rc));
                                        }
                                    }
                                }
                                else
                                {
                                    rc = VERR_END_OF_STRING;
                                    Log(("dbgDiggerLinuxLoadKernelSymbols: offName=%#x cLeft=%#x cbName=%#x cbKernelNames=%#x\n",
                                         offName, cLeft, cbName, pThis->cbKernelNames));
                                }
                            }
                            else
                            {
                                rc = VERR_END_OF_STRING;
                                Log(("dbgDiggerLinuxLoadKernelSymbols: offName=%#x cLeft=%#x cbKernelNames=%#x\n",
                                     offName, cLeft, pThis->cbKernelNames));
                            }
                        }

                        /*
                         * Link the module into the address space.
                         */
                        if (RT_SUCCESS(rc))
                        {
                            RTDBGAS hAs = DBGFR3AsResolveAndRetain(pUVM, DBGF_AS_KERNEL);
                            if (hAs != NIL_RTDBGAS)
                                rc = RTDbgAsModuleLink(hAs, hMod, uKernelStart, RTDBGASLINK_FLAGS_REPLACE);
                            else
                                rc = VERR_INTERNAL_ERROR;
                            RTDbgAsRelease(hAs);
                        }
                        else
                            Log(("dbgDiggerLinuxFindTokenIndex: Failed: %Rrc\n", rc));
                        RTDbgModRelease(hMod);
                    }
                    else
                        Log(("dbgDiggerLinuxFindTokenIndex: RTDbgModCreate failed: %Rrc\n", rc));
                }
                else
                    Log(("dbgDiggerLinuxFindTokenIndex: Reading token index at %RGv failed: %Rrc\n",
                         pThis->AddrKernelTokenIndex.FlatPtr, rc));
                RTMemFree(paoffTokens);
            }
            else
                Log(("dbgDiggerLinuxFindTokenIndex: Reading token table at %RGv failed: %Rrc\n",
                     pThis->AddrKernelTokenTable.FlatPtr, rc));
            RTMemFree(pszzTokens);
        }
        else
            Log(("dbgDiggerLinuxFindTokenIndex: Reading encoded names at %RGv failed: %Rrc\n",
                 pThis->AddrKernelNames.FlatPtr, rc));
        RTMemFree(pbNames);
    }
    else
        Log(("dbgDiggerLinuxFindTokenIndex: Reading symbol addresses at %RGv failed: %Rrc\n",
             pThis->AddrKernelAddresses.FlatPtr, rc));
    RTMemFree(pvAddresses);

    return rc;
}
/**
 * @interface_method_impl{DBGFOSIDMESG,pfnQueryKernelLog}
 *
 * Locates the printk ring buffer via the "vmlinux" module symbols, reads it
 * from guest memory, and copies the last @a cMessages messages (newline
 * separated, UTF-8 purged) into @a pszBuf.
 *
 * Fixes vs. original:
 *  - the retained kernel address space (hAs) was leaked when
 *    RTDbgAsModuleByName() failed;
 *  - the retained module (hMod) was only released on the symbol-lookup
 *    error path, never after a successful lookup.
 */
static DECLCALLBACK(int) dbgDiggerLinuxIDmsg_QueryKernelLog(PDBGFOSIDMESG pThis, PUVM pUVM, uint32_t fFlags, uint32_t cMessages,
                                                            char *pszBuf, size_t cbBuf, size_t *pcbActual)
{
    PDBGDIGGERLINUX pData = RT_FROM_MEMBER(pThis, DBGDIGGERLINUX, IDmesg);

    if (cMessages < 1)
        return VERR_INVALID_PARAMETER;

    /*
     * Resolve the symbols we need and read their values.
     */
    RTDBGAS  hAs = DBGFR3AsResolveAndRetain(pUVM, DBGF_AS_KERNEL);
    RTDBGMOD hMod;
    int rc = RTDbgAsModuleByName(hAs, "vmlinux", 0, &hMod);
    RTDbgAsRelease(hAs); /* released on both paths now (was leaked on failure) */
    if (RT_FAILURE(rc))
        return VERR_NOT_FOUND;

    RTGCPTR  GCPtrLogBuf;
    uint32_t cbLogBuf;
    uint32_t idxFirst;
    uint32_t idxNext;
    struct { void *pvVar; size_t cbHost, cbGuest; const char *pszSymbol; } aSymbols[] =
    {
        { &GCPtrLogBuf, sizeof(GCPtrLogBuf), pData->f64Bit ? sizeof(uint64_t) : sizeof(uint32_t), "log_buf" },
        { &cbLogBuf,    sizeof(cbLogBuf),    sizeof(cbLogBuf),                                    "log_buf_len" },
        { &idxFirst,    sizeof(idxFirst),    sizeof(idxFirst),                                    "log_first_idx" },
        { &idxNext,     sizeof(idxNext),     sizeof(idxNext),                                     "log_next_idx" },
    };
    for (uint32_t i = 0; i < RT_ELEMENTS(aSymbols); i++)
    {
        RTDBGSYMBOL SymInfo;
        rc = RTDbgModSymbolByName(hMod, aSymbols[i].pszSymbol, &SymInfo);
        if (RT_SUCCESS(rc))
        {
            RT_BZERO(aSymbols[i].pvVar, aSymbols[i].cbHost);
            Assert(aSymbols[i].cbHost >= aSymbols[i].cbGuest);
            DBGFADDRESS Addr;
            rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/,
                               DBGFR3AddrFromFlat(pUVM, &Addr, (RTGCPTR)SymInfo.Value + pData->AddrKernelBase.FlatPtr),
                               aSymbols[i].pvVar, aSymbols[i].cbGuest);
            if (RT_SUCCESS(rc))
                continue;
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Reading '%s' at %RGv: %Rrc\n", aSymbols[i].pszSymbol, Addr.FlatPtr, rc));
        }
        else
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Error looking up '%s': %Rrc\n", aSymbols[i].pszSymbol, rc));
        RTDbgModRelease(hMod);
        return VERR_NOT_FOUND;
    }
    RTDbgModRelease(hMod); /* fix: was leaked on the success path */

    /*
     * Check if the values make sense.
     */
    if (pData->f64Bit ? !LNX64_VALID_ADDRESS(GCPtrLogBuf) : !LNX32_VALID_ADDRESS(GCPtrLogBuf))
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_buf' value %RGv is not valid.\n", GCPtrLogBuf));
        return VERR_NOT_FOUND;
    }
    if (   cbLogBuf < 4096
        || !RT_IS_POWER_OF_TWO(cbLogBuf)
        || cbLogBuf > 16*_1M)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_buf_len' value %#x is not valid.\n", cbLogBuf));
        return VERR_NOT_FOUND;
    }
    uint32_t const cbLogAlign = 4;
    if (   idxFirst > cbLogBuf - sizeof(LNXPRINTKHDR)
        || (idxFirst & (cbLogAlign - 1)) != 0)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_first_idx' value %#x is not valid.\n", idxFirst));
        return VERR_NOT_FOUND;
    }
    if (   idxNext > cbLogBuf - sizeof(LNXPRINTKHDR)
        || (idxNext & (cbLogAlign - 1)) != 0)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_next_idx' value %#x is not valid.\n", idxNext));
        return VERR_NOT_FOUND;
    }

    /*
     * Read the whole log buffer.
     */
    uint8_t *pbLogBuf = (uint8_t *)RTMemAlloc(cbLogBuf);
    if (!pbLogBuf)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Failed to allocate %#x bytes for log buffer\n", cbLogBuf));
        return VERR_NO_MEMORY;
    }
    DBGFADDRESS Addr;
    rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, DBGFR3AddrFromFlat(pUVM, &Addr, GCPtrLogBuf), pbLogBuf, cbLogBuf);
    if (RT_FAILURE(rc))
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Error reading %#x bytes of log buffer at %RGv: %Rrc\n",
             cbLogBuf, Addr.FlatPtr, rc));
        RTMemFree(pbLogBuf);
        return VERR_NOT_FOUND;
    }

    /*
     * Count the messages in the buffer while doing some basic validation.
     * The ring may wrap: idxFirst == idxNext is treated as "possibly full".
     */
    uint32_t const cbUsed = idxFirst == idxNext ? cbLogBuf /* could be empty... */
                          : idxFirst < idxNext ? idxNext - idxFirst : cbLogBuf - idxFirst + idxNext;
    uint32_t cbLeft   = cbUsed;
    uint32_t offCur   = idxFirst;
    uint32_t cLogMsgs = 0;
    while (cbLeft > 0)
    {
        PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        if (!pHdr->cbTotal)
        {
            /* A zero cbTotal marks the wrap-around point; restart at offset 0. */
            if (cbLogBuf - offCur >= cbLeft)
                break; /* the spare space at the end is not accounted for. */
            offCur = 0;
            pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        }
        if (RT_UNLIKELY(   pHdr->cbTotal > cbLogBuf - sizeof(*pHdr) - offCur
                        || pHdr->cbTotal > cbLeft
                        || (pHdr->cbTotal & (cbLogAlign - 1)) != 0
                        || pHdr->cbTotal < (uint32_t)pHdr->cbText + (uint32_t)pHdr->cbDict + sizeof(*pHdr) ))
        {
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Invalid printk_log record at %#x: cbTotal=%#x cbText=%#x cbDict=%#x cbLogBuf=%#x cbLeft=%#x\n",
                 offCur, pHdr->cbTotal, pHdr->cbText, pHdr->cbDict, cbLogBuf, cbLeft));
            rc = VERR_INVALID_STATE;
            break;
        }

        if (pHdr->cbText > 0)
            cLogMsgs++;

        /* next */
        offCur += pHdr->cbTotal;
        cbLeft -= pHdr->cbTotal;
    }
    if (RT_FAILURE(rc))
    {
        RTMemFree(pbLogBuf);
        return rc;
    }

    /*
     * Copy the messages into the output buffer.
     */
    offCur = idxFirst;
    cbLeft = cbUsed;

    /* Skip messages that the caller doesn't want. */
    if (cMessages < cLogMsgs)
    {
        uint32_t cToSkip = cLogMsgs - cMessages;
        while (cToSkip > 0)
        {
            PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
            if (!pHdr->cbTotal)
            {
                offCur = 0;
                pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
            }
            if (pHdr->cbText > 0)
                cToSkip--;

            /* next */
            offCur += pHdr->cbTotal;
            cbLeft -= pHdr->cbTotal;
        }
    }

    /* Now copy the messages. */
    size_t offDst = 0;
    while (cbLeft > 0)
    {
        PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        if (!pHdr->cbTotal)
        {
            if (cbLogBuf - offCur >= cbLeft)
                break;
            offCur = 0;
            pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        }

        if (pHdr->cbText > 0)
        {
            /* The message text immediately follows the header; it need not be
               NUL terminated, so bound by cbText.  offDst keeps counting even
               past cbBuf so the required size can be reported. */
            char  *pchText = (char *)(pHdr + 1);
            size_t cchText = RTStrNLen(pchText, pHdr->cbText);
            if (offDst + cchText < cbBuf)
            {
                memcpy(&pszBuf[offDst], pHdr + 1, cchText);
                pszBuf[offDst + cchText] = '\n';
            }
            else if (offDst < cbBuf)
                memcpy(&pszBuf[offDst], pHdr + 1, cbBuf - offDst);
            offDst += cchText + 1;
        }

        /* next */
        offCur += pHdr->cbTotal;
        cbLeft -= pHdr->cbTotal;
    }

    /* Done with the buffer. */
    RTMemFree(pbLogBuf);

    /* Make sure we've reserved a char for the terminator. */
    if (!offDst)
        offDst = 1;

    /* Set return size value. */
    if (pcbActual)
        *pcbActual = offDst;

    /*
     * All VBox strings are UTF-8 and bad things may in theory happen if we
     * pass bad UTF-8 to code which assumes it's all valid.  So, we enforce
     * UTF-8 upon the guest kernel messages here even if they (probably) have
     * no defined code set in reality.
     */
    if (offDst <= cbBuf)
    {
        pszBuf[offDst - 1] = '\0';
        RTStrPurgeEncoding(pszBuf);
        return VINF_SUCCESS;
    }
    if (cbBuf)
    {
        pszBuf[cbBuf - 1] = '\0';
        RTStrPurgeEncoding(pszBuf);
    }
    return VERR_BUFFER_OVERFLOW;
}