/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 *
 * Runs on the EMT of @a idCpu for non-shadow queries (asserted below).
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_INVALID_STATE, or whatever
 *          the SELM getters return).
 * @param   pVM         The VM handle.
 * @param   idCpu       The ID of the virtual CPU to query.
 * @param   Sel         The selector to look up.
 * @param   fFlags      DBGFSELQI_FLAGS_XXX controlling which descriptor
 *                      table is consulted and whether 64-bit adjustments
 *                      are applied.
 * @param   pSelInfo    Where to store the selector information.
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    /*
     * Make the query.
     */
    int rc;
    if (!(fFlags & DBGFSELQI_FLAGS_DT_SHADOW))
    {
        PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
        VMCPU_ASSERT_EMT(pVCpu); /* SELMR3GetSelectorInfo must be called on the owning EMT. */
        rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, pSelInfo);

        /*
         * 64-bit mode HACKS for making data and stack selectors wide open when
         * queried. This is voodoo magic.
         */
        if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
        {
            /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
            /* Require: lookup succeeded, the ONLY mode flag set is LONG_MODE
               (no gate/hyper/invalid/not-present), limit not already expanded,
               and the guest is currently executing 64-bit code. */
            if (    RT_SUCCESS(rc)
                &&  (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                         | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
                                         | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
                     == DBGFSELINFO_FLAGS_LONG_MODE
                &&  pSelInfo->cbLimit != ~(RTGCPTR)0
                &&  CPUMIsGuestIn64BitCode(pVCpu, CPUMGetGuestCtxCore(pVCpu)) )
            {
                /* Flat 0..2^64-1 address space. */
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
            }
            else if (   Sel == 0
                     && CPUMIsGuestIn64BitCode(pVCpu, CPUMGetGuestCtxCore(pVCpu)))
            {
                /* A null selector in 64-bit mode: fabricate a wide-open,
                   present, long-mode descriptor and report success. */
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
                pSelInfo->Sel       = 0;
                pSelInfo->SelGate   = 0;
                pSelInfo->fFlags    = DBGFSELINFO_FLAGS_LONG_MODE;
                pSelInfo->u.Raw64.Gen.u1Present  = 1;
                pSelInfo->u.Raw64.Gen.u1Long     = 1;
                pSelInfo->u.Raw64.Gen.u1DescType = 1;
                rc = VINF_SUCCESS;
            }
        }
    }
    else
    {
        /* Shadow DT queries only make sense without hardware virtualization. */
        if (HWACCMIsEnabled(pVM))
            rc = VERR_INVALID_STATE;
        else
            rc = SELMR3GetShadowSelectorInfo(pVM, Sel, pSelInfo);
    }

    return rc;
}
/** * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx, * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper. */ static int dbgfR3StackWalkBeginCommon(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFADDRESS pAddrFrame, PCDBGFADDRESS pAddrStack, PCDBGFADDRESS pAddrPC, DBGFRETURNTYPE enmReturnType, PCDBGFSTACKFRAME *ppFirstFrame) { /* * Validate parameters. */ *ppFirstFrame = NULL; UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE); PVM pVM = pUVM->pVM; VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE); AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID); if (pAddrFrame) AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER); if (pAddrStack) AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER); if (pAddrPC) AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER); AssertReturn(enmReturnType >= DBGFRETURNTYPE_INVALID && enmReturnType < DBGFRETURNTYPE_END, VERR_INVALID_PARAMETER); /* * Get the CPUM context pointer and pass it on the specified EMT. */ RTDBGAS hAs; PCCPUMCTXCORE pCtxCore; switch (enmCodeType) { case DBGFCODETYPE_GUEST: pCtxCore = CPUMGetGuestCtxCore(VMMGetCpuById(pVM, idCpu)); hAs = DBGF_AS_GLOBAL; break; case DBGFCODETYPE_HYPER: pCtxCore = CPUMGetHyperCtxCore(VMMGetCpuById(pVM, idCpu)); hAs = DBGF_AS_RC_AND_GC_GLOBAL; break; case DBGFCODETYPE_RING0: pCtxCore = NULL; /* No valid context present. */ hAs = DBGF_AS_R0; break; default: AssertFailedReturn(VERR_INVALID_PARAMETER); } return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10, pUVM, idCpu, pCtxCore, hAs, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame); }