/**
 * Initializes a slab control structure.
 *
 * @param   pSlabCtl        The slab control structure to initialize.
 * @param   cbSlabCtl       The size of the control structure, used to verify
 *                          that the allocation bitmap fits.
 * @param   uFlatSlabPtr    Flat address of the memory managed by the slab.
 *                          Must be aligned on a chunk boundary.
 * @param   cbSlab          The size of the slab, in bytes.
 * @param   cbChunk         The chunk size.  Must be a power of two.
 */
BS3_DECL(void) Bs3SlabInit(PBS3SLABCTL pSlabCtl, size_t cbSlabCtl, uint32_t uFlatSlabPtr, uint32_t cbSlab, uint16_t cbChunk)
{
    uint16_t cBits;
    BS3_ASSERT(RT_IS_POWER_OF_TWO(cbChunk));
    BS3_ASSERT(cbSlab >= cbChunk * 4);
    BS3_ASSERT(!(uFlatSlabPtr & (cbChunk - 1)));

    BS3_XPTR_SET_FLAT(BS3SLABCTL, pSlabCtl->pNext, 0);
    BS3_XPTR_SET_FLAT(BS3SLABCTL, pSlabCtl->pHead, 0);
    BS3_XPTR_SET_FLAT(BS3SLABCTL, pSlabCtl->pbStart, uFlatSlabPtr);
    pSlabCtl->cbChunk     = cbChunk;
    pSlabCtl->cChunkShift = ASMBitFirstSetU16(cbChunk) - 1;
    pSlabCtl->cChunks     = cbSlab >> pSlabCtl->cChunkShift;
    pSlabCtl->cFreeChunks = pSlabCtl->cChunks;

    cBits = RT_ALIGN_T(pSlabCtl->cChunks, 32, uint16_t);
    BS3_ASSERT(cbSlabCtl >= RT_OFFSETOF(BS3SLABCTL, bmAllocated[cBits >> 3]));
    Bs3MemZero(&pSlabCtl->bmAllocated, cBits >> 3);

    /* Mark excess bitmap padding bits as allocated. */
    if (cBits != pSlabCtl->cChunks)
    {
        uint16_t iBit;
        for (iBit = pSlabCtl->cChunks; iBit < cBits; iBit++)
            ASMBitSet(pSlabCtl->bmAllocated, iBit);
    }
}
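/*
 * Illustration (standalone C, not part of BS3Kit): how the bitmap sizing and
 * padding-bit arithmetic above works out.  The helper recomputes the numbers
 * for hypothetical slab sizes: a 4 KiB slab with 16-byte chunks yields 256
 * chunks and a 32-byte bitmap with no padding, while a 4080-byte slab yields
 * 255 chunks plus one padding bit that Bs3SlabInit marks allocated so it can
 * never be handed out.
 */
#include <stdio.h>

static void slabBitmapMath(unsigned cbSlab, unsigned cbChunk)
{
    unsigned cChunkShift = 0;
    while ((1u << cChunkShift) < cbChunk)       /* log2 of the power-of-two chunk size */
        cChunkShift++;
    unsigned cChunks = cbSlab >> cChunkShift;
    unsigned cBits   = (cChunks + 31) & ~31u;   /* bitmap bit count, aligned up to 32 */
    printf("cbSlab=%u cbChunk=%u -> cChunks=%u bitmap=%u bytes, %u padding bit(s)\n",
           cbSlab, cbChunk, cChunks, cBits / 8, cBits - cChunks);
}

int main(void)
{
    slabBitmapMath(4096, 16);   /* 256 chunks, 32-byte bitmap, no padding bits  */
    slabBitmapMath(4080, 16);   /* 255 chunks, one padding bit kept "allocated" */
    return 0;
}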
/**
 * Sets a manifest attribute.
 *
 * @returns IPRT status code.
 * @param   hManifest           The manifest handle.
 * @param   pszAttr             The attribute name.  If this already exists,
 *                              its value will be replaced.
 * @param   pszValue            The value string.
 * @param   fType               The attribute type, pass
 *                              RTMANIFEST_ATTR_UNKNOWN if not known.
 */
RTDECL(int) RTManifestSetAttr(RTMANIFEST hManifest, const char *pszAttr, const char *pszValue, uint32_t fType)
{
    RTMANIFESTINT *pThis = hManifest;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMANIFEST_MAGIC, VERR_INVALID_HANDLE);
    AssertPtr(pszAttr);
    AssertPtr(pszValue);
    AssertReturn(RT_IS_POWER_OF_TWO(fType) && fType < RTMANIFEST_ATTR_END, VERR_INVALID_PARAMETER);

    return rtManifestSetAttrWorker(&pThis->SelfEntry, pszAttr, pszValue, fType);
}
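/*
 * Usage sketch (illustrative, not from the sources): setting a global
 * attribute on a freshly created manifest.  The attribute name and value are
 * made up; the IPRT calls themselves (RTManifestCreate, RTManifestSetAttr,
 * RTManifestRelease from iprt/manifest.h) are real.
 */
static int exampleManifestAttr(void)
{
    RTMANIFEST hManifest;
    int rc = RTManifestCreate(0 /*fFlags*/, &hManifest);
    if (RT_SUCCESS(rc))
    {
        /* The attribute applies to the manifest itself (its "self entry"),
           not to any named entry. */
        rc = RTManifestSetAttr(hManifest, "x-example-vendor", "ExampleCorp", RTMANIFEST_ATTR_UNKNOWN);
        RTManifestRelease(hManifest);
    }
    return rc;
}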
/**
 * Sets an attribute of a manifest entry.
 *
 * @returns IPRT status code.
 * @param   hManifest           The manifest handle.
 * @param   pszEntry            The entry name.  This will automatically be
 *                              added if there was no previous call to
 *                              RTManifestEntryAdd for this name.  See
 *                              RTManifestEntryAdd for the entry name rules.
 * @param   pszAttr             The attribute name.  If this already exists,
 *                              its value will be replaced.
 * @param   pszValue            The value string.
 * @param   fType               The attribute type, pass
 *                              RTMANIFEST_ATTR_UNKNOWN if not known.
 */
RTDECL(int) RTManifestEntrySetAttr(RTMANIFEST hManifest, const char *pszEntry, const char *pszAttr,
                                   const char *pszValue, uint32_t fType)
{
    RTMANIFESTINT *pThis = hManifest;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMANIFEST_MAGIC, VERR_INVALID_HANDLE);
    AssertPtr(pszEntry);
    AssertPtr(pszAttr);
    AssertPtr(pszValue);
    AssertReturn(RT_IS_POWER_OF_TWO(fType) && fType < RTMANIFEST_ATTR_END, VERR_INVALID_PARAMETER);

    bool    fNeedNormalization;
    size_t  cchEntry;
    int rc = rtManifestValidateNameEntry(pszEntry, &fNeedNormalization, &cchEntry);
    AssertRCReturn(rc, rc);

    /*
     * Resolve the entry, adding one if necessary.
     */
    PRTMANIFESTENTRY pEntry;
    rc = rtManifestGetEntry(pThis, pszEntry, fNeedNormalization, cchEntry, &pEntry);
    if (rc == VERR_NOT_FOUND)
    {
        pEntry = (PRTMANIFESTENTRY)RTMemAlloc(RT_OFFSETOF(RTMANIFESTENTRY, szName[cchEntry + 1]));
        if (!pEntry)
            return VERR_NO_MEMORY;

        pEntry->StrCore.cchString = cchEntry;
        pEntry->StrCore.pszString = pEntry->szName;
        pEntry->Attributes        = NULL;
        pEntry->cAttributes       = 0;
        memcpy(pEntry->szName, pszEntry, cchEntry + 1);
        if (fNeedNormalization)
            rtManifestNormalizeEntry(pEntry->szName);

        if (!RTStrSpaceInsert(&pThis->Entries, &pEntry->StrCore))
        {
            RTMemFree(pEntry);
            return VERR_INTERNAL_ERROR_4;
        }
        pThis->cEntries++;
    }
    else if (RT_FAILURE(rc))
        return rc;

    return rtManifestSetAttrWorker(pEntry, pszAttr, pszValue, fType);
}
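/*
 * Usage sketch (illustrative, not from the sources): attaching an attribute
 * to a named entry.  The entry name and attribute name are hypothetical; as
 * documented above, the entry is added implicitly on first use if no prior
 * RTManifestEntryAdd call was made for it.
 */
static int exampleEntryAttr(RTMANIFEST hManifest)
{
    return RTManifestEntrySetAttr(hManifest, "some/file.txt", "x-example-note",
                                  "generated", RTMANIFEST_ATTR_UNKNOWN);
}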
/**
 * Checks I/O access for guest or hypervisor breakpoints.
 *
 * @returns Strict VBox status code
 * @retval  VINF_SUCCESS no breakpoint.
 * @retval  VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
 * @retval  VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
 *          been updated appropriately.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   pCtx        The CPU context for the calling EMT.
 * @param   uIoPort     The I/O port being accessed.
 * @param   cbValue     The size/width of the access, in bytes.
 */
VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
{
    uint32_t const uIoPortFirst = uIoPort;
    uint32_t const uIoPortLast  = uIoPortFirst + cbValue - 1;

    /*
     * Check hyper breakpoints first as the VMM debugger has priority over
     * the guest.
     */
    for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
    {
        if (   pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
            && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType     == DBGFBPTYPE_REG)
        {
            uint8_t  cbReg     = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb;
            Assert(RT_IS_POWER_OF_TWO(cbReg));
            uint64_t uDrXFirst = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
            uint64_t uDrXLast  = uDrXFirst + cbReg - 1;
            if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
            {
                /* (See also DBGFRZTrap01Handler.) */
                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
                pVCpu->dbgf.s.fSingleSteppingRaw = false;

                LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                return VINF_EM_DBG_BREAKPOINT;
            }
        }
    }

    /*
     * Check the guest.
     */
    uint32_t const uDr7 = pCtx->dr[7];
    if (   (uDr7 & X86_DR7_ENABLED_MASK)
        && X86_DR7_ANY_RW_IO(uDr7)
        && (pCtx->cr4 & X86_CR4_DE) )
    {
        for (unsigned iBp = 0; iBp < 4; iBp++)
        {
            if (   (uDr7 & X86_DR7_L_G(iBp))
                && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
            {
                /* ASSUME the breakpoint and the I/O width qualifier use the same encoding (1 2 x 4). */
                static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
                uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
                uint64_t uDrXFirst  = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
                uint64_t uDrXLast   = uDrXFirst + cbInvAlign;

                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
                {
                    /*
                     * Update DR6 and DR7.
                     *
                     * See "AMD64 Architecture Programmer's Manual Volume 2",
                     * chapter 13.1.1.3 for details on DR6 bits.  The gist of
                     * it is that the B0..B3 bits are always cleared while the
                     * others must be cleared by software.
                     *
                     * The following sub chapters say the GD bit is always
                     * cleared when generating a #DB so the handler can safely
                     * access the debug registers.
                     */
                    pCtx->dr[6] &= ~X86_DR6_B_MASK;
                    pCtx->dr[6] |= X86_DR6_B(iBp);
                    pCtx->dr[7] &= ~X86_DR7_GD;
                    LogFlow(("DBGFBpCheckIo: hit guest hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                             iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                    return VINF_EM_RAW_GUEST_TRAP;
                }
            }
        }
    }
    return VINF_SUCCESS;
}
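/*
 * Standalone illustration of the two range tricks used above: aligning an
 * address down with the (size - 1) mask to get the first byte a breakpoint
 * covers, and the closed-interval overlap test.  Plain C, no VMM types
 * involved; bpCoversAccess() is purely illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

/* A breakpoint of size cbBp (1, 2, 4 or 8, always a power of two) at uBpAddr
   covers the closed range [uBpAddr & ~(cbBp - 1), first + cbBp - 1]. */
static bool bpCoversAccess(uint64_t uBpAddr, uint8_t cbBp, uint64_t uAccFirst, uint64_t uAccLast)
{
    uint64_t uBpFirst = uBpAddr & ~(uint64_t)(cbBp - 1);
    uint64_t uBpLast  = uBpFirst + cbBp - 1;
    /* Two closed intervals overlap iff each starts no later than the other ends. */
    return uBpFirst <= uAccLast && uBpLast >= uAccFirst;
}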
/**
 * The slow case for SUPReadTsc where we need to apply deltas.
 *
 * Must only be called when deltas are applicable, so please do not call it
 * directly.
 *
 * @returns TSC with delta applied.
 * @param   pGip        Pointer to the GIP.
 *
 * @remarks May be called with interrupts disabled in ring-0!  This is why the
 *          ring-0 code doesn't attempt to figure out the delta.
 *
 * @internal
 */
SUPDECL(uint64_t) SUPReadTscWithDelta(PSUPGLOBALINFOPAGE pGip)
{
    uint64_t uTsc;
    uint16_t iGipCpu;
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
    AssertCompile(RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx) >= RTCPUSET_MAX_CPUS);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO);

    /*
     * Read the TSC and get the corresponding aCPUs index.
     */
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us all we need, no loops/cli. */
        uint32_t iCpuSet;
        uTsc     = ASMReadTscWithAux(&iCpuSet);
        iCpuSet &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very quick, but we need to loop. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint16_t cbLim = ASMGetIdtrLimit();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
            {
                uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
                iCpuSet &= RTCPUSET_MAX_CPUS - 1;
                iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
    else
    {
        /* Get the APIC ID via the slow CPUID instruction, requires looping. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint8_t idApic = ASMGetApicId();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetApicId() == idApic))
            {
                iGipCpu = pGip->aiCpuFromApicId[idApic];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(), no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU, no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags(); /* Interrupts are already disabled, but play it safe. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif

    /*
     * If the delta is valid, apply it.
     */
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
    {
        int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
        if (RT_LIKELY(iTscDelta != INT64_MAX))
            return uTsc - iTscDelta;

#ifdef IN_RING3
        /*
         * The delta needs calculating, call supdrv to get the TSC.
         */
        int rc = SUPR3ReadTsc(&uTsc, NULL);
        if (RT_SUCCESS(rc))
            return uTsc;
        AssertMsgFailed(("SUPR3ReadTsc -> %Rrc\n", rc));
        uTsc = ASMReadTSC();
#endif /* IN_RING3 */
    }

    /*
     * This shouldn't happen, especially not in ring-3 and raw-mode context.
     * But if it does, return something that's half useful.
     */
    AssertMsgFailed(("iGipCpu=%d (%#x) cCpus=%d fGetGipCpu=%#x\n", iGipCpu, iGipCpu, pGip->cCpus, pGip->fGetGipCpu));
    return uTsc;
}
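/*
 * Standalone sketch of the consistency-loop pattern used in the non-RDTSCP
 * paths above: read a cheap per-CPU marker, read the TSC, then re-read the
 * marker.  If the marker changed, the thread may have migrated to another CPU
 * between the reads, so the pair is discarded and retried; after a bounded
 * number of tries the caller falls back to a safer path.  getCpuMarker() and
 * readTsc() are hypothetical stand-ins for ASMGetIdtrLimit()/ASMGetApicId()
 * and ASMReadTSC().
 */
#include <stdint.h>

static uint32_t getCpuMarker(void) { return 0; }  /* hypothetical per-CPU marker */
static uint64_t readTsc(void)      { return 0; }  /* hypothetical TSC read       */

static int readTscWithMarker(uint64_t *puTsc, uint32_t *puMarker)
{
    for (unsigned cTries = 0; cTries <= 16; cTries++)
    {
        uint32_t uMarker = getCpuMarker();
        uint64_t uTsc    = readTsc();
        if (getCpuMarker() == uMarker)  /* still on the same CPU? */
        {
            *puTsc    = uTsc;
            *puMarker = uMarker;
            return 0;
        }
    }
    return -1; /* kept migrating; the caller must use a fallback */
}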
/**
 * @interface_method_impl{DBGFOSIDMESG,pfnQueryKernelLog}
 */
static DECLCALLBACK(int) dbgDiggerLinuxIDmsg_QueryKernelLog(PDBGFOSIDMESG pThis, PUVM pUVM, uint32_t fFlags, uint32_t cMessages,
                                                            char *pszBuf, size_t cbBuf, size_t *pcbActual)
{
    PDBGDIGGERLINUX pData = RT_FROM_MEMBER(pThis, DBGDIGGERLINUX, IDmesg);
    if (cMessages < 1)
        return VERR_INVALID_PARAMETER;

    /*
     * Resolve the symbols we need and read their values.
     */
    RTDBGAS  hAs = DBGFR3AsResolveAndRetain(pUVM, DBGF_AS_KERNEL);
    RTDBGMOD hMod;
    int rc = RTDbgAsModuleByName(hAs, "vmlinux", 0, &hMod);
    if (RT_FAILURE(rc))
        return VERR_NOT_FOUND;
    RTDbgAsRelease(hAs);

    RTGCPTR  GCPtrLogBuf;
    uint32_t cbLogBuf;
    uint32_t idxFirst;
    uint32_t idxNext;

    struct { void *pvVar; size_t cbHost, cbGuest; const char *pszSymbol; } aSymbols[] =
    {
        { &GCPtrLogBuf, sizeof(GCPtrLogBuf), pData->f64Bit ? sizeof(uint64_t) : sizeof(uint32_t), "log_buf" },
        { &cbLogBuf,    sizeof(cbLogBuf),    sizeof(cbLogBuf),                                    "log_buf_len" },
        { &idxFirst,    sizeof(idxFirst),    sizeof(idxFirst),                                    "log_first_idx" },
        { &idxNext,     sizeof(idxNext),     sizeof(idxNext),                                     "log_next_idx" },
    };
    for (uint32_t i = 0; i < RT_ELEMENTS(aSymbols); i++)
    {
        RTDBGSYMBOL SymInfo;
        rc = RTDbgModSymbolByName(hMod, aSymbols[i].pszSymbol, &SymInfo);
        if (RT_SUCCESS(rc))
        {
            RT_BZERO(aSymbols[i].pvVar, aSymbols[i].cbHost);
            Assert(aSymbols[i].cbHost >= aSymbols[i].cbGuest);
            DBGFADDRESS Addr;
            rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/,
                               DBGFR3AddrFromFlat(pUVM, &Addr, (RTGCPTR)SymInfo.Value + pData->AddrKernelBase.FlatPtr),
                               aSymbols[i].pvVar, aSymbols[i].cbGuest);
            if (RT_SUCCESS(rc))
                continue;
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Reading '%s' at %RGv: %Rrc\n", aSymbols[i].pszSymbol, Addr.FlatPtr, rc));
        }
        else
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Error looking up '%s': %Rrc\n", aSymbols[i].pszSymbol, rc));
        RTDbgModRelease(hMod);
        return VERR_NOT_FOUND;
    }
    RTDbgModRelease(hMod); /* Don't leak the module reference on the success path either. */

    /*
     * Check if the values make sense.
     */
    if (pData->f64Bit ? !LNX64_VALID_ADDRESS(GCPtrLogBuf) : !LNX32_VALID_ADDRESS(GCPtrLogBuf))
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_buf' value %RGv is not valid.\n", GCPtrLogBuf));
        return VERR_NOT_FOUND;
    }
    if (   cbLogBuf < 4096
        || !RT_IS_POWER_OF_TWO(cbLogBuf)
        || cbLogBuf > 16*_1M)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_buf_len' value %#x is not valid.\n", cbLogBuf));
        return VERR_NOT_FOUND;
    }
    uint32_t const cbLogAlign = 4;
    if (   idxFirst > cbLogBuf - sizeof(LNXPRINTKHDR)
        || (idxFirst & (cbLogAlign - 1)) != 0)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_first_idx' value %#x is not valid.\n", idxFirst));
        return VERR_NOT_FOUND;
    }
    if (   idxNext > cbLogBuf - sizeof(LNXPRINTKHDR)
        || (idxNext & (cbLogAlign - 1)) != 0)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: 'log_next_idx' value %#x is not valid.\n", idxNext));
        return VERR_NOT_FOUND;
    }

    /*
     * Read the whole log buffer.
     */
    uint8_t *pbLogBuf = (uint8_t *)RTMemAlloc(cbLogBuf);
    if (!pbLogBuf)
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Failed to allocate %#x bytes for log buffer\n", cbLogBuf));
        return VERR_NO_MEMORY;
    }
    DBGFADDRESS Addr;
    rc = DBGFR3MemRead(pUVM, 0 /*idCpu*/, DBGFR3AddrFromFlat(pUVM, &Addr, GCPtrLogBuf), pbLogBuf, cbLogBuf);
    if (RT_FAILURE(rc))
    {
        Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Error reading %#x bytes of log buffer at %RGv: %Rrc\n",
             cbLogBuf, Addr.FlatPtr, rc));
        RTMemFree(pbLogBuf);
        return VERR_NOT_FOUND;
    }

    /*
     * Count the messages in the buffer while doing some basic validation.
     */
    uint32_t const cbUsed = idxFirst == idxNext ? cbLogBuf /* could be empty... */
                          : idxFirst <  idxNext ?
                            idxNext - idxFirst : cbLogBuf - idxFirst + idxNext;
    uint32_t cbLeft   = cbUsed;
    uint32_t offCur   = idxFirst;
    uint32_t cLogMsgs = 0;

    while (cbLeft > 0)
    {
        PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        if (!pHdr->cbTotal)
        {
            /* Wrap-around record, most likely... */
            if (cbLogBuf - offCur >= cbLeft)
                break;
            offCur = 0;
            pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        }
        if (RT_UNLIKELY(   pHdr->cbTotal > cbLogBuf - sizeof(*pHdr) - offCur
                        || pHdr->cbTotal > cbLeft
                        || (pHdr->cbTotal & (cbLogAlign - 1)) != 0
                        || pHdr->cbTotal < (uint32_t)pHdr->cbText + (uint32_t)pHdr->cbDict + sizeof(*pHdr) ))
        {
            Log(("dbgDiggerLinuxIDmsg_QueryKernelLog: Invalid printk_log record at %#x: cbTotal=%#x cbText=%#x cbDict=%#x cbLogBuf=%#x cbLeft=%#x\n",
                 offCur, pHdr->cbTotal, pHdr->cbText, pHdr->cbDict, cbLogBuf, cbLeft));
            rc = VERR_INVALID_STATE;
            break;
        }

        if (pHdr->cbText > 0)
            cLogMsgs++;

        /* next */
        offCur += pHdr->cbTotal;
        cbLeft -= pHdr->cbTotal;
    }
    if (RT_FAILURE(rc))
    {
        RTMemFree(pbLogBuf);
        return rc;
    }

    /*
     * Copy the messages into the output buffer.
     */
    offCur = idxFirst;
    cbLeft = cbUsed;

    /* Skip messages that the caller doesn't want. */
    if (cMessages < cLogMsgs)
    {
        uint32_t cToSkip = cLogMsgs - cMessages;
        while (cToSkip > 0)
        {
            PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
            if (!pHdr->cbTotal)
            {
                offCur = 0;
                pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
            }
            if (pHdr->cbText > 0)
                cToSkip--;

            /* next */
            offCur += pHdr->cbTotal;
            cbLeft -= pHdr->cbTotal;
        }
    }

    /* Now copy the messages. */
    size_t offDst = 0;
    while (cbLeft > 0)
    {
        PCLNXPRINTKHDR pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        if (!pHdr->cbTotal)
        {
            if (cbLogBuf - offCur >= cbLeft)
                break;
            offCur = 0;
            pHdr = (PCLNXPRINTKHDR)&pbLogBuf[offCur];
        }

        if (pHdr->cbText > 0)
        {
            char  *pchText = (char *)(pHdr + 1);
            size_t cchText = RTStrNLen(pchText, pHdr->cbText);
            if (offDst + cchText < cbBuf)
            {
                memcpy(&pszBuf[offDst], pHdr + 1, cchText);
                pszBuf[offDst + cchText] = '\n';
            }
            else if (offDst < cbBuf)
                memcpy(&pszBuf[offDst], pHdr + 1, cbBuf - offDst);
            offDst += cchText + 1;
        }

        /* next */
        offCur += pHdr->cbTotal;
        cbLeft -= pHdr->cbTotal;
    }

    /* Done with the buffer. */
    RTMemFree(pbLogBuf);

    /* Make sure we've reserved a char for the terminator. */
    if (!offDst)
        offDst = 1;

    /* Set the return size value. */
    if (pcbActual)
        *pcbActual = offDst;

    /*
     * All VBox strings are UTF-8 and bad things may in theory happen if we
     * pass bad UTF-8 to code which assumes it's all valid.  So, we enforce
     * UTF-8 upon the guest kernel messages here even if they (probably) have
     * no defined code set in reality.
     */
    if (offDst <= cbBuf)
    {
        pszBuf[offDst - 1] = '\0';
        RTStrPurgeEncoding(pszBuf);
        return VINF_SUCCESS;
    }
    if (cbBuf)
    {
        pszBuf[cbBuf - 1] = '\0';
        RTStrPurgeEncoding(pszBuf);
    }
    return VERR_BUFFER_OVERFLOW;
}
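/*
 * Standalone sketch of the ring-buffer traversal above (plain C, simplified
 * types).  Each record starts with a header whose cbTotal field gives the
 * distance to the next record; a zero cbTotal marks the wrap point, after
 * which reading continues at offset 0.  EXAMPLEHDR mirrors only the two
 * fields the traversal needs and is not the full kernel printk_log layout.
 */
#include <stdint.h>

typedef struct EXAMPLEHDR
{
    uint16_t cbTotal;   /* size of this record, header included; 0 = wrap */
    uint16_t cbText;    /* size of the message text following the header  */
} EXAMPLEHDR;

/* Counts the messages in a log buffer holding cbUsed bytes starting at offFirst. */
static uint32_t countMessages(const uint8_t *pbBuf, uint32_t cbBuf, uint32_t offFirst, uint32_t cbUsed)
{
    uint32_t cMsgs  = 0;
    uint32_t offCur = offFirst;
    while (cbUsed > 0)
    {
        const EXAMPLEHDR *pHdr = (const EXAMPLEHDR *)&pbBuf[offCur];
        if (!pHdr->cbTotal)
        {
            if (cbBuf - offCur >= cbUsed)   /* no more records after the wrap marker */
                break;
            cbUsed -= cbBuf - offCur;       /* account for the unused tail */
            offCur  = 0;                    /* wrap to the start of the buffer */
            pHdr    = (const EXAMPLEHDR *)&pbBuf[offCur];
        }
        if (pHdr->cbTotal > cbUsed)         /* corrupt record, stop */
            break;
        if (pHdr->cbText > 0)
            cMsgs++;
        offCur += pHdr->cbTotal;
        cbUsed -= pHdr->cbTotal;
    }
    return cMsgs;
}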