#ifdef LOG_ENABLED
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fallback on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     */
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rcStrict = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
#ifdef LOG_ENABLED
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int      rc;
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fallback on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     */
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rc = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

#ifdef EM_NOTIFY_HM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        HMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
/**
 * @interface_method_impl{PDMINETWORKUP,pfnSendBuf}
 */
PDMBOTHCBDECL(int) drvDedicatedNicUp_SendBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf, bool fOnWorkerThread)
{
    PDRVDEDICATEDNIC pThis = RT_FROM_MEMBER(pInterface, DRVDEDICATEDNIC, CTX_SUFF(INetworkUp));
    STAM_PROFILE_START(&pThis->StatTransmit, a);

    AssertPtr(pSgBuf);
    Assert(pSgBuf->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1));
    Assert(pSgBuf->cbUsed <= pSgBuf->cbAvailable);
#ifdef IN_RING0
    Assert(pSgBuf == &pThis->XmitSg);
#endif
    Assert(PDMCritSectIsOwner(&pThis->XmitLock));

#ifdef IN_RING0
    /*
     * Tell the driver to send the packet.
     */
    return VERR_INTERNAL_ERROR_4;

#else  /* IN_RING3 */
    /*
     * Call ring-0 to start the transfer.
     */
    int rc = PDMDrvHlpCallR0(pThis->pDrvInsR3, DRVDEDICATEDNICR0OP_SEND, pSgBuf->cbUsed);
    if (RT_FAILURE(rc) && rc != VERR_NET_DOWN)
        rc = VERR_NET_NO_BUFFER_SPACE;
    pSgBuf->fFlags = 0;
    return rc;
#endif /* IN_RING3 */
}
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /*
     * Try to restart the io instruction that was refused in ring-0.
     */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /*
     * Hand it over to the interpreter.
     */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);

    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM);
    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        /*
         * Hand it over to the interpreter.
         */
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
    }

    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}
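/*
 * Editor's note: a minimal illustrative sketch (not VirtualBox source) of the
 * hardening pattern both functions above build with
 * RT_UNTRUSTED_NONVOLATILE_COPY_FENCE and RT_UNTRUSTED_VALIDATED_FENCE: an
 * index derived from guest-influenced state is copied exactly once, fenced,
 * bounds-checked, and fenced again before it indexes a host-side array, so
 * neither a racing writer nor speculative execution can act on an
 * out-of-range value.  All names below are hypothetical, and the C11 compiler
 * fences merely stand in for the architecture-specific barriers the real
 * macros expand to.
 */
#include <stdatomic.h>

#define EXAMPLE_TABLE_SIZE 16
static int g_aiExampleTable[EXAMPLE_TABLE_SIZE];

static int exampleLookupUntrusted(volatile unsigned const *puUntrustedIdx)
{
    unsigned idx = *puUntrustedIdx;             /* one non-volatile copy... */
    atomic_signal_fence(memory_order_seq_cst);  /* ...fenced so it is not re-read */
    if (idx >= EXAMPLE_TABLE_SIZE)
        return -1;                              /* out of range: take the fallback path */
    atomic_signal_fence(memory_order_seq_cst);  /* fence after validation, before use */
    return g_aiExampleTable[idx];
}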
/*
 * Try and write() to the socket, whatever doesn't get written
 * append to the buffer... for a host with a fast net connection,
 * this prevents an unnecessary copy of the data
 * (the socket is non-blocking, so we won't hang)
 */
void sbappend(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIOSBAppend_pf, a);
    LogFlow(("sbappend: so = %lx, m = %lx, m->m_len = %d\n", (long)so, (long)m, m ? m->m_len : 0));

    STAM_COUNTER_INC(&pData->StatIOSBAppend);
    /* Shouldn't happen, but...  e.g. foreign host closes connection */
    mlen = m_length(m, NULL);
    if (mlen <= 0)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_zm);
        goto done;
    }

    /*
     * If there is urgent data, call sosendoob
     * if not all was sent, sowrite will take care of the rest
     * (The rest of this function is just an optimisation)
     */
    if (so->so_urgc)
    {
        sbappendsb(pData, &so->so_rcv, m);
        m_freem(pData, m);
        sosendoob(so);
        return;
    }

    /*
     * We only write if there's nothing in the buffer,
     * otherwise it'll arrive out of order, and hence corrupt
     */
    if (so->so_rcv.sb_cc == 0)
    {
        caddr_t buf = NULL;

        if (m->m_next)
        {
            buf = RTMemAlloc(mlen);
            if (buf == NULL)
            {
                ret = 0;
                goto no_sent;
            }
            m_copydata(m, 0, mlen, buf);
        }
        else
            buf = mtod(m, char *);

        ret = send(so->s, buf, mlen, 0);

        if (m->m_next)
            RTMemFree(buf);
    }
/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    LogFlowFunc(("ENTER: m = %lx\n", (long)m));
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%RTnaipv4(len:%d) m_len = %d\n", ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        if (!(m->m_flags & M_SKIP_FIREWALL))
        {
            STAM_PROFILE_START(&pData->StatALIAS_input, b);
            rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m_length(m, NULL));
            STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
            Log2(("NAT: LibAlias return %d\n", rc));
        }
        else
/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
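/*
 * Editor's note: iomMMIODoReadFFs and iomMMIODoRead00s are not shown in this
 * excerpt.  Judging only by their names and the VINF_IOM_MMIO_UNUSED_FF/_00
 * statuses they handle, a plausible minimal sketch is filling the destination
 * with all-ones (a floating bus) or all-zeros.  The helper below is
 * hypothetical, not the actual VirtualBox implementation:
 */
#include <string.h>

static int exampleMmioDoReadFFs(void *pvValue, unsigned cbValue)
{
    memset(pvValue, 0xff, cbValue);   /* unclaimed MMIO reads back as 0xFF..FF */
    return 0;                         /* i.e. VINF_SUCCESS */
}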
/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* Try to restart the io instruction that was refused in ring-0. */
    VBOXSTRICTRC rcStrict = HWACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     *        as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.fPrefix & DISPREFIX_REP)
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            pCtx->rip += Cpu.cbInstr;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
}
#ifdef LOG_ENABLED
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int      rc;
    NOREF(rcRC);

    /*
     *
     * The simple solution is to use the recompiler.
     * The better solution is to disassemble the current instruction and
     * try to handle as many as possible without using REM.
     *
     */

#ifdef LOG_ENABLED
    /*
     * Disassemble the instruction if requested.
     */
    if (pszPrefix)
    {
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
        DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
    }
#endif /* LOG_ENABLED */

#if 0
    /* Try our own instruction emulator before falling back to the recompiler. */
    DISCPUSTATE Cpu;
    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
    if (RT_SUCCESS(rc))
    {
        switch (Cpu.pCurInstr->uOpcode)
        {
            /* @todo we can do more now */
            case OP_MOV:
            case OP_AND:
            case OP_OR:
            case OP_XOR:
            case OP_POP:
            case OP_INC:
            case OP_DEC:
            case OP_XCHG:
                STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
                rc = EMInterpretInstructionCpuUpdtPC(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0);
                if (RT_SUCCESS(rc))
                {
#ifdef EM_NOTIFY_HWACCM
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
                        HWACCMR3NotifyEmulated(pVCpu);
#endif
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                    return rc;
                }
                if (rc != VERR_EM_INTERPRETER)
                    AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                break;
        }
    }
#endif /* 0 */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
#ifdef VBOX_WITH_REM
    EMRemLock(pVM);
    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    rc = REMR3EmulateInstruction(pVM, pVCpu);
    EMRemUnlock(pVM);
#else
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);

#ifdef EM_NOTIFY_HWACCM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
        HWACCMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
/*
 * Read from so's socket into sb_snd, updating all relevant sbuf fields
 * NOTE: This will only be called if it is select()ed for reading, so
 * a read() of 0 (or less) means it's disconnected
 */
int soread(PNATState pData, struct socket *so)
{
    int n, nn, lss, total;
    struct sbuf *sb = &so->so_snd;
    u_int len = sb->sb_datalen - sb->sb_cc;
    struct iovec iov[2];
    int mss = so->so_tcpcb->t_maxseg;
    int sockerr;

    STAM_PROFILE_START(&pData->StatIOread, a);
    STAM_COUNTER_RESET(&pData->StatIORead_in_1);
    STAM_COUNTER_RESET(&pData->StatIORead_in_2);

    QSOCKET_LOCK(tcb);
    SOCKET_LOCK(so);
    QSOCKET_UNLOCK(tcb);

    LogFlow(("soread: so = %R[natsock]\n", so));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, so, sb));

    /*
     * No need to check if there's enough room to read.
     * soread wouldn't have been called if there weren't
     */
    len = sb->sb_datalen - sb->sb_cc;

    iov[0].iov_base = sb->sb_wptr;
    iov[1].iov_base = 0;
    iov[1].iov_len  = 0;
    if (sb->sb_wptr < sb->sb_rptr)
    {
        iov[0].iov_len = sb->sb_rptr - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        if (iov[0].iov_len > mss)
            iov[0].iov_len -= iov[0].iov_len % mss;
        n = 1;
    }
    else
    {
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len)
        {
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_rptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            total = iov[0].iov_len + iov[1].iov_len;
            if (total > mss)
            {
                lss = total % mss;
                if (iov[1].iov_len > lss)
                {
                    iov[1].iov_len -= lss;
                    n = 2;
                }
                else
                {
                    lss -= iov[1].iov_len;
                    iov[0].iov_len -= lss;
                    n = 1;
                }
            }
            else
                n = 2;
        }
        else
        {
            if (iov[0].iov_len > mss)
                iov[0].iov_len -= iov[0].iov_len % mss;
            n = 1;
        }
    }

#ifdef HAVE_READV
    nn = readv(so->s, (struct iovec *)iov, n);
#else
    nn = recv(so->s, iov[0].iov_base, iov[0].iov_len, (so->so_tcpcb->t_force ? MSG_OOB : 0));
#endif
    if (nn < 0)
        sockerr = errno; /* save it, as it may be clobbered by logging */
    else
        sockerr = 0;

    Log2(("%s: read(1) nn = %d bytes\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, so, sb));
    if (nn <= 0)
    {
#ifdef RT_OS_WINDOWS
        /*
         * Windows reports ESHUTDOWN after SHUT_RD (SD_RECEIVE)
         * instead of just returning EOF indication.
         */
        if (nn < 0 && sockerr == ESHUTDOWN)
        {
            nn = 0;
            sockerr = 0;
        }
#endif
        if (nn == 0) /* XXX: should this be inside #if defined(RT_OS_WINDOWS)? */
        {
            /*
             * Special case for WSAEnumNetworkEvents: If we receive 0 bytes that
             * _could_ mean that the connection is closed. But we will receive an
             * FD_CLOSE event later if the connection was _really_ closed. With
             * www.youtube.com I see this very often. Closing the socket too early
             * would be dangerous.
             */
            int status;
            unsigned long pending = 0;
            status = ioctlsocket(so->s, FIONREAD, &pending);
            if (status < 0)
                Log(("NAT:%s: error in WSAIoctl: %d\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, errno));
            if (pending != 0)
            {
                SOCKET_UNLOCK(so);
                STAM_PROFILE_STOP(&pData->StatIOread, a);
                return 0;
            }
        }

        if (   nn < 0
            && soIgnorableErrorCode(sockerr))
        {
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return 0;
        }
        else
        {
            int fUninitializedTemplate = 0;
            int shuterr;

            fUninitializedTemplate = RT_BOOL((   sototcpcb(so)
                                              && (  sototcpcb(so)->t_template.ti_src.s_addr == INADDR_ANY
                                                 || sototcpcb(so)->t_template.ti_dst.s_addr == INADDR_ANY)));
            /* nn == 0 means peer has performed an orderly shutdown */
            Log2(("%s: disconnected, nn = %d, errno = %d (%s)\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__,
                  nn, sockerr, strerror(sockerr)));

            shuterr = sofcantrcvmore(so);
            if (!sockerr && !shuterr && !fUninitializedTemplate)
                tcp_sockclosed(pData, sototcpcb(so));
            else
            {
                LogRel2(("NAT: sockerr %d, shuterr %d - %R[natsock]\n", sockerr, shuterr, so));
                tcp_drop(pData, sototcpcb(so), sockerr);
            }
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return -1;
        }
    }

    STAM_STATS(
        if (n == 1)
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_1);
            STAM_COUNTER_ADD(&pData->StatIORead_in_1_bytes, nn);
        }
        else
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_2);
            STAM_COUNTER_ADD(&pData->StatIORead_in_2_1st_bytes, nn);
        }
    );
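/*
 * Editor's note: the iovec arithmetic in soread() above is a classic
 * ring-buffer gather.  When the write pointer is behind the read pointer the
 * free space is one contiguous run; otherwise it wraps around the end of the
 * buffer and needs two segments.  A hypothetical, self-contained restatement
 * of just that split (the struct and names below are illustrative, not the
 * slirp sbuf):
 */
#include <stddef.h>

struct exring { char *data; size_t datalen; char *rptr; char *wptr; size_t cc; };

/* Describes the free space as 1 or 2 segments; returns the segment count. */
static int exampleRingFreeSegments(struct exring *rb,
                                   char **ppSeg1, size_t *pcbSeg1,
                                   char **ppSeg2, size_t *pcbSeg2)
{
    size_t cbFree = rb->datalen - rb->cc;       /* total room left */
    *ppSeg1 = rb->wptr;
    if (rb->wptr < rb->rptr)
    {
        /* Single gap between the write and read pointers. */
        *pcbSeg1 = (size_t)(rb->rptr - rb->wptr);
        if (*pcbSeg1 > cbFree)
            *pcbSeg1 = cbFree;
        return 1;
    }
    /* Wrapped: tail of the buffer first, then the head up to rptr. */
    *pcbSeg1 = (size_t)((rb->data + rb->datalen) - rb->wptr);
    if (*pcbSeg1 > cbFree)
        *pcbSeg1 = cbFree;
    cbFree -= *pcbSeg1;
    if (!cbFree)
        return 1;
    *ppSeg2  = rb->data;
    *pcbSeg2 = (size_t)(rb->rptr - rb->data);
    if (*pcbSeg2 > cbFree)
        *pcbSeg2 = cbFree;
    return 2;
}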
/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    LogFlowFunc(("ENTER: m = %lx\n", (long)m));
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%RTnaipv4(len:%d) m_len = %d\n", ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        STAM_PROFILE_START(&pData->StatALIAS_input, b);
        rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m_length(m, NULL));
        STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
        Log2(("NAT: LibAlias return %d\n", rc));
        if (m->m_len != RT_N2H_U16(ip->ip_len))
            m->m_len = RT_N2H_U16(ip->ip_len);
    }

    mlen = m->m_len;

    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        goto bad_free_m;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad_free_m;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++;                     /* or packet too short */
        goto bad_free_m;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad_free_m;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad_free_m;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad_free_m;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto no_free_m;
    }

    ip->ip_ttl--;

    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
            goto no_free_m;
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
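/*
 * Editor's note: cksum() above is slirp's mbuf-aware Internet checksum; for
 * reference, here is a minimal RFC 1071 ones-complement checksum over a flat
 * buffer (a hypothetical helper, not the slirp implementation).  Computing it
 * over a header whose checksum field is intact yields zero for a valid
 * header, which is why ip_input() treats a non-zero result as a bad sum.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t exampleInetChecksum(const void *pvData, size_t cb)
{
    const uint16_t *pu16 = (const uint16_t *)pvData;
    uint32_t        uSum = 0;

    while (cb > 1)                      /* sum 16-bit words */
    {
        uSum += *pu16++;
        cb   -= 2;
    }
    if (cb)                             /* odd trailing byte */
        uSum += *(const uint8_t *)pu16;

    while (uSum >> 16)                  /* fold the carries back in */
        uSum = (uSum & 0xffff) + (uSum >> 16);
    return (uint16_t)~uSum;
}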
/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    NOREF(pCtxCore); NOREF(GCPhysFault);
    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}
/* This function will free m0! */
int ip_output0(PNATState pData, struct socket *so, struct mbuf *m0, int urg)
{
    register struct ip *ip;
    register struct mbuf *m = m0;
    register int hlen = sizeof(struct ip);
    int len, off, error = 0;
    struct ethhdr *eh = NULL;
    uint8_t eth_dst[ETH_ALEN];
    int rc = 1;

    STAM_PROFILE_START(&pData->StatIP_output, a);

#ifdef LOG_ENABLED
    LogFlowFunc(("ip_output: so = %R[natsock], m0 = %lx\n", so, (long)m0));
#else
    NOREF(so);
#endif

    M_ASSERTPKTHDR(m);
    Assert(m->m_pkthdr.header);

#if 0 /* We do no options */
    if (opt)
    {
        m = ip_insertoptions(m, opt, &len);
        hlen = len;
    }
#endif

    ip = mtod(m, struct ip *);
    LogFunc(("ip(src:%RTnaipv4, dst:%RTnaipv4)\n", ip->ip_src, ip->ip_dst));
    /*
     * Fill in IP header.
     */
    ip->ip_v = IPVERSION;
    ip->ip_off &= IP_DF;
    ip->ip_id = RT_H2N_U16(ip_currid++);
    ip->ip_hl = hlen >> 2;
    ipstat.ips_localout++;

    /*
     * The current TCP/IP stack has no routing information at all, so we
     * need to calculate the destination ethernet address ourselves.
     */
    rc = rt_lookup_in_cache(pData, ip->ip_dst.s_addr, eth_dst);
    if (RT_FAILURE(rc))
        goto exit_drop_package;

    eh = (struct ethhdr *)(m->m_data - ETH_HLEN);
    /*
     * If small enough for interface, can just send directly.
     */
    if ((u_int16_t)ip->ip_len <= if_mtu)
    {
        ip->ip_len = RT_H2N_U16((u_int16_t)ip->ip_len);
        ip->ip_off = RT_H2N_U16((u_int16_t)ip->ip_off);
        ip->ip_sum = 0;
        ip->ip_sum = cksum(m, hlen);

        if (!(m->m_flags & M_SKIP_FIREWALL))
        {
            struct m_tag *t;
            STAM_PROFILE_START(&pData->StatALIAS_output, b);
            if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
                rc = LibAliasOut((struct libalias *)&t[1], mtod(m, char *), m_length(m, NULL));
            else
                rc = LibAliasOut(pData->proxy_alias, mtod(m, char *), m_length(m, NULL));

            if (rc == PKT_ALIAS_IGNORED)
            {
                Log(("NAT: packet was dropped\n"));
                goto exit_drop_package;
            }
            STAM_PROFILE_STOP(&pData->StatALIAS_output, b);
        }
/**
 * Process pending items in one queue.
 *
 * @returns Success indicator.
 *          If false, the consumer said "enough!" and there are items left
 *          over in the queue.
 * @param   pQueue  The queue.
 */
static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
{
    STAM_PROFILE_START(&pQueue->StatFlushPrf,p);

    /*
     * Get the lists.
     */
    PPDMQUEUEITEMCORE pItems   = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
    RTRCPTR           pItemsRC = ASMAtomicXchgRCPtr(&pQueue->pPendingRC, NIL_RTRCPTR);
    RTR0PTR           pItemsR0 = ASMAtomicXchgR0Ptr(&pQueue->pPendingR0, NIL_RTR0PTR);

    AssertMsgReturn(   pItemsR0
                    || pItemsRC
                    || pItems,
                    ("Someone is racing us? This shouldn't happen!\n"),
                    true);

    /*
     * Reverse the list (it's inserted in LIFO order to avoid semaphores, remember).
     */
    PPDMQUEUEITEMCORE pCur = pItems;
    pItems = NULL;
    while (pCur)
    {
        PPDMQUEUEITEMCORE pInsert = pCur;
        pCur = pCur->pNextR3;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending RC items.
     */
    while (pItemsRC)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pQueue->pVMR3, pItemsRC);
        pItemsRC = pInsert->pNextRC;
        pInsert->pNextRC = NIL_RTRCPTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending R0 items.
     */
    while (pItemsR0)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperR0ToR3(pQueue->pVMR3, pItemsR0);
        pItemsR0 = pInsert->pNextR0;
        pInsert->pNextR0 = NIL_RTR0PTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Feed the items to the consumer function.
     */
    Log2(("pdmR3QueueFlush: pQueue=%p enmType=%d pItems=%p\n", pQueue, pQueue->enmType, pItems));
    switch (pQueue->enmType)
    {
        case PDMQUEUETYPE_DEV:
            while (pItems)
            {
                if (!pQueue->u.Dev.pfnCallback(pQueue->u.Dev.pDevIns, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_DRV:
            while (pItems)
            {
                if (!pQueue->u.Drv.pfnCallback(pQueue->u.Drv.pDrvIns, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_INTERNAL:
            while (pItems)
            {
                if (!pQueue->u.Int.pfnCallback(pQueue->pVMR3, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_EXTERNAL:
            while (pItems)
            {
                if (!pQueue->u.Ext.pfnCallback(pQueue->u.Ext.pvUser, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        default:
            AssertMsgFailed(("Invalid queue type %d\n", pQueue->enmType));
            break;
    }

    /*
     * Success?
     */
    if (pItems)
    {
        /*
         * Reverse the list.
         */
        pCur = pItems;
        pItems = NULL;
        while (pCur)
        {
            PPDMQUEUEITEMCORE pInsert = pCur;
            pCur = pInsert->pNextR3;
            pInsert->pNextR3 = pItems;
            pItems = pInsert;
        }

        /*
         * Insert the list at the tail of the pending list.
         */
        for (;;)
        {
            if (ASMAtomicCmpXchgPtr(&pQueue->pPendingR3, pItems, NULL))
                break;
            PPDMQUEUEITEMCORE pPending = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
            if (pPending)
            {
                pCur = pPending;
                while (pCur->pNextR3)
                    pCur = pCur->pNextR3;
                pCur->pNextR3 = pItems;
                pItems = pPending;
            }
        }

        STAM_REL_COUNTER_INC(&pQueue->StatFlushLeftovers);
        STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p);
        return false;
    }

    STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p);
    return true;
}
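/*
 * Editor's note: pdmR3QueueFlush() consumes a list that producers push onto
 * with a single atomic operation, which is why items arrive in LIFO order
 * and must be reversed before dispatch.  A hypothetical sketch of that
 * producer/consumer pairing using C11 atomics (illustrative names, not the
 * PDM implementation):
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct EXAMPLEITEM
{
    struct EXAMPLEITEM *pNext;
    int                 iPayload;
} EXAMPLEITEM;

static _Atomic(EXAMPLEITEM *) g_pPendingHead;

/* Producer: link the item to the current head and CAS it in (lock-free). */
static void examplePush(EXAMPLEITEM *pItem)
{
    EXAMPLEITEM *pHead = atomic_load(&g_pPendingHead);
    do
        pItem->pNext = pHead;
    while (!atomic_compare_exchange_weak(&g_pPendingHead, &pHead, pItem));
}

/* Consumer: grab the whole list with one exchange, then reverse it to FIFO,
   mirroring what pdmR3QueueFlush() does with ASMAtomicXchgPtrT(). */
static EXAMPLEITEM *exampleGrabAllFifo(void)
{
    EXAMPLEITEM *pCur  = atomic_exchange(&g_pPendingHead, NULL);
    EXAMPLEITEM *pFifo = NULL;
    while (pCur)
    {
        EXAMPLEITEM *pNext = pCur->pNext;
        pCur->pNext = pFifo;
        pFifo = pCur;
        pCur  = pNext;
    }
    return pFifo;
}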
/**
 * \#GP (General Protection Fault) handler.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes pass execution on to the host context.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 */
static int trpmGCTrap0dHandler(PVM pVM, PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    LogFlow(("trpmGCTrap0dHandler: cs:eip=%RTsel:%08RX32 uErr=%RGv\n", pRegFrame->cs.Sel, pRegFrame->eip, pTrpmCpu->uActiveErrorCode));
    PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);

    /*
     * Convert and validate CS.
     */
    STAM_PROFILE_START(&pVM->trpm.s.StatTrap0dDisasm, a);
    RTGCPTR  PC;
    uint32_t cBits;
    int rc = SELMValidateAndConvertCSAddrGCTrap(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel,
                                                pRegFrame->rip, &PC, &cBits);
    if (RT_FAILURE(rc))
    {
        Log(("trpmGCTrap0dHandler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n",
             pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
        STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
        return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
    }

    /*
     * Disassemble the instruction.
     */
    DISCPUSTATE Cpu;
    uint32_t    cbOp;
    rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, &Cpu, &cbOp);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("DISCoreOneEx failed to PC=%RGv rc=%Rrc\n", PC, rc));
        STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
        return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
    }
    STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);

    /*
     * Optimize RDTSC traps.
     * Some guests (like Solaris) are using RDTSC all over the place and
     * will end up trapping a *lot* because of that.
     *
     * Note: it's no longer safe to access the instruction opcode directly due to possible stale code TLB entries
     */
    if (Cpu.pCurInstr->uOpcode == OP_RDTSC)
        return trpmGCTrap0dHandlerRdTsc(pVM, pVCpu, pRegFrame);

    /*
     * Deal with I/O port access.
     */
    if (   pVCpu->trpm.s.uActiveErrorCode == 0
        && (Cpu.pCurInstr->fOpType & DISOPTYPE_PORTIO))
    {
        VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pRegFrame, &Cpu);
        if (IOM_SUCCESS(rcStrict))
            pRegFrame->rip += cbOp;
        rc = VBOXSTRICTRC_TODO(rcStrict);
        return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    }

    /*
     * Deal with Ring-0 (privileged instructions)
     */
    if (    (pRegFrame->ss.Sel & X86_SEL_RPL) <= 1
        &&  !pRegFrame->eflags.Bits.u1VM)
        return trpmGCTrap0dHandlerRing0(pVM, pVCpu, pRegFrame, &Cpu, PC);

    /*
     * Deal with Ring-3 GPs.
     */
    if (!pRegFrame->eflags.Bits.u1VM)
        return trpmGCTrap0dHandlerRing3(pVM, pVCpu, pRegFrame, &Cpu, PC);

    /*
     * Deal with v86 code.
     *
     * We always set IOPL to zero which makes e.g. pushf fault in V86
     * mode. The guest might use IOPL=3 and therefore not expect a #GP.
     * Simply fall back to the recompiler to emulate this instruction if
     * that's the case. To get the correct value we must use CPUMRawGetEFlags.
     */
    X86EFLAGS eflags;
    eflags.u32 = CPUMRawGetEFlags(pVCpu); /* Get the correct value. */
    Log3(("TRPM #GP V86: cs:eip=%04x:%08x IOPL=%d efl=%08x\n", pRegFrame->cs.Sel, pRegFrame->eip, eflags.Bits.u2IOPL, eflags.u));
    if (eflags.Bits.u2IOPL != 3)
    {
        Assert(eflags.Bits.u2IOPL == 0);

        rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xD, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xd);
        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
        return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    }
    return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
}
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);
        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);
        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
        /* re-use off */
        off = (sb->sb_data + sb->sb_datalen) - from;
        if (off > len)
            off = len;
        memcpy(to, from, off);
        len -= off;
        if (len)
            memcpy(to + off, sb->sb_data, len);
    }
}
#else  /* VBOX_WITH_SLIRP_BSD_SBUF */
void sbappend(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret = 0;
    int mlen = 0;
    caddr_t buf = NULL;

    STAM_PROFILE_START(&pData->StatIOSBAppend_pf, a);
    LogFlow(("sbappend: so = %R[natsock], m = %lx, m->m_len = %d\n", so, (long)m, m ? m->m_len : 0));

    STAM_COUNTER_INC(&pData->StatIOSBAppend);
    mlen = m_length(m, NULL);
    if (mlen <= 0)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_zm);
        goto done;
    }

    /*
     * We only write if there's nothing in the buffer,
     * otherwise it'll arrive out of order, and hence corrupt
     */
    buf = RTMemAlloc(mlen);
    if (buf == NULL)
    {
        ret = 0;
        goto no_sent;
    }
    m_copydata(m, 0, mlen, buf);

    /*
     * If there is urgent data, call sosendoob
     * if not all was sent, sowrite will take care of the rest
     * (The rest of this function is just an optimisation)
     */
    if (so->so_urgc)
    {
        sbuf_bcpy(&so->so_rcv, buf, mlen);
        RTMemFree(buf);
        m_free(pData, m);
        sosendoob(so);
        return;
    }

    if (!sbuf_len(&so->so_rcv))
        ret = send(so->s, buf, mlen, 0);
no_sent:

    if (ret <= 0)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_wf);
        /*
         * Nothing was written
         * It's possible that the socket has closed, but
         * we don't need to check because if it has closed,
         * it will be detected in the normal way by soread()
         */
        sbuf_bcpy(&so->so_rcv, buf, mlen);
        STAM_PROFILE_STOP(&pData->StatIOSBAppend_pf_wf, a);
        goto done;
    }
    else if (ret != mlen)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_wp);
        /*
         * Something was written, but not everything..
         * sbappendsb the rest
         */
        sbuf_bcpy(&so->so_rcv, &buf[ret], mlen - ret); /* the unsent remainder starts at buf[ret] */
        STAM_PROFILE_STOP(&pData->StatIOSBAppend_pf_wp, a);
        goto done;
    }
    /* else */
    /* Whatever happened, we free the mbuf */
    STAM_COUNTER_INC(&pData->StatIOSBAppend_wa);
    STAM_PROFILE_STOP(&pData->StatIOSBAppend_pf_wa, a);

done:
    RTMemFree(buf);
    m_freem(pData, m);
}