Example #1
/**
 * Default VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu            Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int    rc    = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
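All of the VMR3Wait() workers in this listing share one loop shape: publish a "waiting" flag, check the relevant force flags, block with a timeout, and treat a timeout as a benign wake-up that merely re-runs the checks. Below is a minimal sketch of that shape using plain POSIX primitives in place of IPRT; every name in it (WAITER, waiterWait, fWorkPending) is an illustrative stand-in, not a VirtualBox API.

/* Minimal sketch of the wait-loop shape above, with pthread_cond_timedwait
 * standing in for RTSemEventWait. Names are illustrative only.
 * Compile with -lpthread. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct WAITER
{
    pthread_mutex_t Mtx;
    pthread_cond_t  Cond;
    bool            fWait;          /* mirrors pUVCpu->vm.s.fWait */
    bool            fWorkPending;   /* stands in for the force-flag checks */
} WAITER;

static int waiterWait(WAITER *pThis)
{
    int rc = 0;
    pthread_mutex_lock(&pThis->Mtx);
    pThis->fWait = true;
    while (!pThis->fWorkPending)
    {
        /* Block for up to one second; a timeout is not an error, it merely
           re-runs the pending-work check (cf. the VERR_TIMEOUT case above). */
        struct timespec Deadline;
        clock_gettime(CLOCK_REALTIME, &Deadline);
        Deadline.tv_sec += 1;
        rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mtx, &Deadline);
        if (rc == ETIMEDOUT)
            rc = 0;
        else if (rc != 0)
            break;                  /* a real failure terminates the loop */
    }
    pThis->fWait = false;
    pthread_mutex_unlock(&pThis->Mtx);
    return rc;
}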
Example #2
/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu            Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
Example #3
RTDECL(int) RTLocalIpcServerCancel(RTLOCALIPCSERVER hServer)
{
    /*
     * Validate input.
     */
    PRTLOCALIPCSERVERINT pThis = (PRTLOCALIPCSERVERINT)hServer;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTLOCALIPCSERVER_MAGIC, VERR_INVALID_MAGIC);

    /*
     * Enter the critical section, then set the cancellation flag
     * and signal the event (to wake up anyone in/at WaitForSingleObject).
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_SUCCESS(rc))
    {
        ASMAtomicUoWriteBool(&pThis->fCancelled, true);
        BOOL fRc = SetEvent(pThis->hEvent);
        AssertMsg(fRc, ("%d\n", GetLastError())); NOREF(fRc);

        rc = RTCritSectLeave(&pThis->CritSect);
    }

    return rc;
}
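The cancellation handshake above is: take the lock, set the cancelled flag, then signal the event so anyone parked in WaitForSingleObject wakes up and re-checks the flag. Here is a self-contained Win32 sketch of both sides of that handshake; MYSERVER and the function names are hypothetical, and only the Win32 calls (EnterCriticalSection, SetEvent, WaitForSingleObject) are real APIs.

#include <windows.h>

typedef struct MYSERVER
{
    CRITICAL_SECTION CritSect;
    HANDLE           hEvent;        /* created as a manual-reset event */
    volatile LONG    fCancelled;
} MYSERVER;

/* Canceller side: set the flag under the lock, then signal. */
static void myServerCancel(MYSERVER *pThis)
{
    EnterCriticalSection(&pThis->CritSect);
    InterlockedExchange(&pThis->fCancelled, TRUE);
    SetEvent(pThis->hEvent);        /* wake anyone in WaitForSingleObject */
    LeaveCriticalSection(&pThis->CritSect);
}

/* Waiter side: the flag, not the wait result, is authoritative. */
static BOOL myServerWasCancelled(MYSERVER *pThis, DWORD cMillies)
{
    WaitForSingleObject(pThis->hEvent, cMillies);
    return pThis->fCancelled != 0;
}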
Example #4
RTDECL(int) RTLocalIpcSessionClose(RTLOCALIPCSESSION hSession)
{
    /*
     * Validate input.
     */
    if (hSession == NIL_RTLOCALIPCSESSION)
        return VINF_SUCCESS;
    PRTLOCALIPCSESSIONINT pThis = (PRTLOCALIPCSESSIONINT)hSession;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTLOCALIPCSESSION_MAGIC, VERR_INVALID_MAGIC);

    /*
     * Cancel any thread currently busy using the session,
     * leaving the cleanup to it.
     */
    RTCritSectEnter(&pThis->CritSect);
    ASMAtomicUoWriteU32(&pThis->u32Magic, ~RTLOCALIPCSESSION_MAGIC);
    ASMAtomicUoWriteBool(&pThis->fCancelled, true);
    pThis->cRefs--;

    if (pThis->cRefs > 0)
    {
        BOOL fRc = SetEvent(pThis->hEvent);
        AssertMsg(fRc, ("%d\n", GetLastError())); NOREF(fRc);

        RTCritSectLeave(&pThis->CritSect);
    }
    else
        rtLocalIpcSessionWinDestroy(pThis);

    return VINF_SUCCESS;
}
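The close path combines three steps: invalidate the magic so late callers fail validation, set the cancelled flag so a busy thread bails out, and drop a reference so that whichever thread ends up holding the last one performs the destruction. A condensed pthreads sketch of that pattern, with the event signalling left out; all names here are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define MYSESSION_MAGIC UINT32_C(0x12345678)    /* arbitrary for the sketch */

typedef struct MYSESSION
{
    uint32_t volatile u32Magic;
    pthread_mutex_t   Lock;
    uint32_t          cRefs;
    bool volatile     fCancelled;
} MYSESSION;

static void mySessionClose(MYSESSION *pThis)
{
    pthread_mutex_lock(&pThis->Lock);
    pThis->u32Magic   = ~MYSESSION_MAGIC;   /* reject late callers */
    pThis->fCancelled = true;               /* make a busy thread bail out */
    uint32_t cRefs = --pThis->cRefs;
    pthread_mutex_unlock(&pThis->Lock);

    if (cRefs == 0)                         /* last reference cleans up */
    {
        pthread_mutex_destroy(&pThis->Lock);
        free(pThis);
    }
    /* else: the busy thread sees fCancelled and performs the cleanup when
       it drops the final reference (the original also signals hEvent). */
}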
Example #5
bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
{
    struct ifnet *ifp, *ifp0;

    ifp = ASMAtomicUoReadPtrT(&pThis->u.s.ifp, struct ifnet *);
    VBOXCURVNET_SET(ifp->if_vnet);
    /*
     * Attempt to check if the interface is still there and re-initialize if
     * something has changed.
     */
    ifp0 = ifunit(pThis->szName);
    if (ifp != ifp0)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, true);
        ng_rmnode_self(pThis->u.s.node);
        pThis->u.s.node = NULL;
    }

    if (ifp0 != NULL)
    {
        vboxNetFltOsDeleteInstance(pThis);
        vboxNetFltOsInitInstance(pThis, NULL);
    }
    VBOXCURVNET_RESTORE();

    return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
}
Example #6
/**
 * @interface_method_impl{PDMINETWORKUP,pfnBeginXmit}
 */
PDMBOTHCBDECL(int) drvDedicatedNicUp_BeginXmit(PPDMINETWORKUP pInterface, bool fOnWorkerThread)
{
    PDRVDEDICATEDNIC pThis = RT_FROM_MEMBER(pInterface, DRVDEDICATEDNIC, CTX_SUFF(INetworkUp));
    int rc = PDMCritSectTryEnter(&pThis->XmitLock);
    if (RT_SUCCESS(rc))
        ASMAtomicUoWriteBool(&pThis->fXmitOnXmitThread, fOnWorkerThread);
    return rc;
}
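pfnBeginXmit and its matching pfnEndXmit (shown below as Example #15) form a plain try-lock protocol: entry may fail and the caller must cope with that, and exit clears the worker-thread hint before releasing the lock. A pthreads sketch of the pair, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

typedef struct MYNIC
{
    pthread_mutex_t XmitLock;
    bool volatile   fXmitOnXmitThread;
} MYNIC;

static int myNicBeginXmit(MYNIC *pThis, bool fOnWorkerThread)
{
    int rc = pthread_mutex_trylock(&pThis->XmitLock); /* may return EBUSY */
    if (rc == 0)
        pThis->fXmitOnXmitThread = fOnWorkerThread;
    return rc;
}

static void myNicEndXmit(MYNIC *pThis)
{
    pThis->fXmitOnXmitThread = false;
    pthread_mutex_unlock(&pThis->XmitLock);
}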
Example #7
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
{
    char nam[NG_NODESIZ];
    struct ifnet *ifp;
    node_p node;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    VBOXCURVNET_SET_FROM_UCRED();
    NOREF(pvContext);
    ifp = ifunit(pThis->szName);
    if (ifp == NULL)
        return VERR_INTNET_FLT_IF_NOT_FOUND;

    /* Create a new netgraph node for this instance */
    if (ng_make_node_common(&ng_vboxnetflt_typestruct, &node) != 0)
        return VERR_INTERNAL_ERROR;

    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);

    ASMAtomicUoWritePtr(&pThis->u.s.ifp, ifp);
    pThis->u.s.node = node;
    bcopy(IF_LLADDR(ifp), &pThis->u.s.MacAddr, ETHER_ADDR_LEN);
    ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);

    /* Initialize deferred input queue */
    bzero(&pThis->u.s.inq, sizeof(struct ifqueue));
    mtx_init(&pThis->u.s.inq.ifq_mtx, "vboxnetflt inq", NULL, MTX_SPIN);
    TASK_INIT(&pThis->u.s.tskin, 0, vboxNetFltFreeBSDinput, pThis);

    /* Initialize deferred output queue */
    bzero(&pThis->u.s.outq, sizeof(struct ifqueue));
    mtx_init(&pThis->u.s.outq.ifq_mtx, "vboxnetflt outq", NULL, MTX_SPIN);
    TASK_INIT(&pThis->u.s.tskout, 0, vboxNetFltFreeBSDoutput, pThis);

    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);

    NG_NODE_SET_PRIVATE(node, pThis);

    /* Attempt to name it vboxnetflt_<ifname> */
    snprintf(nam, NG_NODESIZ, "vboxnetflt_%s", pThis->szName);
    ng_name_node(node, nam);

    /* Report MAC address, promiscuous mode and GSO capabilities. */
    /** @todo keep these reports up to date, either by polling for changes or
     *        intercept some control flow if possible. */
    if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
    {
        Assert(pThis->pSwitchPort);
        pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
        pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltFreeBsdIsPromiscuous(pThis));
        pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, 0, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
        pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, 0 /* none */);
        vboxNetFltRelease(pThis, true /*fBusy*/);
    }
    VBOXCURVNET_RESTORE();

    return VINF_SUCCESS;
}
Example #8
/**
 * Signal a fatal wait error.
 *
 * @returns Fatal error code to be propagated up the call stack.
 * @param   pUVCpu              The user mode per CPU structure of the calling
 *                              EMT.
 * @param   pszFmt              The error format with a single %Rrc in it.
 * @param   rcFmt               The status code to format.
 */
static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
{
    /** @todo This is wrong ... raise a fatal error / guru meditation
     *        instead. */
    AssertLogRelMsgFailed((pszFmt, rcFmt));
    ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
    if (pUVCpu->pVM)
        VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
    return VERR_VM_FATAL_WAIT_ERROR;
}
Example #9
/**
 * Bootstrap VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu            Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pNormalReqs   || pUVM->vm.s.pPriorityReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
            break;

        if (    pUVCpu->pVM
            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
                )
            )
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
Example #10
/*static*/ DECLCALLBACK(int) VBoxDbgConsole::backThread(RTTHREAD Thread, void *pvUser)
{
    VBoxDbgConsole *pThis = (VBoxDbgConsole *)pvUser;
    LogFlow(("backThread: Thread=%p pvUser=%p\n", (void *)Thread, pvUser));

    NOREF(Thread);

    /*
     * Create and execute the console.
     */
    int rc = pThis->dbgcCreate(&pThis->m_Back.Core, 0);

    ASMAtomicUoWriteBool(&pThis->m_fThreadTerminated, true);
    if (!ASMAtomicUoReadBool(&pThis->m_fTerminate))
        QApplication::postEvent(pThis, new VBoxDbgConsoleEvent(rc == VINF_SUCCESS
                                                               ? VBoxDbgConsoleEvent::kTerminatedUser
                                                               : VBoxDbgConsoleEvent::kTerminatedOther));
    LogFlow(("backThread: returns %Rrc (m_fTerminate=%RTbool)\n", rc, ASMAtomicUoReadBool(&pThis->m_fTerminate)));
    return rc;
}
Example #11
/**
 *
 * @see iff_detached_func in the darwin kpi.
 */
static void vboxNetFltDarwinIffDetached(void *pvThis, ifnet_t pIfNet)
{
    PVBOXNETFLTINS pThis = (PVBOXNETFLTINS)pvThis;
    uint64_t NanoTS = RTTimeSystemNanoTS();
    LogFlow(("vboxNetFltDarwinIffDetached: pThis=%p NanoTS=%RU64 (%d)\n",
             pThis, NanoTS, VALID_PTR(pIfNet) ? VBOX_GET_PCOUNT(pIfNet) :  -1));

    Assert(!pThis->fDisconnectedFromHost);
    Assert(!pThis->fRediscoveryPending);

    /*
     * If we've put it into promiscuous mode, undo that now. If we don't,
     * the if_pcount will go all wrong when it's replugged.
     */
    if (ASMAtomicXchgBool(&pThis->u.s.fSetPromiscuous, false))
        ifnet_set_promiscuous(pIfNet, 0);

    /*
     * We carefully take the spinlock, snapshot the interface pointer and
     * clear the published state behind it, releasing the reference only
     * after the lock is dropped.
     */
    RTSpinlockAcquire(pThis->hSpinlock);

    pIfNet = ASMAtomicUoReadPtrT(&pThis->u.s.pIfNet, ifnet_t);
    int cPromisc = VALID_PTR(pIfNet) ? VBOX_GET_PCOUNT(pIfNet) : -1;

    ASMAtomicUoWriteNullPtr(&pThis->u.s.pIfNet);
    ASMAtomicUoWriteNullPtr(&pThis->u.s.pIfFilter);
    ASMAtomicWriteBool(&pThis->u.s.fNeedSetPromiscuous, false);
    pThis->u.s.fSetPromiscuous = false;
    ASMAtomicUoWriteU64(&pThis->NanoTSLastRediscovery, NanoTS);
    ASMAtomicUoWriteBool(&pThis->fRediscoveryPending, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);

    RTSpinlockReleaseNoInts(pThis->hSpinlock);

    if (pIfNet)
        ifnet_release(pIfNet);
    LogRel(("VBoxNetFlt: was detached from '%s' (%d)\n", pThis->szName, cPromisc));
}
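The detach handler illustrates a standard teardown rule: snapshot and unpublish the shared pointer while holding the spinlock, but perform the potentially blocking release only after dropping it. A self-contained sketch of just that rule; RESOURCE and the helpers are stand-ins for ifnet_t and ifnet_release, and all names are invented for illustration.

#include <pthread.h>
#include <stdlib.h>

typedef struct RESOURCE { int iDummy; } RESOURCE;

/* Stands in for ifnet_release(); may block, so never call it under a lock. */
static void resourceRelease(RESOURCE *pRes) { free(pRes); }

typedef struct MYFILTER
{
    pthread_spinlock_t Spinlock;
    RESOURCE *volatile pRes;
} MYFILTER;

static void myFilterDetach(MYFILTER *pThis)
{
    pthread_spin_lock(&pThis->Spinlock);
    RESOURCE *pRes = pThis->pRes;   /* snapshot: we now own this reference */
    pThis->pRes = NULL;             /* unpublish while still holding the lock */
    pthread_spin_unlock(&pThis->Spinlock);

    if (pRes)
        resourceRelease(pRes);      /* safe: the spinlock is no longer held */
}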
Example #12
/**
 * The global 1 halt method - Block in GVMM (ring-0) and let it
 * try to take care of the global scheduling of EMT threads.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);
    NOREF(u64Now);

    /*
     * Halt loop.
     */
    //uint64_t u64NowLog, u64Start;
    //u64Start = u64NowLog = RTTimeNanoTS();
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        //u64NowLog = RTTimeNanoTS();
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
        {
            VMMR3YieldStop(pVM);
            if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
                break;

            //RTLogPrintf("loop=%-3d  u64GipTime=%'llu / %'llu   now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            uint64_t const u64EndSchedHalt     = RTTimeNanoTS();
            uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
                break;
            }
            else
            {
                int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
                if (cNsOverslept > 50000)
                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
                else if (cNsOverslept < -50000)
                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia,  cNsElapsedSchedHalt);
                else
                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime,    cNsElapsedSchedHalt);
            }
        }
        /*
         * When spinning, call upon the GVMM and do some wake-ups once
         * in a while; it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            uint64_t const u64StartSchedYield   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
        }
    }
    //RTLogPrintf("*** %u loops %'llu;  lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
Example #13
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM    = pUVCpu->pUVM;
    PVMCPU  pVCpu   = pUVCpu->pVCpu;
    PVM     pVM     = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
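The statistics block in the middle of this loop keeps a cheap decaying average: the average is recomputed every 16th sample, and every 64th sample the history is rewritten as "64 samples at the current average", so older oversleep data fades instead of dominating forever. A stand-alone sketch of just that bookkeeping, with illustrative names:

#include <stdint.h>

typedef struct BLOCKSTATS
{
    uint64_t cNsTooLong;        /* accumulated oversleep, nanoseconds */
    uint32_t cBlocks;           /* number of samples accumulated */
    uint64_t cNsTooLongAvg;     /* refreshed every 16th sample */
} BLOCKSTATS;

static void blockStatsAdd(BLOCKSTATS *pStats, uint64_t cNsOverslept)
{
    pStats->cNsTooLong += cNsOverslept;
    pStats->cBlocks++;
    if (!(pStats->cBlocks & 0xf))                   /* every 16th sample */
    {
        pStats->cNsTooLongAvg = pStats->cNsTooLong / pStats->cBlocks;
        if (!(pStats->cBlocks & 0x3f))              /* every 64th sample */
        {
            /* Flush history: pretend it is exactly 64 average samples. */
            pStats->cNsTooLong = pStats->cNsTooLongAvg * 0x40;
            pStats->cBlocks    = 0x40;
        }
    }
}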
Example #14
/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS <  870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedYield   = RTTimeNanoTS();
                RTThreadYield(); /* this is the best we can do here */
                uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
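The old loop picks its waiting strategy from the estimated time to the next event: busy-spin below ~50 microseconds, yield below ~870 microseconds, a 1 ms sleep below 2 ms, and otherwise a sleep of up to 15 ms. A condensed sketch of that tier selection, with the thresholds copied from the code above and the enum/helper names invented for illustration:

#include <stdint.h>

enum WAITTIER { WAITTIER_SPIN, WAITTIER_YIELD, WAITTIER_SLEEP_1MS, WAITTIER_SLEEP_N_MS };

static enum WAITTIER pickWaitTier(uint64_t cNsToNextEvent, uint32_t *pcMsSleep)
{
    *pcMsSleep = 0;
    if (cNsToNextEvent < UINT64_C(50000))       /* < 0.05 ms: just spin */
        return WAITTIER_SPIN;
    if (cNsToNextEvent < UINT64_C(870000))      /* < 0.87 ms: yield the CPU */
        return WAITTIER_YIELD;
    if (cNsToNextEvent < UINT64_C(2000000))     /* < 2 ms: shortest sleep */
    {
        *pcMsSleep = 1;
        return WAITTIER_SLEEP_1MS;
    }
    /* Sleep most of the interval, but never more than 15 ms. */
    uint64_t cMs = (cNsToNextEvent - UINT64_C(1000000)) / UINT64_C(1000000);
    *pcMsSleep = cMs < 15 ? (uint32_t)cMs : 15;
    return WAITTIER_SLEEP_N_MS;
}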
Example #15
/**
 * @interface_method_impl{PDMINETWORKUP,pfnEndXmit}
 */
PDMBOTHCBDECL(void) drvDedicatedNicUp_EndXmit(PPDMINETWORKUP pInterface)
{
    PDRVDEDICATEDNIC pThis = RT_FROM_MEMBER(pInterface, DRVDEDICATEDNIC, CTX_SUFF(INetworkUp));
    ASMAtomicUoWriteBool(&pThis->fXmitOnXmitThread, false);
    PDMCritSectLeave(&pThis->XmitLock);
}
Example #16
/**
 * Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
 *
 * @returns VBox status code.
 * @param   pThis           The instance.
 * @param   fRediscovery    If set, we're doing a rediscovery attempt, so don't
 *                          flood the release log.
 */
static int vboxNetFltDarwinAttachToInterface(PVBOXNETFLTINS pThis, bool fRediscovery)
{
    LogFlow(("vboxNetFltDarwinAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    /*
     * Locate the interface first.
     *
     * The pIfNet member is updated before iflt_attach is called and used
     * to deal with the hypothetical case where someone rips out the
     * interface immediately after our iflt_attach call.
     */
    ifnet_t pIfNet = NULL;
    errno_t err = ifnet_find_by_name(pThis->szName, &pIfNet);
    if (err)
    {
        Assert(err == ENXIO);
        if (!fRediscovery)
            LogRel(("VBoxFltDrv: failed to find ifnet '%s' (err=%d)\n", pThis->szName, err));
        else
            Log(("VBoxFltDrv: failed to find ifnet '%s' (err=%d)\n", pThis->szName, err));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    RTSpinlockAcquire(pThis->hSpinlock);
    ASMAtomicUoWritePtr(&pThis->u.s.pIfNet, pIfNet);
    RTSpinlockReleaseNoInts(pThis->hSpinlock);

    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    err = ifnet_lladdr_copy_bytes(pIfNet, &pThis->u.s.MacAddr, sizeof(pThis->u.s.MacAddr));
    if (!err)
    {
        /*
         * Try attach the filter.
         */
        struct iff_filter RegRec;
        RegRec.iff_cookie   = pThis;
        RegRec.iff_name     = "VBoxNetFlt";
        RegRec.iff_protocol = 0;
        RegRec.iff_input    = vboxNetFltDarwinIffInput;
        RegRec.iff_output   = vboxNetFltDarwinIffOutput;
        RegRec.iff_event    = vboxNetFltDarwinIffEvent;
        RegRec.iff_ioctl    = vboxNetFltDarwinIffIoCtl;
        RegRec.iff_detached = vboxNetFltDarwinIffDetached;
        interface_filter_t pIfFilter = NULL;
        err = iflt_attach(pIfNet, &RegRec, &pIfFilter);
        Assert(err || pIfFilter);

        RTSpinlockAcquire(pThis->hSpinlock);
        pIfNet = ASMAtomicUoReadPtrT(&pThis->u.s.pIfNet, ifnet_t);
        if (pIfNet && !err)
        {
            ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
            ASMAtomicUoWritePtr(&pThis->u.s.pIfFilter, pIfFilter);
            pIfNet = NULL; /* don't dereference it */
        }
        RTSpinlockReleaseNoInts(pThis->hSpinlock);

        /* Report capabilities. */
        if (   !pIfNet
            && vboxNetFltTryRetainBusyNotDisconnected(pThis))
        {
            Assert(pThis->pSwitchPort);
            pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
            pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltDarwinIsPromiscuous(pThis));
            pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, 0,  INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
            pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, 0 /* none */);
            vboxNetFltRelease(pThis, true /*fBusy*/);
        }
    }

    /* Release the interface on failure. */
    if (pIfNet)
        ifnet_release(pIfNet);

    int rc = RTErrConvertFromErrno(err);
    if (RT_SUCCESS(rc))
        LogRel(("VBoxFltDrv: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
    else
        LogRel(("VBoxFltDrv: failed to attach to ifnet '%s' (err=%d)\n", pThis->szName, err));
    return rc;
}
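As the comment at the top of the function notes, pIfNet is published before iflt_attach precisely so that a detach callback firing immediately afterwards can find the pointer and claim the reference; the attach path then re-reads the slot under the spinlock to learn whether that race happened. A condensed, self-contained sketch of that ordering; IFACE and both helper stubs are hypothetical stand-ins, not real kernel APIs.

#include <pthread.h>
#include <stddef.h>

typedef struct IFACE { int iDummy; } IFACE;

/* Stubs standing in for iflt_attach() and ifnet_release(). */
static int  ifaceRegisterFilter(IFACE *pIf) { (void)pIf; return 0; }
static void ifaceRelease(IFACE *pIf)        { (void)pIf; }

typedef struct MYATTACH
{
    pthread_spinlock_t Spinlock;
    IFACE *volatile    pIf;
} MYATTACH;

static int myAttach(MYATTACH *pThis, IFACE *pIf /* caller-owned reference */)
{
    /* Publish first, so a detach callback firing right after registration
       can find the pointer and claim the reference for itself. */
    pthread_spin_lock(&pThis->Spinlock);
    pThis->pIf = pIf;
    pthread_spin_unlock(&pThis->Spinlock);

    int rc = ifaceRegisterFilter(pIf);

    /* Re-read under the lock: NULL means a detach raced us and the callback
       already took ownership of (and released) the reference. */
    pthread_spin_lock(&pThis->Spinlock);
    IFACE *pIfToRelease = pThis->pIf;
    if (pIfToRelease && rc == 0)
        pIfToRelease = NULL;        /* success: keep it published */
    else
        pThis->pIf = NULL;          /* failure: unpublish before dropping */
    pthread_spin_unlock(&pThis->Spinlock);

    if (pIfToRelease)
        ifaceRelease(pIfToRelease);
    return rc;
}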