Example #1
/**
 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
 */
PDMBOTHCBDECL(int) drvNetShaperUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
                                           PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
{
    PDRVNETSHAPER pThis = RT_FROM_MEMBER(pInterface, DRVNETSHAPER, CTX_SUFF(INetworkUp));
    if (RT_UNLIKELY(!pThis->CTX_SUFF(pIBelowNet)))
        return VERR_NET_DOWN;
    //LogFlow(("drvNetShaperUp_AllocBuf: cb=%d\n", cbMin));
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesRequested, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsRequested);
#ifdef IN_RING3
    if (!PDMR3NsAllocateBandwidth(&pThis->Filter, cbMin))
    {
        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
        return VERR_TRY_AGAIN;
    }
#endif
#ifdef IN_RING0
    if (!PDMR0NsAllocateBandwidth(&pThis->Filter, cbMin))
    {
        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
        return VERR_TRY_AGAIN;
    }
#endif
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesGranted, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsGranted);
    //LogFlow(("drvNetShaperUp_AllocBuf: got cb=%d\n", cbMin));
    return pThis->CTX_SUFF(pIBelowNet)->pfnAllocBuf(pThis->CTX_SUFF(pIBelowNet), cbMin, pGso, ppSgBuf);
}
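The shaper asks its bandwidth group for cbMin bytes before delegating the allocation to the driver below; a denial becomes VERR_TRY_AGAIN, and the caller is expected to retry once bandwidth frees up (the pfnXmitPending kick in Example #7 serves that purpose). The internals of PDMR3NsAllocateBandwidth are not shown here, but a token-bucket allocator with the same grant/deny contract might look like this minimal sketch; all type and field names are hypothetical, and serialization is left to the caller:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical stand-in for a PDM bandwidth-group filter; not the real
   VirtualBox structure. */
typedef struct BWGROUP
{
    uint64_t cbBucket;      /* bucket capacity in bytes */
    uint64_t cbTokens;      /* tokens (bytes) currently available */
    uint64_t cbPerSec;      /* refill rate in bytes per second */
    uint64_t nsLastRefill;  /* monotonic timestamp of the last refill */
} BWGROUP;

static uint64_t bwNowNs(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
}

/* Grant cbRequest bytes if the bucket holds enough tokens; deny otherwise.
   The caller maps a denial to VERR_TRY_AGAIN and retries later.  (The caller
   must serialize access; atomicity is left out to keep the sketch short.) */
static bool bwGroupAllocate(BWGROUP *pGroup, size_t cbRequest)
{
    uint64_t nsNow    = bwNowNs();
    uint64_t cbRefill = (nsNow - pGroup->nsLastRefill) * pGroup->cbPerSec
                      / UINT64_C(1000000000);
    pGroup->nsLastRefill = nsNow;

    uint64_t cbNew = pGroup->cbTokens + cbRefill;
    pGroup->cbTokens = cbNew > pGroup->cbBucket ? pGroup->cbBucket : cbNew;

    if (pGroup->cbTokens < cbRequest)
        return false;               /* deny */
    pGroup->cbTokens -= cbRequest;
    return true;                    /* grant */
}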
Example #2
/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
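Both fast paths above are lock-free: cLockers sits at -1 while the section is free, so a single compare-and-swap from -1 to 0 claims ownership, and a nested enter by the current owner just bumps the two counters. A reduced C11 sketch of the same try-enter logic, using a hypothetical MINICRITSECT rather than the real RTCRITSECT layout:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical reduction of the critsect core. */
typedef struct MINICRITSECT
{
    atomic_int  cLockers;   /* -1 free, >= 0 means lockers - 1 */
    atomic_long owner;      /* id of the owning thread, 0 if none */
    int         cNestings;  /* only touched by the owner */
} MINICRITSECT;

/* Non-blocking enter in the style of pdmCritSectTryEnter: claim via CAS,
   handle recursion, otherwise report "busy" instead of waiting. */
static bool miniCritSectTryEnter(MINICRITSECT *pCritSect, long idSelf)
{
    int iFree = -1;
    if (atomic_compare_exchange_strong(&pCritSect->cLockers, &iFree, 0))
    {   /* ... not owned: we are now the first locker. */
        atomic_store(&pCritSect->owner, idSelf);
        pCritSect->cNestings = 1;
        return true;
    }
    if (atomic_load(&pCritSect->owner) == idSelf)
    {   /* ... or nested: bump both counters, as the code above does. */
        atomic_fetch_add(&pCritSect->cLockers, 1);
        pCritSect->cNestings++;
        return true;
    }
    return false;   /* owned by someone else -> VERR_SEM_BUSY */
}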
Example #3
/**
 * Allocate an item from a queue.
 * The allocated item must be handed on to PDMQueueInsert() after the
 * data has been filled in.
 *
 * @returns Pointer to allocated queue item.
 * @returns NULL on failure. The queue is exhausted.
 * @param   pQueue      The queue handle.
 * @thread  Any thread.
 */
VMMDECL(PPDMQUEUEITEMCORE) PDMQueueAlloc(PPDMQUEUE pQueue)
{
    Assert(VALID_PTR(pQueue) && pQueue->CTX_SUFF(pVM));
    PPDMQUEUEITEMCORE pNew;
    uint32_t iNext;
    uint32_t i;
    do
    {
        i = pQueue->iFreeTail;
        if (i == pQueue->iFreeHead)
        {
            STAM_REL_COUNTER_INC(&pQueue->StatAllocFailures);
            return NULL;
        }
        pNew = pQueue->aFreeItems[i].CTX_SUFF(pItem);
        iNext = (i + 1) % (pQueue->cItems + PDMQUEUE_FREE_SLACK);
    } while (!ASMAtomicCmpXchgU32(&pQueue->iFreeTail, iNext, i));
    return pNew;
}
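The free list is a ring of item pointers delimited by iFreeTail (allocate end) and iFreeHead (free end); an allocation snapshots the tail, reads the item, and advances the tail with a compare-and-swap, so racing allocators simply retry. A C11 sketch of the same lock-free pop over a hypothetical ring type:

#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 16    /* plays the role of cItems + PDMQUEUE_FREE_SLACK */

/* Hypothetical free-item ring: slots between iTail and iHead hold items
   available for allocation. */
typedef struct MINIRING
{
    void       *apItems[RING_SIZE];
    atomic_uint iTail;  /* next slot to allocate from */
    atomic_uint iHead;  /* one past the last free slot */
} MINIRING;

/* Lock-free pop in the style of PDMQueueAlloc: snapshot the tail, bail out
   if the ring is empty, read the item, and advance the tail with a CAS so
   concurrent allocators retry on a collision. */
static void *miniRingAlloc(MINIRING *pRing)
{
    void    *pvItem;
    unsigned i, iNext;
    do
    {
        i = atomic_load(&pRing->iTail);
        if (i == atomic_load(&pRing->iHead))
            return NULL;                    /* exhausted */
        pvItem = pRing->apItems[i];         /* read before claiming the slot */
        iNext  = (i + 1) % RING_SIZE;
    } while (!atomic_compare_exchange_weak(&pRing->iTail, &i, iNext));
    return pvItem;
}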
Example #4
/**
 * Queue an item.
 * The item must have been obtained using PDMQueueAlloc(). Once the item
 * has been passed to this function it must not be touched!
 *
 * @param   pQueue      The queue handle.
 * @param   pItem       The item to insert.
 * @thread  Any thread.
 */
VMMDECL(void) PDMQueueInsert(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem)
{
    Assert(VALID_PTR(pQueue) && pQueue->CTX_SUFF(pVM));
    Assert(VALID_PTR(pItem));

#if 0 /* the paranoid android version: */
    void *pvNext;
    do
    {
        pvNext = ASMAtomicUoReadPtr((void * volatile *)&pQueue->CTX_SUFF(pPending));
        ASMAtomicUoWritePtr((void * volatile *)&pItem->CTX_SUFF(pNext), pvNext);
    } while (!ASMAtomicCmpXchgPtr(&pQueue->CTX_SUFF(pPending), pItem, pvNext));
#else
    PPDMQUEUEITEMCORE pNext;
    do
    {
        pNext = pQueue->CTX_SUFF(pPending);
        pItem->CTX_SUFF(pNext) = pNext;
    } while (!ASMAtomicCmpXchgPtr(&pQueue->CTX_SUFF(pPending), pItem, pNext));
#endif

    if (!pQueue->pTimer)
    {
        PVM pVM = pQueue->CTX_SUFF(pVM);
        Log2(("PDMQueueInsert: VM_FF_PDM_QUEUES %d -> 1\n", VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES)));
        VM_FF_SET(pVM, VM_FF_PDM_QUEUES);
        ASMAtomicBitSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT);
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
        REMR3NotifyQueuePending(pVM); /** @todo r=bird: we can remove REMR3NotifyQueuePending and let VMR3NotifyFF do the work. */
# endif
        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
    }
    STAM_REL_COUNTER_INC(&pQueue->StatInsert);
    STAM_STATS({ ASMAtomicIncU32(&pQueue->cStatPending); });
}
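The insert itself is the classic lock-free LIFO (Treiber stack) push: link the new item in front of the current head, then publish it with a compare-and-swap, retrying on a race. Because items land in LIFO order, the consumer side must reverse the list before processing (see Example #8). A C11 sketch of the push with hypothetical types:

#include <stdatomic.h>

typedef struct MININODE
{
    struct MININODE *pNext;
} MININODE;

/* Lock-free LIFO push in the style of PDMQueueInsert: point the new item
   at the current head, then publish it with a CAS; if another thread got
   in first, the loop re-reads the head and retries. */
static void miniLifoPush(_Atomic(MININODE *) *ppHead, MININODE *pItem)
{
    MININODE *pNext;
    do
    {
        pNext = atomic_load(ppHead);
        pItem->pNext = pNext;
    } while (!atomic_compare_exchange_weak(ppHead, &pNext, pItem));
}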
Example #5
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
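The interesting part of the RC/R0 branch is the optimistic leave: ownership is dropped first, then a compare-and-swap tries to flip cLockers from 0 back to -1; if a contender slipped in between those two steps, ownership is rolled back and the leave is queued for ring-3. A reduced C11 sketch of that two-step leave, with a hypothetical struct in place of the real critical section:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct MINICRITSECT
{
    atomic_int  cLockers;   /* -1 free, >= 0 means lockers - 1 */
    atomic_long owner;      /* owning thread id, 0 if none */
} MINICRITSECT;

/* Optimistic leave in the style of the RC/R0 branch above: drop ownership
   and try to CAS cLockers from 0 back to -1.  If a contender arrived
   between the two steps, roll the ownership back and report failure so
   the caller can queue the leave for ring-3, as the code above does. */
static bool miniCritSectTryLeave(MINICRITSECT *pCritSect, long idSelf)
{
    if (atomic_load(&pCritSect->cLockers) == 0)
    {
        atomic_store(&pCritSect->owner, 0);
        int cZero = 0;
        if (atomic_compare_exchange_strong(&pCritSect->cLockers, &cZero, -1))
            return true;    /* clean leave */
        /* darn, someone raced in on us: take ownership back. */
        atomic_store(&pCritSect->owner, idSelf);
    }
    return false;           /* contended -> defer to ring-3 */
}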
Example #6
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to CFGM variables since it doesn't make sense to spin
     *        on uniprocessor systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock; the ring-3 code will then call the kernel to do
     *        the lock wait, and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
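The spin loop is the usual spin-then-block compromise: retry the claiming compare-and-swap a bounded number of times with a CPU pause hint (what ASMNopPause provides), then fall back to a blocking or ring-3 slow path. A self-contained C11 sketch of that pattern, with a made-up spin count (the real value is chosen per context via CTX_SUFF(PDMCRITSECT_SPIN_COUNT_)):

#include <stdatomic.h>
#include <stdbool.h>

#if defined(__x86_64__) || defined(__i386__)
# include <immintrin.h>                 /* _mm_pause */
# define CPU_RELAX() _mm_pause()
#else
# define CPU_RELAX() ((void)0)
#endif

#define SPIN_COUNT 256                  /* hypothetical bound */

/* Spin-then-block enter in the style of pdmCritSectEnter: retry the
   claiming CAS a bounded number of times with a pause hint, then let the
   caller take the contended/blocking slow path. */
static bool spinThenFail(atomic_int *pcLockers)
{
    for (int cSpinsLeft = SPIN_COUNT; cSpinsLeft > 0; cSpinsLeft--)
    {
        int iFree = -1;
        if (atomic_compare_exchange_strong(pcLockers, &iFree, 0))
            return true;    /* got it on the fast path */
        CPU_RELAX();        /* be polite to the sibling hyper-thread */
    }
    return false;           /* caller blocks or goes to ring-3 */
}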
Example #7
/**
 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
 */
static DECLCALLBACK(void) drvR3NetShaperDown_XmitPending(PPDMINETWORKDOWN pInterface)
{
    PDRVNETSHAPER pThis = RT_FROM_MEMBER(pInterface, DRVNETSHAPER, INetworkDown);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPendingCalled);
    pThis->pIAboveNet->pfnXmitPending(pThis->pIAboveNet);
}
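RT_FROM_MEMBER recovers the driver instance from a pointer to one of its embedded interface structs, the same trick as the Linux kernel's container_of: subtract the member's offset within the enclosing type. A standalone sketch with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* container_of in the style of RT_FROM_MEMBER: subtract the member's
   offset from the member's address to recover the enclosing struct. */
#define FROM_MEMBER(pMem, Type, Member) \
    ((Type *)((char *)(pMem) - offsetof(Type, Member)))

typedef struct IFACE { void (*pfnPoke)(struct IFACE *); } IFACE;

typedef struct MINIDRV
{
    int   iInstance;
    IFACE INetworkDown;     /* embedded interface, like in DRVNETSHAPER */
} MINIDRV;

static void miniPoke(IFACE *pInterface)
{
    MINIDRV *pThis = FROM_MEMBER(pInterface, MINIDRV, INetworkDown);
    printf("poked instance %d\n", pThis->iInstance);
}

int main(void)
{
    MINIDRV Drv = { 42, { miniPoke } };
    Drv.INetworkDown.pfnPoke(&Drv.INetworkDown);    /* prints 42 */
    return 0;
}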
Example #8
/**
 * Process pending items in one queue.
 *
 * @returns Success indicator.
 *          If false, the consumer said "enough!".
 * @param   pQueue  The queue.
 */
static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
{
    STAM_PROFILE_START(&pQueue->StatFlushPrf,p);

    /*
     * Get the lists.
     */
    PPDMQUEUEITEMCORE pItems   = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
    RTRCPTR           pItemsRC = ASMAtomicXchgRCPtr(&pQueue->pPendingRC, NIL_RTRCPTR);
    RTR0PTR           pItemsR0 = ASMAtomicXchgR0Ptr(&pQueue->pPendingR0, NIL_RTR0PTR);

    AssertMsgReturn(   pItemsR0
                    || pItemsRC
                    || pItems,
                    ("Someone is racing us? This shouldn't happen!\n"),
                    true);

    /*
     * Reverse the list (it's inserted in LIFO order to avoid semaphores, remember).
     */
    PPDMQUEUEITEMCORE pCur = pItems;
    pItems = NULL;
    while (pCur)
    {
        PPDMQUEUEITEMCORE pInsert = pCur;
        pCur = pCur->pNextR3;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending RC items.
     */
    while (pItemsRC)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pQueue->pVMR3, pItemsRC);
        pItemsRC = pInsert->pNextRC;
        pInsert->pNextRC = NIL_RTRCPTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending R0 items.
     */
    while (pItemsR0)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperR0ToR3(pQueue->pVMR3, pItemsR0);
        pItemsR0 = pInsert->pNextR0;
        pInsert->pNextR0 = NIL_RTR0PTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Feed the items to the consumer function.
     */
    Log2(("pdmR3QueueFlush: pQueue=%p enmType=%d pItems=%p\n", pQueue, pQueue->enmType, pItems));
    switch (pQueue->enmType)
    {
    case PDMQUEUETYPE_DEV:
        while (pItems)
        {
            if (!pQueue->u.Dev.pfnCallback(pQueue->u.Dev.pDevIns, pItems))
                break;
            pCur = pItems;
            pItems = pItems->pNextR3;
            pdmR3QueueFreeItem(pQueue, pCur);
        }
        break;

    case PDMQUEUETYPE_DRV:
        while (pItems)
        {
            if (!pQueue->u.Drv.pfnCallback(pQueue->u.Drv.pDrvIns, pItems))
                break;
            pCur = pItems;
            pItems = pItems->pNextR3;
            pdmR3QueueFreeItem(pQueue, pCur);
        }
        break;

    case PDMQUEUETYPE_INTERNAL:
        while (pItems)
        {
            if (!pQueue->u.Int.pfnCallback(pQueue->pVMR3, pItems))
                break;
            pCur = pItems;
            pItems = pItems->pNextR3;
            pdmR3QueueFreeItem(pQueue, pCur);
        }
        break;

    case PDMQUEUETYPE_EXTERNAL:
        while (pItems)
        {
            if (!pQueue->u.Ext.pfnCallback(pQueue->u.Ext.pvUser, pItems))
                break;
            pCur = pItems;
            pItems = pItems->pNextR3;
            pdmR3QueueFreeItem(pQueue, pCur);
        }
        break;

    default:
        AssertMsgFailed(("Invalid queue type %d\n", pQueue->enmType));
        break;
    }

    /*
     * Success?
     */
    if (pItems)
    {
        /*
         * Reverse the list.
         */
        pCur = pItems;
        pItems = NULL;
        while (pCur)
        {
            PPDMQUEUEITEMCORE pInsert = pCur;
            pCur = pInsert->pNextR3;
            pInsert->pNextR3 = pItems;
            pItems = pInsert;
        }

        /*
         * Insert the list at the tail of the pending list.
         */
        for (;;)
        {
            if (ASMAtomicCmpXchgPtr(&pQueue->pPendingR3, pItems, NULL))
                break;
            PPDMQUEUEITEMCORE pPending = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
            if (pPending)
            {
                pCur = pPending;
                while (pCur->pNextR3)
                    pCur = pCur->pNextR3;
                pCur->pNextR3 = pItems;
                pItems = pPending;
            }
        }

        STAM_REL_COUNTER_INC(&pQueue->StatFlushLeftovers);
        STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p);
        return false;
    }

    STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p);
    return true;
}
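The reversal idiom is the heart of this flush: producers push LIFO (Example #4), so the captured R3 list is flipped to FIFO before the consumer callbacks run, the RC/R0 merge loops reverse implicitly while translating pointers, and any unprocessed leftovers are flipped back before re-queuing. A minimal sketch of the in-place reversal with a hypothetical node type:

#include <stddef.h>

typedef struct MININODE
{
    struct MININODE *pNext;
} MININODE;

/* In-place singly-linked-list reversal, as used in pdmR3QueueFlush:
   repeatedly unhook the head of the input list and push it onto the
   front of the output list. */
static MININODE *miniListReverse(MININODE *pCur)
{
    MININODE *pItems = NULL;
    while (pCur)
    {
        MININODE *pInsert = pCur;
        pCur = pCur->pNext;
        pInsert->pNext = pItems;
        pItems = pInsert;
    }
    return pItems;
}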