Code example #1: drvNetShaperUp_AllocBuf (bandwidth-shaped buffer allocation in the network shaper filter driver)
/**
 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
 */
PDMBOTHCBDECL(int) drvNetShaperUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
                                                  PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
{
    PDRVNETSHAPER pThis = RT_FROM_MEMBER(pInterface, DRVNETSHAPER, CTX_SUFF(INetworkUp));
    if (RT_UNLIKELY(!pThis->CTX_SUFF(pIBelowNet)))
        return VERR_NET_DOWN;
    //LogFlow(("drvNetShaperUp_AllocBuf: cb=%d\n", cbMin));
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesRequested, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsRequested);
#ifdef IN_RING3
    if (!PDMR3NsAllocateBandwidth(&pThis->Filter, cbMin))
    {
        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
        return VERR_TRY_AGAIN;
    }
#endif
#ifdef IN_RING0
    if (!PDMR0NsAllocateBandwidth(&pThis->Filter, cbMin))
    {
        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
        return VERR_TRY_AGAIN;
    }
#endif
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesGranted, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsGranted);
    //LogFlow(("drvNetShaperUp_AllocBuf: got cb=%d\n", cbMin));
    return pThis->CTX_SUFF(pIBelowNet)->pfnAllocBuf(pThis->CTX_SUFF(pIBelowNet), cbMin, pGso, ppSgBuf);
}
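The shaper signals an exhausted bandwidth bucket with VERR_TRY_AGAIN instead of blocking. Below is a minimal caller-side sketch of retrying such a denial; the helper name, retry bound, and back-off are illustrative assumptions, not VirtualBox API (the real network code requeues the frame on its transmit queue rather than sleeping).

/* Hypothetical retry wrapper around pfnAllocBuf; the helper name, retry
 * bound and back-off interval are illustrative assumptions only. */
static int xmitAllocBufWithRetry(PPDMINETWORKUP pUp, size_t cbFrame,
                                 PPDMSCATTERGATHER *ppSgBuf)
{
    for (unsigned cTries = 0; cTries < 8; cTries++)
    {
        int rc = pUp->pfnAllocBuf(pUp, cbFrame, NULL /*pGso*/, ppSgBuf);
        if (rc != VERR_TRY_AGAIN)
            return rc;          /* success, or a hard error such as VERR_NET_DOWN */
        RTThreadSleep(1);       /* let the shaper's token bucket refill a little */
    }
    return VERR_TRY_AGAIN;      /* still throttled; the caller should requeue */
}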
Code example #2: pdmCritSectEnter (common worker for entering a PDM critical section)
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 * @param   pSrcPos             The source position of the lock operation
 *                              (used by the lock validator).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to CFGM variables since it doesn't make sense to spin on
     *        uniprocessor systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pending-preemption check up front using sti w/ instruction fusing
           to avoid races.  Hmm ... This assumes the other party is actually
           executing code on another CPU, which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
# endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
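For context, a sketch of the calling pattern this worker serves: in ring-3 the enter call blocks until the section is free, while in ring-0/raw-mode a contended section yields rcBusy so the caller can redo the access in ring-3. The device structure, function name, and the choice of rcBusy status below are illustrative assumptions.

/* Illustrative MMIO handler; PDEVFOO and the rcBusy status code are
 * assumptions, not taken from the sources above. */
static int devFooMmioWrite(PDEVFOO pThis)
{
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE /* rcBusy */);
    if (rc != VINF_SUCCESS)
        return rc;              /* busy in R0/RC: IOM will redo the write in ring-3 */

    /* ... access the shared device state ... */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}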
Code example #3: drvscsiReqTransferEnqueue (dispatching a SCSI I/O request to the async or sync path)
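/**
 * Enqueue callback for the virtual SCSI layer: dispatches an I/O request
 * either to the asynchronous block driver, if one is attached, or to the
 * driver's dedicated I/O thread.
 */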
static DECLCALLBACK(int) drvscsiReqTransferEnqueue(VSCSILUN hVScsiLun,
                                                   void *pvScsiLunUser,
                                                   VSCSIIOREQ hVScsiIoReq)
{
    int rc = VINF_SUCCESS;
    PDRVSCSI pThis = (PDRVSCSI)pvScsiLunUser;

    if (pThis->pDrvBlockAsync)
    {
        /* async I/O path. */
        VSCSIIOREQTXDIR enmTxDir;

        LogFlowFunc(("Enqueuing hVScsiIoReq=%#p\n", hVScsiIoReq));

        enmTxDir = VSCSIIoReqTxDirGet(hVScsiIoReq);

        switch (enmTxDir)
        {
            case VSCSIIOREQTXDIR_FLUSH:
            {
                rc = pThis->pDrvBlockAsync->pfnStartFlush(pThis->pDrvBlockAsync, hVScsiIoReq);
                if (   RT_FAILURE(rc)
                    && rc != VERR_VD_ASYNC_IO_IN_PROGRESS
                    && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                    LogRel(("SCSI#%u: Flush returned rc=%Rrc\n",
                            pThis->pDrvIns->iInstance, rc));
                break;
            }
            case VSCSIIOREQTXDIR_UNMAP:
            {
                PCRTRANGE paRanges;
                unsigned cRanges;

                rc = VSCSIIoReqUnmapParamsGet(hVScsiIoReq, &paRanges, &cRanges);
                AssertRC(rc);

                pThis->pLed->Asserted.s.fWriting = pThis->pLed->Actual.s.fWriting = 1;
                rc = pThis->pDrvBlockAsync->pfnStartDiscard(pThis->pDrvBlockAsync, paRanges, cRanges, hVScsiIoReq);
                if (   RT_FAILURE(rc)
                    && rc != VERR_VD_ASYNC_IO_IN_PROGRESS
                    && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                    LogRel(("SCSI#%u: Discard returned rc=%Rrc\n",
                            pThis->pDrvIns->iInstance, rc));
                break;
            }
            case VSCSIIOREQTXDIR_READ:
            case VSCSIIOREQTXDIR_WRITE:
            {
                uint64_t  uOffset    = 0;
                size_t    cbTransfer = 0;
                size_t    cbSeg      = 0;
                PCRTSGSEG paSeg      = NULL;
                unsigned  cSeg       = 0;

                rc = VSCSIIoReqParamsGet(hVScsiIoReq, &uOffset, &cbTransfer,
                                         &cSeg, &cbSeg, &paSeg);
                AssertRC(rc);

                if (enmTxDir == VSCSIIOREQTXDIR_READ)
                {
                    pThis->pLed->Asserted.s.fReading = pThis->pLed->Actual.s.fReading = 1;
                    rc = pThis->pDrvBlockAsync->pfnStartRead(pThis->pDrvBlockAsync, uOffset,
                                                             paSeg, cSeg, cbTransfer,
                                                             hVScsiIoReq);
                    STAM_REL_COUNTER_ADD(&pThis->StatBytesRead, cbTransfer);
                }
                else
                {
                    pThis->pLed->Asserted.s.fWriting = pThis->pLed->Actual.s.fWriting = 1;
                    rc = pThis->pDrvBlockAsync->pfnStartWrite(pThis->pDrvBlockAsync, uOffset,
                                                              paSeg, cSeg, cbTransfer,
                                                              hVScsiIoReq);
                    STAM_REL_COUNTER_ADD(&pThis->StatBytesWritten, cbTransfer);
                }

                if (   RT_FAILURE(rc)
                    && rc != VERR_VD_ASYNC_IO_IN_PROGRESS
                    && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                    LogRel(("SCSI#%u: %s at offset %llu (%u bytes left) returned rc=%Rrc\n",
                            pThis->pDrvIns->iInstance,
                            enmTxDir == VSCSIIOREQTXDIR_READ
                            ? "Read"
                            : "Write",
                            uOffset,
                            cbTransfer, rc));
                break;
            }
            default:
                AssertMsgFailed(("Invalid transfer direction %u\n", enmTxDir));
        }

        if (rc == VINF_VD_ASYNC_IO_FINISHED)
        {
            if (enmTxDir == VSCSIIOREQTXDIR_READ)
                pThis->pLed->Actual.s.fReading = 0;
            else if (enmTxDir == VSCSIIOREQTXDIR_WRITE)
                pThis->pLed->Actual.s.fWriting = 0;
            else
                AssertMsg(enmTxDir == VSCSIIOREQTXDIR_FLUSH, ("Invalid transfer direction %u\n", enmTxDir));

            VSCSIIoReqCompleted(hVScsiIoReq, VINF_SUCCESS, false);
            rc = VINF_SUCCESS;
        }
        else if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            if (enmTxDir == VSCSIIOREQTXDIR_READ)
                pThis->pLed->Actual.s.fReading = 0;
            else if (enmTxDir == VSCSIIOREQTXDIR_WRITE)
                pThis->pLed->Actual.s.fWriting = 0;
            else
                AssertMsg(enmTxDir == VSCSIIOREQTXDIR_FLUSH, ("Invalid transfer direction %u\n", enmTxDir));

            VSCSIIoReqCompleted(hVScsiIoReq, rc, drvscsiIsRedoPossible(rc));
            rc = VINF_SUCCESS;
        }
        else
            AssertMsgFailed(("Invalid return code rc=%Rrc\n", rc));
    }
    else
    {
        /* I/O thread. */
        rc = RTReqQueueCallEx(pThis->hQueueRequests, NULL, 0, RTREQFLAGS_NO_WAIT,
                              (PFNRT)drvscsiProcessRequestOne, 2, pThis, hVScsiIoReq);
    }

    return rc;
}
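drvscsiIsRedoPossible() is referenced above but not shown; it classifies a failure as transient so the request can be completed with "redo possible" and retried once the VM resumes. A plausible sketch follows; the exact set of status codes is an assumption.

/* Sketch of the redo classifier; the exact error set is an assumption
 * based on typical "suspend the VM and retry" conditions. */
static bool drvscsiIsRedoPossible(int rc)
{
    return rc == VERR_DISK_FULL               /* host disk ran out of space */
        || rc == VERR_FILE_TOO_BIG            /* image hit a file size limit */
        || rc == VERR_BROKEN_PIPE             /* backend connection dropped */
        || rc == VERR_NET_CONNECTION_REFUSED; /* remote storage unreachable */
}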
Code example #4: drvscsiProcessRequestOne (synchronous SCSI request processing on the dedicated I/O thread)
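/**
 * Processes a single SCSI I/O request synchronously; runs on the dedicated
 * I/O thread, queued from drvscsiReqTransferEnqueue via RTReqQueueCallEx.
 */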
static int drvscsiProcessRequestOne(PDRVSCSI pThis, VSCSIIOREQ hVScsiIoReq)
{
    int rc = VINF_SUCCESS;
    VSCSIIOREQTXDIR enmTxDir;

    enmTxDir = VSCSIIoReqTxDirGet(hVScsiIoReq);

    switch (enmTxDir)
    {
        case VSCSIIOREQTXDIR_FLUSH:
        {
            rc = pThis->pDrvBlock->pfnFlush(pThis->pDrvBlock);
            if (   RT_FAILURE(rc)
                && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                LogRel(("SCSI#%u: Flush returned rc=%Rrc\n",
                        pThis->pDrvIns->iInstance, rc));
            break;
        }
        case VSCSIIOREQTXDIR_READ:
        case VSCSIIOREQTXDIR_WRITE:
        {
            uint64_t  uOffset    = 0;
            size_t    cbTransfer = 0;
            size_t    cbSeg      = 0;
            PCRTSGSEG paSeg      = NULL;
            unsigned  cSeg       = 0;

            rc = VSCSIIoReqParamsGet(hVScsiIoReq, &uOffset, &cbTransfer, &cSeg, &cbSeg,
                                     &paSeg);
            AssertRC(rc);

            while (cbTransfer && cSeg)
            {
                size_t cbProcess = (cbTransfer < paSeg->cbSeg) ? cbTransfer : paSeg->cbSeg;

                Log(("%s: uOffset=%llu cbProcess=%u\n", __FUNCTION__, uOffset, cbProcess));

                if (enmTxDir == VSCSIIOREQTXDIR_READ)
                {
                    pThis->pLed->Asserted.s.fReading = pThis->pLed->Actual.s.fReading = 1;
                    rc = pThis->pDrvBlock->pfnRead(pThis->pDrvBlock, uOffset,
                                                    paSeg->pvSeg, cbProcess);
                    pThis->pLed->Actual.s.fReading = 0;
                    if (RT_FAILURE(rc))
                        break;
                    STAM_REL_COUNTER_ADD(&pThis->StatBytesRead, cbProcess);
                }
                else
                {
                    pThis->pLed->Asserted.s.fWriting = pThis->pLed->Actual.s.fWriting = 1;
                    rc = pThis->pDrvBlock->pfnWrite(pThis->pDrvBlock, uOffset,
                                                    paSeg->pvSeg, cbProcess);
                    pThis->pLed->Actual.s.fWriting = 0;
                    if (RT_FAILURE(rc))
                        break;
                    STAM_REL_COUNTER_ADD(&pThis->StatBytesWritten, cbProcess);
                }

                /* Go to the next entry. */
                uOffset    += cbProcess;
                cbTransfer -= cbProcess;
                paSeg++;
                cSeg--;
            }

            if (   RT_FAILURE(rc)
                && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                LogRel(("SCSI#%u: %s at offset %llu (%u bytes left) returned rc=%Rrc\n",
                        pThis->pDrvIns->iInstance,
                        enmTxDir == VSCSIIOREQTXDIR_READ
                        ? "Read"
                        : "Write",
                        uOffset,
                        cbTransfer, rc));

            break;
        }
        case VSCSIIOREQTXDIR_UNMAP:
        {
            PCRTRANGE paRanges;
            unsigned cRanges;

            rc = VSCSIIoReqUnmapParamsGet(hVScsiIoReq, &paRanges, &cRanges);
            AssertRC(rc);

            pThis->pLed->Asserted.s.fWriting = pThis->pLed->Actual.s.fWriting = 1;
            rc = pThis->pDrvBlock->pfnDiscard(pThis->pDrvBlock, paRanges, cRanges);
            pThis->pLed->Actual.s.fWriting = 0;

            if (   RT_FAILURE(rc)
                && pThis->cErrors++ < MAX_LOG_REL_ERRORS)
                LogRel(("SCSI#%u: Unmap returned rc=%Rrc\n",
                        pThis->pDrvIns->iInstance, rc));

            break;
        }
        default:
            AssertMsgFailed(("Invalid transfer direction %d\n", enmTxDir));
    }

    if (RT_SUCCESS(rc))
        VSCSIIoReqCompleted(hVScsiIoReq, rc, false /* fRedoPossible */);
    else
        VSCSIIoReqCompleted(hVScsiIoReq, rc, drvscsiIsRedoPossible(rc));

    return VINF_SUCCESS;
}
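The synchronous path above relies on a dedicated I/O thread draining pThis->hQueueRequests. A minimal sketch of such a drain loop follows; the function name and the bare-bones shutdown handling are illustrative assumptions (the real driver runs the loop inside a PDM thread with suspend/terminate support).

/* Illustrative drain loop for the request queue. */
static DECLCALLBACK(int) drvscsiIoThreadSketch(RTTHREAD hThreadSelf, void *pvUser)
{
    PDRVSCSI pThis = (PDRVSCSI)pvUser;
    NOREF(hThreadSelf);

    for (;;)
    {
        /* Blocks until a request queued by RTReqQueueCallEx arrives, then
         * executes it, i.e. calls drvscsiProcessRequestOne on this thread. */
        int rc = RTReqQueueProcess(pThis->hQueueRequests, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc))
            return rc;          /* queue destroyed or another fatal error */
    }
}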