/**
 * Undoing rtstrFormatTypeWriteLock.
 */
DECLINLINE(void) rtstrFormatTypeWriteUnlock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    Assert(g_i32Spinlock < 0);
    ASMAtomicAddS32(&g_i32Spinlock, RTSTRFORMATTYPE_LOCK_OFFSET);
#endif
}
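As a side note, here is a minimal self-contained sketch of the counter scheme the unlock above implies (readers keep the counter non-negative, the single writer drives it negative by a large offset). It uses C11 atomics rather than the IPRT primitives, shows only the write side, and every name in it (demoWriteLock, DEMO_LOCK_OFFSET, g_iDemoCounter) is invented for illustration, not taken from the original source.

#include <stdatomic.h>
#include <stdint.h>

#define DEMO_LOCK_OFFSET 0x40000000          /* assumed stand-in for RTSTRFORMATTYPE_LOCK_OFFSET */

static atomic_int_fast32_t g_iDemoCounter;   /* 0 = free, >0 = readers, <0 = writer active */

static void demoWriteLock(void)
{
    /* Spin until the counter is idle, then subtract the offset so it stays
       negative for as long as the write side holds the lock. */
    for (;;)
    {
        int_fast32_t i = 0;
        if (atomic_compare_exchange_weak(&g_iDemoCounter, &i, -DEMO_LOCK_OFFSET))
            return;
    }
}

static void demoWriteUnlock(void)
{
    /* Mirror of rtstrFormatTypeWriteUnlock above: adding the offset back makes
       the counter non-negative again and lets readers through. */
    atomic_fetch_add(&g_iDemoCounter, DEMO_LOCK_OFFSET);
}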
Example #2
/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis               The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markably when lots of threads are beating
         * on the cache.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache       = pThis;
            pPage->pNext        = NULL;
            pPage->cFree        = cObjects;
            pPage->cObjects     = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor      = pb;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects    = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc     = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page in at the end of the list. */
            ASMAtomicWritePtr(pThis->ppPageNext, pPage);
            pThis->ppPageNext = &pPage->pNext;

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
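To make the layout comment above concrete, here is a small standalone calculation that mirrors the pointer arithmetic with made-up numbers; the page size, header size, object size and bitmap size below are assumptions for illustration, not values taken from the cache instance.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t const uPage    = 0x10000;  /* pretend page address (page aligned) */
    size_t    const cbPage   = 4096;     /* assumed PAGE_SIZE */
    size_t    const cbHdr    = 64;       /* assumed sizeof(RTMEMCACHEPAGE) */
    size_t    const cbObject = 64;       /* assumed object size */
    uint32_t  const cObjects = 32;
    uint32_t  const cBits    = 64;       /* bitmap bits, rounded up */

    /* Constructor bitmap right after the page header, 8-byte aligned. */
    uintptr_t uCtorBm  = (uPage + cbHdr + 7) & ~(uintptr_t)7;
    /* Object array pushed to the end of the page. */
    uintptr_t uObjects = uPage + cbPage - cbObject * cObjects;
    /* Allocation bitmap just below the objects, aligned down to 8 bytes,
       which keeps it in a different cache line than cFree at the page start. */
    uintptr_t uAllocBm = (uObjects - cBits / 8) & ~(uintptr_t)7;

    printf("ctor bitmap  %#lx\nalloc bitmap %#lx\nobjects      %#lx\n",
           (unsigned long)uCtorBm, (unsigned long)uAllocBm, (unsigned long)uObjects);
    return 0;
}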
Example #3
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    Assert(cReqs <= INT32_MAX);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    size_t i;

    for (i = 0; i < cReqs; i++)
    {
        PRTFILEAIOREQINTERNAL pReqInt = pahReqs[i];
        BOOL fSucceeded;

        Assert(pReqInt->cbTransfer == (DWORD)pReqInt->cbTransfer);
        if (pReqInt->enmTransferDirection == TRANSFERDIRECTION_READ)
        {
            fSucceeded = ReadFile(pReqInt->hFile, pReqInt->pvBuf,
                                  (DWORD)pReqInt->cbTransfer, NULL,
                                  &pReqInt->Overlapped);
        }
        else if (pReqInt->enmTransferDirection == TRANSFERDIRECTION_WRITE)
        {
            fSucceeded = WriteFile(pReqInt->hFile, pReqInt->pvBuf,
                                   (DWORD)pReqInt->cbTransfer, NULL,
                                   &pReqInt->Overlapped);
        }
        else
        {
            fSucceeded = false;
            AssertMsgFailed(("Invalid transfer direction\n"));
        }

        if (RT_UNLIKELY(!fSucceeded && GetLastError() != ERROR_IO_PENDING))
        {
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            rc = RTErrConvertFromWin32(GetLastError());
            pReqInt->Rc = rc;
            break;
        }
        RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);
    }

    ASMAtomicAddS32(&pCtxInt->cRequests, (int32_t)i);

    return rc;
}
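For reference, a minimal Win32 sketch of the ReadFile + OVERLAPPED convention the function above depends on: a FALSE return with GetLastError() == ERROR_IO_PENDING means the request was queued and will complete asynchronously, anything else is a genuine failure. demoQueueRead is a made-up helper, not IPRT code.

#include <windows.h>

/* Returns TRUE if the read completed synchronously or was queued, FALSE on real failure. */
static BOOL demoQueueRead(HANDLE hFile, void *pvBuf, DWORD cbToRead, OVERLAPPED *pOvl)
{
    if (ReadFile(hFile, pvBuf, cbToRead, NULL /*lpNumberOfBytesRead*/, pOvl))
        return TRUE;                              /* completed synchronously */
    return GetLastError() == ERROR_IO_PENDING;    /* queued for async completion */
}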
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard differently: glibc
                             * returns the error code that would otherwise be in errno, while Apple
                             * returns -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.  If not, we hit the
         * AIO_LISTIO_MAX limit and the do-while loop will continue
         * submitting the remaining requests.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (  (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
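A minimal, self-contained POSIX AIO sketch of the lio_listio(LIO_NOWAIT) submission and the aio_error() follow-up the code above relies on; demoSubmitRead and its parameters are invented for illustration.

#include <aio.h>
#include <errno.h>
#include <string.h>

/* Queue one asynchronous read; returns 0 on success, an errno value on failure. */
static int demoSubmitRead(int fd, void *pvBuf, size_t cbToRead, off_t off, struct aiocb *pCb)
{
    memset(pCb, 0, sizeof(*pCb));
    pCb->aio_fildes     = fd;
    pCb->aio_buf        = pvBuf;
    pCb->aio_nbytes     = cbToRead;
    pCb->aio_offset     = off;
    pCb->aio_lio_opcode = LIO_READ;

    struct aiocb *apList[1] = { pCb };
    if (lio_listio(LIO_NOWAIT, apList, 1, NULL /*sigevent*/) < 0)
        return errno;                 /* EAGAIN typically means out of AIO resources */

    /* The request is now in flight; aio_error(pCb) returns EINPROGRESS until it
       completes, then 0 or the final error code. */
    return 0;
}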
Example #5
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    do
    {
        int rcBSD = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                    pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
            pReqInt->pCtxInt                                = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcBSD < 0))
            {
                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcBSD = aio_error(&pReqInt->AioCB);
                    if (   rcBSD == -1
                        && errno == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcBSD != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
                        pReqInt->pCtxInt      = NULL;
                        pReqInt->cbTransfered = 0;
                    }
                }
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /* Check if we have a flush request now. */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcBSD < 0))
                {
                    if (errno == EAGAIN)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                        return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                    }
                    else
                    {
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(errno);
                        pReqInt->cbTransfered = 0;
                        return pReqInt->Rc;
                    }
                }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                cReqs--;
                pahReqs++;
            }
        }
    } while (cReqs);

    return rc;
}
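Since lio_listio() has no flush opcode, both the POSIX and FreeBSD paths above fall back to aio_fsync() for flush requests. Here is a minimal sketch of that call; demoQueueFlush is a made-up helper, not IPRT code.

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>

/* Queue an asynchronous flush for fd; returns 0 on success, an errno value otherwise. */
static int demoQueueFlush(int fd, struct aiocb *pCb)
{
    memset(pCb, 0, sizeof(*pCb));
    pCb->aio_fildes = fd;

    /* O_SYNC flushes data and metadata; O_DSYNC (where available) flushes data only. */
    if (aio_fsync(O_SYNC, pCb) < 0)
        return errno;                 /* EAGAIN: temporarily out of resources */
    return 0;                         /* completion is reported like any other aio request */
}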
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET    OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single shot timers get complicated wrt. fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
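For background on the DPC fan-out in the master callback above, a small WDK-style sketch of targeting and queueing a DPC on a specific processor. demoQueueDpcOnCpu is invented for illustration; it is not how the IPRT timer sets up its DPCs.

#include <ntddk.h>

/* Queue an already initialized DPC on the given processor.  Returns TRUE if it
   was queued, FALSE if it was already sitting in a DPC queue. */
static BOOLEAN demoQueueDpcOnCpu(PKDPC pDpc, CCHAR iProcessor)
{
    KeSetTargetProcessorDpc(pDpc, iProcessor);
    return KeInsertQueueDpc(pDpc, NULL /*SystemArgument1*/, NULL /*SystemArgument2*/);
}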
Example #7
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;

    /*
     * Parameter validation.
     */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    uint32_t i = cReqs;
    PRTFILEAIOREQINTERNAL pReqInt = NULL;

    /*
     * Validate requests and associate with the context.
     */
    while (i-- > 0)
    {
        pReqInt = pahReqs[i];
        if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
        {
            /* Undo everything and stop submitting. */
            size_t iUndo = cReqs;
            while (iUndo-- > i)
            {
                pReqInt = pahReqs[iUndo];
                RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                pReqInt->pCtxInt = NULL;
            }
            return VERR_INVALID_HANDLE;
        }

        pReqInt->AioContext = pCtxInt->AioContext;
        pReqInt->pCtxInt    = pCtxInt;
        RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);
    }

    do
    {
        /*
         * We cast pahReqs to the Linux iocb structure to avoid copying the requests
         * into a temporary array. This is possible because the iocb structure is
         * the first element in the request structure (see PRTFILEAIOREQINTERNAL).
         */
        int cReqsSubmitted = 0;
        rc = rtFileAsyncIoLinuxSubmit(pCtxInt->AioContext, cReqs,
                                      (PLNXKAIOIOCB *)pahReqs,
                                      &cReqsSubmitted);
        if (RT_FAILURE(rc))
        {
            /*
             * We encountered an error.  Usually this means the first IoCB is
             * not correctly initialized (invalid buffer alignment or a bad
             * file descriptor); insufficient resources are another possible
             * cause.  Revert every request to the prepared state, except for
             * the first one which is switched to completed.
             */
            i = cReqs;
            while (i-- > 0)
            {
                /* Already validated. */
                pReqInt = pahReqs[i];
                pReqInt->pCtxInt    = NULL;
                pReqInt->AioContext = 0;
                RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
            }

            if (rc == VERR_TRY_AGAIN)
                return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
            else
            {
                /* The first request failed. */
                pReqInt = pahReqs[0];
                RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                pReqInt->Rc = rc;
                pReqInt->cbTransfered = 0;
                return rc;
            }
        }

        /* Advance. */
        cReqs   -= cReqsSubmitted;
        pahReqs += cReqsSubmitted;
        ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);

    } while (cReqs);

    return rc;
}
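The cast of pahReqs described in the comment above works because of the first-member rule: a pointer to a structure, suitably converted, also points to its first member. A tiny self-contained sketch of that idiom follows; all names in it are hypothetical.

#include <stdio.h>

struct demo_iocb { long opcode; };                        /* stand-in for the kernel iocb */
struct demo_req  { struct demo_iocb IoCB; int iState; };  /* wrapper with the iocb first */

/* Valid per C11 6.7.2.1: a pointer to a struct, suitably converted,
   points to its initial member. */
static struct demo_iocb *demoToIocb(struct demo_req *pReq)
{
    return (struct demo_iocb *)pReq;
}

int main(void)
{
    struct demo_req Req = { { 1 }, 0 };
    printf("opcode=%ld\n", demoToIocb(&Req)->opcode);
    return 0;
}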
Example #8
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    size_t i = cReqs;

    do
    {
        int rcSol = 0;
        size_t cReqsSubmit = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (i-- > 0)
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->PortNotifier.portnfy_port = pCtxInt->iPort;
            pReqInt->pCtxInt                   = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
        }

        if (cReqsSubmit)
        {
            rcSol = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcSol < 0))
            {
                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcSol = aio_error(&pReqInt->AioCB);
                    if (rcSol == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcSol != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                    }
                }
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        if (cReqs)
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            /*
             * If there are still requests left we have a flush request.
             * lio_listio does not work with these requests, so
             * we have to use aio_fsync directly.
             */
            rcSol = aio_fsync(O_SYNC, &pReqInt->AioCB);
            if (RT_UNLIKELY(rcSol < 0))
            {
                RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                rc = RTErrConvertFromErrno(errno);
                break;
            }

            ASMAtomicIncS32(&pCtxInt->cRequests);
            cReqs--;
            pahReqs++;
        }
    } while (cReqs);

    return rc;
}