Example #1
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /*
     * Try to restart the io instruction that was refused in ring-0.
     */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /*
     * Hand it over to the interpreter.
     */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}
Example #2
/**
 * Initializes the tracing.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   cbEntry     The trace entry size.
 * @param   cEntries    The number of entries.
 */
static int dbgfR3TraceEnable(PVM pVM, uint32_t cbEntry, uint32_t cEntries)
{
    /*
     * Don't enable it twice.
     */
    if (pVM->hTraceBufR3 != NIL_RTTRACEBUF)
        return VERR_ALREADY_EXISTS;

    /*
     * Resolve default parameter values.
     */
    int rc;
    if (!cbEntry)
    {
        rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntrySize", &cbEntry, 128);
        AssertRCReturn(rc, rc);
    }
    if (!cEntries)
    {
        rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntries", &cEntries, 4096);
        AssertRCReturn(rc, rc);
    }

    /*
     * Figure the required size.
     */
    RTTRACEBUF  hTraceBuf;
    size_t      cbBlock = 0;
    rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, NULL, &cbBlock);
    if (rc != VERR_BUFFER_OVERFLOW)
    {
        AssertReturn(!RT_SUCCESS_NP(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
        return rc;
    }

    /*
     * Allocate a hyper heap block and carve a trace buffer out of it.
     *
     * Note! We ASSUME that the returned trace buffer handle has the same value
     *       as the heap block.
     */
    cbBlock = RT_ALIGN_Z(cbBlock, PAGE_SIZE);
    void *pvBlock;
    rc = MMR3HyperAllocOnceNoRel(pVM, cbBlock, PAGE_SIZE, MM_TAG_DBGF, &pvBlock);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, pvBlock, &cbBlock);
    AssertRCReturn(rc, rc);
    AssertRelease(hTraceBuf == (RTTRACEBUF)pvBlock);
    AssertRelease((void *)hTraceBuf == pvBlock);

    pVM->hTraceBufR3 = hTraceBuf;
    pVM->hTraceBufR0 = MMHyperCCToR0(pVM, hTraceBuf);
    pVM->hTraceBufRC = MMHyperCCToRC(pVM, hTraceBuf);
    return VINF_SUCCESS;
}
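The function above uses RTTraceBufCarve in two passes: a sizing call with a NULL block that is expected to return VERR_BUFFER_OVERFLOW, followed by a second call that carves the buffer into caller-provided memory. A minimal standalone sketch of the same pattern, assuming plain page allocation via RTMemPageAlloc/RTMemPageFree instead of the hyper heap:
/*
 * Sketch only: the two-pass RTTraceBufCarve pattern with plain page allocation
 * (RTMemPageAlloc/RTMemPageFree) instead of the hyper heap; myTraceBufCreate is
 * a made-up name and error handling is trimmed to the essentials.
 */
static int myTraceBufCreate(uint32_t cEntries, uint32_t cbEntry, PRTTRACEBUF phTraceBuf)
{
    /* Sizing pass: must "fail" with VERR_BUFFER_OVERFLOW and report the size. */
    size_t cbBlock = 0;
    int rc = RTTraceBufCarve(phTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, NULL, &cbBlock);
    if (rc != VERR_BUFFER_OVERFLOW)
        return RT_SUCCESS_NP(rc) ? VERR_IPE_UNEXPECTED_INFO_STATUS : rc;

    /* Allocate the block and carve the real buffer out of it. */
    cbBlock = RT_ALIGN_Z(cbBlock, PAGE_SIZE);
    void *pvBlock = RTMemPageAlloc(cbBlock);
    if (!pvBlock)
        return VERR_NO_MEMORY;

    rc = RTTraceBufCarve(phTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, pvBlock, &cbBlock);
    if (RT_FAILURE(rc))
        RTMemPageFree(pvBlock, cbBlock);
    return rc;
}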
Example #3
RTDECL(bool) RTSymlinkIsDangling(const char *pszSymlink)
{
    bool        fRc = false;
    RTFSOBJINFO ObjInfo;
    int rc = RTPathQueryInfoEx(pszSymlink, &ObjInfo, RTFSOBJATTRADD_NOTHING, RTPATH_F_ON_LINK);
    if (RT_SUCCESS(rc))
    {
        fRc = RTFS_IS_SYMLINK(ObjInfo.Attr.fMode);
        if (fRc)
        {
            rc = RTPathQueryInfoEx(pszSymlink, &ObjInfo, RTFSOBJATTRADD_NOTHING, RTPATH_F_FOLLOW_LINK);
            fRc = !RT_SUCCESS_NP(rc);
        }
    }

    LogFlow(("RTSymlinkIsDangling(%p={%s}): returns %RTbool\n", pszSymlink, pszSymlink, fRc));
    return fRc;
}
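RTSymlinkIsDangling first queries the path without following the link (RTPATH_F_ON_LINK) to confirm it really is a symlink, then follows it (RTPATH_F_FOLLOW_LINK) and reports true only if that second lookup fails. A small usage sketch; the path is a made-up example:
/* Sketch only: "/tmp/mylink" is a made-up example path. */
static void myCheckLink(void)
{
    if (RTSymlinkIsDangling("/tmp/mylink"))
        RTPrintf("dangling symlink\n");
    else
        RTPrintf("target exists (or the path is not a symlink)\n");
}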
RTDECL(bool) RTManifestEntryExists(RTMANIFEST hManifest, const char *pszEntry)
{
    RTMANIFESTINT *pThis = hManifest;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTMANIFEST_MAGIC, false);
    AssertPtr(pszEntry);

    bool    fNeedNormalization;
    size_t  cchEntry;
    int rc = rtManifestValidateNameEntry(pszEntry, &fNeedNormalization, &cchEntry);
    AssertRCReturn(rc, false);

    /*
     * Check if it exists.
     */
    PRTMANIFESTENTRY pEntry;
    rc = rtManifestGetEntry(pThis, pszEntry, fNeedNormalization, cchEntry, &pEntry);
    return RT_SUCCESS_NP(rc);
}
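A short caller-side sketch for RTManifestEntryExists. It assumes the usual RTManifestCreate, RTManifestEntryAdd and RTManifestRelease entry points; the helper and entry names are made up:
/*
 * Sketch only: create an in-memory manifest, add one entry and probe for it.
 * Assumes RTManifestCreate/RTManifestEntryAdd/RTManifestRelease are available.
 */
static int myManifestProbe(void)
{
    RTMANIFEST hManifest;
    int rc = RTManifestCreate(0 /*fFlags*/, &hManifest);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTManifestEntryAdd(hManifest, "some/file.txt");
    if (RT_SUCCESS(rc))
        Assert(RTManifestEntryExists(hManifest, "some/file.txt"));

    RTManifestRelease(hManifest);
    return rc;
}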
Example #5
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* Try to restart the io instruction that was refused in ring-0. */
    VBOXSTRICTRC rcStrict = HWACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     *   as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.fPrefix & DISPREFIX_REP)
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            pCtx->rip += Cpu.cbInstr;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
}
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
    struct timespec Timeout;
    struct timespec *pTimeout = NULL;
    uint64_t         StartNanoTS = 0;

    LogFlowFunc(("hAioCtx=%#p cMinReqs=%zu cMillies=%u pahReqs=%#p cReqs=%zu pcbReqs=%#p\n",
                 hAioCtx, cMinReqs, cMillies, pahReqs, cReqs, pcReqs));

    /* Check parameters. */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    rtFileAioCtxDump(pCtxInt);

    int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);

    if (   RT_UNLIKELY(cRequestsWaiting <= 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
        return VERR_INVALID_PARAMETER;

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = (cMillies % 1000) * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /* Update the waiting list once before we enter the loop. */
    rc = rtFileAioCtxProcessEvents(pCtxInt);

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
#ifdef RT_STRICT
        if (RT_UNLIKELY(!pCtxInt->iFirstFree))
        {
            for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
                RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);

            AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
                            pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
        }
#endif

        LogFlow(("Waiting for %d requests to complete\n", pCtxInt->iFirstFree));
        rtFileAioCtxDump(pCtxInt);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
                                  pCtxInt->iFirstFree, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (rcPosix < 0)
        {
            LogFlow(("aio_suspend failed %d nent=%u\n", errno, pCtxInt->iFirstFree));
            /* Check that this is an external wakeup event. */
            if (errno == EINTR)
                rc = rtFileAioCtxProcessEvents(pCtxInt);
            else
                rc = RTErrConvertFromErrno(errno);
        }
        else
        {
            /* Requests finished. */
            unsigned iReqCurr = 0;
            unsigned cDone = 0;

            /* Remove completed requests from the waiting list. */
            while (   (iReqCurr < pCtxInt->iFirstFree)
                   && (cDone < cReqs))
            {
                PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
                int rcReq = aio_error(&pReq->AioCB);

                if (rcReq != EINPROGRESS)
                {
                    /* Completed; store the return code. */
                    if (rcReq == 0)
                    {
                        pReq->Rc = VINF_SUCCESS;
                        /* Call aio_return() to free resources. */
                        pReq->cbTransfered = aio_return(&pReq->AioCB);
                    }
                    else
                    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        pReq->Rc = RTErrConvertFromErrno(errno);
#else
                        pReq->Rc = RTErrConvertFromErrno(rcReq);
#endif
                    }

                    /* Mark the request as finished. */
                    RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
                    cDone++;

                    /* If there are other entries waiting, put the head into the now-free entry. */
                    if (pCtxInt->pReqsWaitHead)
                    {
                        PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;

                        pCtxInt->pReqsWaitHead = pReqInsert->pNext;
                        if (!pCtxInt->pReqsWaitHead)
                        {
                            /* List is empty now. Clear tail too. */
                            pCtxInt->pReqsWaitTail = NULL;
                        }

                        pReqInsert->iWaitingList = pReq->iWaitingList;
                        pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
                        iReqCurr++;
                    }
                    else
                    {
                        /*
                         * Move the last entry into the current position to avoid holes
                         * but only if it is not the last element already.
                         */
                        if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
                        {
                            pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                            pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
                        }
                        else
                            pCtxInt->iFirstFree--;

                        pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
                    }

                    /* Put the request into the completed list. */
                    pahReqs[cRequestsCompleted++] = pReq;
                    pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
                }
                else
                    iReqCurr++;
            }

            AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
                                         cReqs, cDone));
            cReqs    -= cDone;
            cMinReqs  = RT_MAX(cMinReqs, cDone) - cDone;
            ASMAtomicSubS32(&pCtxInt->cRequests, cDone);

            AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));

            if (!cMinReqs)
                break;

            if (cMillies != RT_INDEFINITE_WAIT)
            {
                /* Recalculate the remaining timeout from the total time elapsed
                   (same clock as StartNanoTS above). */
                uint64_t cMilliesElapsed = (RTTimeNanoTS() - StartNanoTS) / 1000000;
                if (cMilliesElapsed >= cMillies)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
                Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
            }

            /* Check for new elements. */
            rc = rtFileAioCtxProcessEvents(pCtxInt);
        }
    }

    *pcReqs = cRequestsCompleted;
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
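The waiting-list handling above keeps apReqs dense by moving the last entry into the slot freed by a completed request (unless the head of the overflow wait list can be reinserted there). A generic sketch of that compaction step, with hypothetical names:
/*
 * Generic sketch of the hole-free removal used above: move the last element
 * into the freed slot unless the removed element already was the last one.
 * Hypothetical helper; assumes iRemove < *pcItems.
 */
static void myArrayRemoveAt(void **papItems, unsigned *pcItems, unsigned iRemove)
{
    unsigned const cItems = *pcItems;
    if (iRemove < cItems - 1)
        papItems[iRemove] = papItems[cItems - 1];   /* fill the hole with the tail entry */
    papItems[cItems - 1] = NULL;
    *pcItems = cItems - 1;
}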
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (  (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
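Taken together, RTFileAioCtxWait and RTFileAioCtxSubmit form the core of the IPRT async file I/O API. A hedged caller-side sketch that reads one block asynchronously; the companion entry points used here (RTFileAioCtxCreate, RTFileAioReqCreate, RTFileAioReqPrepareRead, RTFileAioReqGetRC, RTFileAioReqDestroy, RTFileAioCtxDestroy) are assumed to have their usual IPRT signatures:
/*
 * Sketch only: read one block asynchronously.  hFile is an already opened
 * file handle; the helper entry points are assumed, not taken from the
 * examples above.
 */
static int myAsyncReadOnce(RTFILE hFile, void *pvBuf, size_t cbRead)
{
    RTFILEAIOCTX hAioCtx;
    int rc = RTFileAioCtxCreate(&hAioCtx, 1 /*cAioReqsMax*/, 0 /*fFlags*/);
    if (RT_FAILURE(rc))
        return rc;

    RTFILEAIOREQ hReq;
    rc = RTFileAioReqCreate(&hReq);
    if (RT_SUCCESS(rc))
    {
        rc = RTFileAioReqPrepareRead(hReq, hFile, 0 /*off*/, pvBuf, cbRead, NULL /*pvUser*/);
        if (RT_SUCCESS(rc))
            rc = RTFileAioCtxSubmit(hAioCtx, &hReq, 1);
        if (RT_SUCCESS(rc))
        {
            RTFILEAIOREQ hReqCompleted;
            uint32_t     cCompleted = 0;
            rc = RTFileAioCtxWait(hAioCtx, 1, RT_INDEFINITE_WAIT, &hReqCompleted, 1, &cCompleted);
            if (RT_SUCCESS(rc) && cCompleted == 1)
            {
                size_t cbTransfered = 0;
                rc = RTFileAioReqGetRC(hReqCompleted, &cbTransfered); /* final status + bytes transferred */
            }
        }
        RTFileAioReqDestroy(hReq);
    }
    RTFileAioCtxDestroy(hAioCtx);
    return rc;
}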
Example #8
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int cRequestsToWait = cMinReqs < AIO_MAXIMUM_REQUESTS_PER_CONTEXT ? cReqs : AIO_MAXIMUM_REQUESTS_PER_CONTEXT;
        int rcBSD;
        uint64_t StartTime;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already, because the caller may omit
             * the RTFileAioReqGetRC() call and we would otherwise leak kernel
             * resources.  That results in errors when submitting further requests
             * once the max_aio_queue_per_proc limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);

            if (cbTransfered < 0)
            {
                pReqInt->Rc = RTErrConvertFromErrno(cbTransfered);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done Yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
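The timeout handling above follows the usual pattern for timespec-based waits: convert the millisecond interval once up front and, after each partial wake-up, recompute the remainder from the elapsed monotonic time instead of trusting the kernel to update the structure. A small helper sketch with a made-up name:
/*
 * Sketch only: recompute the remaining wait time from the elapsed monotonic
 * time.  myCalcRemainingTimeout is a hypothetical helper; returns false once
 * the interval has fully elapsed.
 */
static bool myCalcRemainingTimeout(RTMSINTERVAL cMillies, uint64_t StartNanoTS, struct timespec *pTimeout)
{
    uint64_t cMilliesElapsed = (RTTimeNanoTS() - StartNanoTS) / 1000000;
    if (cMilliesElapsed >= cMillies)
        return false;                               /* timed out */
    RTMSINTERVAL cMilliesLeft = cMillies - (RTMSINTERVAL)cMilliesElapsed;
    pTimeout->tv_sec  = cMilliesLeft / 1000;
    pTimeout->tv_nsec = cMilliesLeft % 1000 * 1000000;
    return true;
}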
Example #9
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    do
    {
        int rcBSD = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                    pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
            pReqInt->pCtxInt                                = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcBSD < 0))
            {
                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcBSD = aio_error(&pReqInt->AioCB);
                    if (   rcBSD == -1
                        && errno == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcBSD != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
                        pReqInt->pCtxInt      = NULL;
                        pReqInt->cbTransfered = 0;
                    }
                }
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /* Check if we have a flush request now. */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                 rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
                 if (RT_UNLIKELY(rcBSD < 0))
                 {
                    if (rcBSD == EAGAIN)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                        return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                    }
                    else
                    {
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(errno);
                        pReqInt->cbTransfered = 0;
                        return pReqInt->Rc;
                    }
                 }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                cReqs--;
                pahReqs++;
            }
        }
    } while (cReqs);

    return rc;
}
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (    RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        port_event_t aPortEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        uint_t cRequests    = cMinReqs;
        int cRequestsToWait = RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);
        int rcSol;
        uint64_t StartTime;

        rcSol = port_getn(pCtxInt->iPort, &aPortEvents[0], cRequestsToWait, &cRequests, pTimeout);

        if (RT_UNLIKELY(rcSol < 0))
            rc = RTErrConvertFromErrno(errno);

        /* Process received events. */
        for (uint_t i = 0; i < cRequests; i++)
        {
            if (aPortEvents[i].portev_source == PORT_SOURCE_ALERT)
            {
                Assert(aPortEvents[i].portev_events == AIO_CONTEXT_WAKEUP_EVENT);
                rc = VERR_INTERRUPTED; /* We've got interrupted. */
                /* Reset the port. */
                port_alert(pCtxInt->iPort, PORT_ALERT_SET, 0, NULL);
            }
            else
            {
                PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aPortEvents[i].portev_user;
                AssertPtr(pReqInt);
                Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

                /* A request has finished. */
                pahReqs[cRequestsCompleted++] = pReqInt;

                /* Mark the request as finished. */
                RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            }
        }

        /*
         * Done Yet? If not advance and try again.
         */
        if (cRequests >= cMinReqs)
            break;
        cMinReqs -= cRequests;
        cReqs    -= cRequests;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            if (cMilliesElapsed < cMillies)
            {
                Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
                Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
            }
            else
            {
                Timeout.tv_sec  = 0;
                Timeout.tv_nsec = 0;
            }
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);

    return rc;
}
Example #11
/**
 * Attempts to start the service, creating it if necessary.
 *
 * @returns VBox status code.
 */
static int suplibOsStartService(void)
{
    /*
     * Check if the driver service is there.
     */
    SC_HANDLE hSMgr = OpenSCManager(NULL, NULL, SERVICE_QUERY_STATUS | SERVICE_START);
    if (hSMgr == NULL)
    {
        DWORD dwErr = GetLastError();
        AssertMsgFailed(("couldn't open service manager in SERVICE_QUERY_CONFIG | SERVICE_QUERY_STATUS mode! (dwErr=%d)\n", dwErr));
        return RTErrConvertFromWin32(dwErr);
    }

    /*
     * Try to open our service to check its status.
     */
    SC_HANDLE hService = OpenService(hSMgr, SERVICE_NAME, SERVICE_QUERY_STATUS | SERVICE_START);
    if (!hService)
    {
        /*
         * Create the service.
         */
        int rc = suplibOsCreateService();
        if (RT_FAILURE(rc))
            return rc;

        /*
     * Try to open the service.
         */
        hService = OpenService(hSMgr, SERVICE_NAME, SERVICE_QUERY_STATUS | SERVICE_START);
    }

    /*
     * Check whether the open and the on-demand creation succeeded.
     */
    int rc;
    if (hService)
    {

        /*
         * Query service status to see if we need to start it or not.
         */
        SERVICE_STATUS Status;
        BOOL fRc = QueryServiceStatus(hService, &Status);
        Assert(fRc);
        if (Status.dwCurrentState == SERVICE_RUNNING)
            rc = VINF_ALREADY_INITIALIZED;
        else
        {
            if (Status.dwCurrentState == SERVICE_START_PENDING)
                rc = VINF_SUCCESS;
            else
            {
                /*
                 * Start it.
                 */
                if (StartService(hService, 0, NULL))
                    rc = VINF_SUCCESS;
                else
                {
                    DWORD dwErr = GetLastError();
                    AssertMsg(fRc, ("StartService failed with dwErr=%Rwa\n", dwErr));
                    rc = RTErrConvertFromWin32(dwErr);
                }
            }

            /*
             * Wait for the service to finish starting.
             * We'll wait for 10 seconds then we'll give up.
             */
            QueryServiceStatus(hService, &Status);
            if (Status.dwCurrentState == SERVICE_START_PENDING)
            {
                int iWait;
                for (iWait = 100; iWait > 0 && Status.dwCurrentState == SERVICE_START_PENDING; iWait--)
                {
                    Sleep(100);
                    QueryServiceStatus(hService, &Status);
                }
                DWORD dwErr = GetLastError(); NOREF(dwErr);
                AssertMsg(Status.dwCurrentState == SERVICE_RUNNING,
                          ("Failed to start. dwErr=%Rwa iWait=%d status=%d\n", dwErr, iWait, Status.dwCurrentState));
            }

            if (Status.dwCurrentState == SERVICE_RUNNING)
                rc = VINF_SUCCESS;
            else if (RT_SUCCESS_NP(rc))
                rc = VERR_GENERAL_FAILURE;
        }

        /*
         * Close open handles.
         */
        CloseServiceHandle(hService);
    }
    else
    {
        DWORD dwErr = GetLastError();
        AssertMsgFailed(("OpenService failed! LastError=%Rwa\n", dwErr));
        rc = RTErrConvertFromWin32(dwErr);
    }
    if (!CloseServiceHandle(hSMgr))
        AssertFailed();

    return rc;
}
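The same query-then-start shape, reduced to plain Win32 service control calls; a hedged sketch with a placeholder service name that returns the raw Win32 error instead of a VBox status:
#include <windows.h>

/*
 * Sketch only: make sure a service is running.  "MyDriver" is a placeholder
 * name and error handling is reduced to returning the last Win32 error.
 */
static DWORD myEnsureServiceRunning(void)
{
    SC_HANDLE hSCM = OpenSCManager(NULL, NULL, SC_MANAGER_CONNECT);
    if (!hSCM)
        return GetLastError();

    DWORD dwErr = ERROR_SUCCESS;
    SC_HANDLE hSvc = OpenService(hSCM, TEXT("MyDriver"), SERVICE_QUERY_STATUS | SERVICE_START);
    if (hSvc)
    {
        SERVICE_STATUS Status;
        if (!QueryServiceStatus(hSvc, &Status))
            dwErr = GetLastError();
        else if (   Status.dwCurrentState != SERVICE_RUNNING
                 && Status.dwCurrentState != SERVICE_START_PENDING
                 && !StartService(hSvc, 0, NULL))
            dwErr = GetLastError();
        CloseServiceHandle(hSvc);
    }
    else
        dwErr = GetLastError();

    CloseServiceHandle(hSCM);
    return dwErr;
}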