Example #1
/**
 * Implements the SUPDRV component factory interface query method.
 *
 * @returns Pointer to an interface. NULL if not supported.
 *
 * @param   pSupDrvFactory      Pointer to the component factory registration structure.
 * @param   pSession            The session - unused.
 * @param   pszInterfaceUuid    The factory interface id.
 */
static DECLCALLBACK(void *) vboxNetAdpQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid)
{
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pSupDrvFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, SupDrvFactory));

    /*
     * Convert the UUID strings and compare them.
     */
    RTUUID UuidReq;
    int rc = RTUuidFromStr(&UuidReq, pszInterfaceUuid);
    if (RT_SUCCESS(rc))
    {
        if (!RTUuidCompareStr(&UuidReq, INTNETTRUNKFACTORY_UUID_STR))
        {
            ASMAtomicIncS32(&pGlobals->cFactoryRefs);
            return &pGlobals->TrunkFactory;
        }
#ifdef LOG_ENABLED
        else
            Log(("VBoxNetAdp: unknown factory interface query (%s)\n", pszInterfaceUuid));
#endif
    }
    else
        Log(("VBoxNetAdp: rc=%Rrc, uuid=%s\n", rc, pszInterfaceUuid));

    return NULL;
}
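The RT_OFFSETOF arithmetic above recovers the VBOXNETADPGLOBALS instance from a pointer to its embedded SupDrvFactory member. A minimal stand-alone sketch of this container-of pattern follows; the Factory/Globals names are illustrative, not from the VirtualBox tree:

#include <cstddef>
#include <cstdio>

struct Factory { const char *pszName; };
struct Globals
{
    int     cFactoryRefs;
    Factory Factory_;               /* embedded member, like SupDrvFactory */
};

/* Recover the containing Globals from a pointer to its embedded Factory. */
static Globals *globalsFromFactory(Factory *pFactory)
{
    return reinterpret_cast<Globals *>(
        reinterpret_cast<char *>(pFactory) - offsetof(Globals, Factory_));
}

int main()
{
    Globals g = { 0, { "demo" } };
    std::printf("recovered %p, expected %p\n",
                static_cast<void *>(globalsFromFactory(&g.Factory_)),
                static_cast<void *>(&g));
    return 0;
}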
Example #2
/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED on a nested enter when nesting is disabled. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the lock operation (for the
 *                      lock validator).
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
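The fast path above hinges on the cLockers convention: -1 means the section is free, and a single compare-and-swap from -1 to 0 claims it without entering the kernel. A hedged C++11 sketch of just that counter protocol (no event semaphore, nesting, or statistics):

#include <atomic>
#include <cstdint>

struct ToySect
{
    std::atomic<int32_t> cLockers{-1};  /* -1 = free, >= 0 = taken (+ waiters) */
};

/* Claim the section iff nobody holds it; cLockers goes -1 -> 0. */
static bool toyTryEnter(ToySect &sect)
{
    int32_t expected = -1;
    return sect.cLockers.compare_exchange_strong(expected, 0);
}

/* Release; the real code decrements and wakes any waiters via the event sem. */
static void toyLeave(ToySect &sect)
{
    sect.cLockers.store(-1);
}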
Example #3
/**
 * Initializes the ring-0 driver runtime library.
 *
 * @returns IPRT status code.
 * @param   fReserved       Flags reserved for the future.
 */
RTR0DECL(int) RTR0Init(unsigned fReserved)
{
    int rc;
    int32_t cNewUsers;
    Assert(fReserved == 0);
#ifndef RT_OS_SOLARIS       /* On Solaris our thread preemption information is only obtained in rtR0InitNative().*/
    RT_ASSERT_PREEMPTIBLE();
#endif

    /*
     * The first user initializes it.
     * We rely on the module loader to ensure that there are no
     * initialization races should two modules share the IPRT.
     */
    cNewUsers = ASMAtomicIncS32(&g_crtR0Users);
    if (cNewUsers != 1)
    {
        if (cNewUsers > 1)
            return VINF_SUCCESS;
        ASMAtomicDecS32(&g_crtR0Users);
        return VERR_INTERNAL_ERROR_3;
    }

    rc = rtR0InitNative();
    if (RT_SUCCESS(rc))
    {
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfInit();
#endif
        rc = rtThreadInit();
        if (RT_SUCCESS(rc))
        {
#ifndef IN_GUEST /* play safe for now */
            rc = rtR0MpNotificationInit();
            if (RT_SUCCESS(rc))
            {
                rc = rtR0PowerNotificationInit();
                if (RT_SUCCESS(rc))
                    return rc;
                rtR0MpNotificationTerm();
            }
#else
            if (RT_SUCCESS(rc))
                return rc;
#endif
            rtThreadTerm();
        }
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfTerm();
#endif
        rtR0TermNative();
    }
    return rc;
}
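The reference counting above ensures that only the caller moving g_crtR0Users from 0 to 1 performs the real initialization; every later caller returns success immediately. A toy sketch of the same first-user-initializes pattern with std::atomic (names are illustrative):

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> g_cUsers{0};

/* Returns 0 on success; only the first user runs pfnRealInit. */
static int toyInitOnce(void (*pfnRealInit)())
{
    int32_t cNewUsers = g_cUsers.fetch_add(1) + 1;
    if (cNewUsers == 1)
    {
        pfnRealInit();               /* first user does the real work */
        return 0;
    }
    if (cNewUsers > 1)
        return 0;                    /* somebody already initialized us */
    g_cUsers.fetch_sub(1);           /* underflow: undo and report failure */
    return -1;
}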
Example #4
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);

    /* No preemption on OS/2, so do our own accounting. */
    int32_t c = ASMAtomicIncS32(&g_acPreemptDisabled[ASMGetApicId()]);
    AssertMsg(c > 0 && c < 32, ("%d\n", c));
    pState->u32Reserved = c;
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
Example #5
/**
 * Terminates the ring-0 driver runtime library.
 */
RTR0DECL(void) RTR0Term(void)
{
    int32_t cNewUsers;
    RT_ASSERT_PREEMPTIBLE();

    cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
    Assert(cNewUsers >= 0);
    if (cNewUsers == 0)
        rtR0Term();
    else if (cNewUsers < 0)
        ASMAtomicIncS32(&g_crtR0Users);
}
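A matching teardown, continuing the toyInitOnce sketch from RTR0Init above: only the user taking the count back to zero runs the real termination, and an unbalanced extra call is patched back up instead of being allowed to underflow, just as RTR0Term does.

/* Continues the toyInitOnce sketch; reuses its g_cUsers counter. */
static void toyTermOnce(void (*pfnRealTerm)())
{
    int32_t cNewUsers = g_cUsers.fetch_sub(1) - 1;
    if (cNewUsers == 0)
        pfnRealTerm();               /* last user does the cleanup */
    else if (cNewUsers < 0)
        g_cUsers.fetch_add(1);       /* over-release: patch the count back up */
}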
Example #6
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 * @param   pSrcPos             The source position of the lock operation
 *                              (for the lock validator).
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
Example #7
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try take the lock. (cLockers is -1 if it's free)
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody is owning it (or will be soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
Example #8
/**
 * Locks the custom string format type data for reading.
 *
 * This is cheap and mainly here to make sure the caller does the right thing.
 */
DECLINLINE(void) rtstrFormatTypeReadLock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    if (RT_UNLIKELY(ASMAtomicIncS32(&g_i32Spinlock) < 0))
    {
        unsigned volatile i;

        AssertFailed();
        for (i = 0;; i++)
            if (ASMAtomicUoReadS32(&g_i32Spinlock) > 0)
                break;
    }
#endif
}
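The counter here acts as a signed reader count: readers increment it and expect a positive result, while the (unshown) write side presumably parks it at a large negative value, so a reader that sees a negative count spins until the writer restores it. A rough sketch of that scheme; since the write side is not part of this example, the bias handling below is an assumption:

#include <atomic>
#include <cstdint>

/* >= 0: free or read-locked; a writer subtracts a large bias, making it negative. */
static std::atomic<int32_t> g_iSpinlock{0};

static void toyReadLock()
{
    /* Count ourselves in; if a writer pushed the count negative, spin until
       the writer adds its bias back and the count turns positive again. */
    if (g_iSpinlock.fetch_add(1) + 1 < 0)
        while (g_iSpinlock.load(std::memory_order_relaxed) <= 0)
        { /* spin */ }
}

static void toyReadUnlock()
{
    g_iSpinlock.fetch_sub(1);
}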
Example #9
HRESULT ListenerRecord::dequeue(IEvent **aEvent,
                                LONG aTimeout,
                                AutoLockBase &aAlock)
{
    if (mActive)
        return VBOX_E_INVALID_OBJECT_STATE;

    // retain listener record
    RecordHolder<ListenerRecord> holder(this);

    ::RTCritSectEnter(&mcsQLock);

    mLastRead = RTTimeMilliTS();

    /*
     * If waiting is both desired and necessary, then try to grab the event
     * semaphore and mark it busy.  If it's NIL we've been shut down already.
     */
    if (aTimeout != 0 && mQueue.empty())
    {
        RTSEMEVENT hEvt = mQEvent;
        if (hEvt != NIL_RTSEMEVENT)
        {
            ASMAtomicIncS32(&mQEventBusyCnt);
            ::RTCritSectLeave(&mcsQLock);

            // release lock while waiting, listener will not go away due to above holder
            aAlock.release();

            ::RTSemEventWait(hEvt, aTimeout);
            ASMAtomicDecS32(&mQEventBusyCnt);

            // reacquire lock
            aAlock.acquire();
            ::RTCritSectEnter(&mcsQLock);
        }
    }

    if (mQueue.empty())
        *aEvent = NULL;
    else
    {
        mQueue.front().queryInterfaceTo(aEvent);
        mQueue.pop_front();
    }

    ::RTCritSectLeave(&mcsQLock);
    return S_OK;
}
Example #10
/**
 * Internal initialization worker.
 *
 * @returns IPRT status code.
 * @param   fFlags          Flags, see RTR3INIT_XXX.
 * @param   cArgs           The argument count.
 * @param   papszArgs       Pointer to the argument vector pointer. NULL
 *                          allowed if @a cArgs is 0.
 * @param   pszProgramPath  The program path.  Pass NULL if we're to figure it
 *                          out ourselves.
 */
static int rtR3Init(uint32_t fFlags, int cArgs, char ***papszArgs, const char *pszProgramPath)
{
    /* no entry log flow, because prefixes and thread may freak out. */
    Assert(!(fFlags & ~(RTR3INIT_FLAGS_DLL | RTR3INIT_FLAGS_SUPLIB)));
    Assert(!(fFlags & RTR3INIT_FLAGS_DLL) || cArgs == 0);

    /*
     * Do reference counting, only initialize the first time around.
     *
     * We are ASSUMING that nobody will be able to race RTR3Init* calls when the
     * first one, the real init, is running (second assertion).
     */
    int32_t cUsers = ASMAtomicIncS32(&g_cUsers);
    if (cUsers != 1)
    {
        AssertMsg(cUsers > 1, ("%d\n", cUsers));
        Assert(!g_fInitializing);
#if !defined(IN_GUEST) && !defined(RT_NO_GIP)
        if (fFlags & RTR3INIT_FLAGS_SUPLIB)
            SUPR3Init(NULL);
#endif
        if (!pszProgramPath)
            return VINF_SUCCESS;

        int rc = rtR3InitProgramPath(pszProgramPath);
        if (RT_SUCCESS(rc))
            rc = rtR3InitArgv(fFlags, cArgs, papszArgs);
        return rc;
    }
    ASMAtomicWriteBool(&g_fInitializing, true);

    /*
     * Do the initialization.
     */
    int rc = rtR3InitBody(fFlags, cArgs, papszArgs, pszProgramPath);
    if (RT_FAILURE(rc))
    {
        /* failure */
        ASMAtomicWriteBool(&g_fInitializing, false);
        ASMAtomicDecS32(&g_cUsers);
        return rc;
    }

    /* success */
    LogFlow(("rtR3Init: returns VINF_SUCCESS\n"));
    ASMAtomicWriteBool(&g_fInitializing, false);
    return VINF_SUCCESS;
}
Example #11
/**
 * Get and retain handle.
 *
 * @returns Pointer to a reference handle or NULL if not our handle.
 * @param   hHandle             The handle.
 */
DECLINLINE(PMSIHACKHANDLE) MsiHackHandleRetain(HANDLE hHandle)
{
    uintptr_t const idxHandle = MSI_HACK_HANDLE_TO_INDEX(hHandle);
    EnterCriticalSection(&g_CritSect);
    if (idxHandle < g_cHandles)
    {
        PMSIHACKHANDLE pHandle = g_papHandles[idxHandle];
        if (pHandle)
        {
            ASMAtomicIncS32(&pHandle->cRefs);
            LeaveCriticalSection(&g_CritSect);
            return pHandle;
        }
    }
    LeaveCriticalSection(&g_CritSect);
    return NULL;
}
Example #12
RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;
    int                     rc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Check context, disable preemption and save flags if necessary.
     */
    rc = rtSemSpinMutexEnter(&State, pThis);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Try take the ownership.
     */
    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
    if (!fRc)
    {
        /* Busy, too bad. Check for attempts at nested access. */
        rc = VERR_SEM_BUSY;
        if (RT_UNLIKELY(pThis->hOwner == hSelf))
        {
            AssertMsgFailed(("%p attempt at nested access\n"));
            rc = VERR_SEM_NESTED;
        }

        rtSemSpinMutexLeave(&State);
        return rc;
    }

    /*
     * We're the semaphore owner.
     */
    ASMAtomicIncS32(&pThis->cLockers);
    pThis->SavedState = State;
    return VINF_SUCCESS;
}
Example #13
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;
    preempt_disable();
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);

#else /* !CONFIG_PREEMPT */
    int32_t c;
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);

    /* Do our own accounting. */
    c = ASMAtomicIncS32(&g_acPreemptDisabled[smp_processor_id()]);
    AssertMsg(c > 0 && c < 32, ("%d\n", c));
    pState->u32Reserved = c;
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
#endif
}
Example #14
HRESULT ListenerRecord::enqueue(IEvent *aEvent)
{
    AssertMsg(!mActive, ("must be passive\n"));

    // put an event in the queue
    ::RTCritSectEnter(&mcsQLock);

    // If no events have been read from this listener for a long time and events
    // keep coming, or if the queue is oversized, we shall unregister this listener.
    uint64_t sinceRead = RTTimeMilliTS() - mLastRead;
    size_t queueSize = mQueue.size();
    if (queueSize > 1000 || (queueSize > 500 && sinceRead > 60 * 1000))
    {
        ::RTCritSectLeave(&mcsQLock);
        return E_ABORT;
    }


    RTSEMEVENT hEvt = mQEvent;
    if (queueSize != 0 && mQueue.back() == aEvent)
        /* If the same event is pushed multiple times, it is a reusable event and
           we don't really need multiple instances of it in the queue */
        hEvt = NIL_RTSEMEVENT;
    else if (hEvt != NIL_RTSEMEVENT) /* don't bother queuing after shutdown */
    {
        mQueue.push_back(aEvent);
        ASMAtomicIncS32(&mQEventBusyCnt);
    }

    ::RTCritSectLeave(&mcsQLock);

    // notify waiters unless we've been shut down.
    if (hEvt != NIL_RTSEMEVENT)
    {
        ::RTSemEventSignal(hEvt);
        ASMAtomicDecS32(&mQEventBusyCnt);
    }

    return S_OK;
}
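The coalescing test above keeps a reusable event from piling up: if the identical event object is already at the back of the queue, it is not queued again. A toy sketch of just that test (hypothetical helper; works for any T with operator==, e.g. smart pointers comparing the underlying interface pointer):

#include <deque>

/* Push an event unless the identical (reusable) event is already last. */
template <typename T>
static bool enqueueCoalesced(std::deque<T> &queue, const T &event)
{
    if (!queue.empty() && queue.back() == event)
        return false;               /* reusable event already pending */
    queue.push_back(event);
    return true;
}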
Example #15
/**
 * Implements the SUPDRV component factory interface query method.
 *
 * @returns Pointer to an interface. NULL if not supported.
 *
 * @param   pSupDrvFactory      Pointer to the component factory registration structure.
 * @param   pSession            The session - unused.
 * @param   pszInterfaceUuid    The factory interface id.
 */
static DECLCALLBACK(void *) vboxPciQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid)
{
    PVBOXRAWPCIGLOBALS pGlobals = (PVBOXRAWPCIGLOBALS)((uint8_t *)pSupDrvFactory - RT_OFFSETOF(VBOXRAWPCIGLOBALS, SupDrvFactory));

    /*
     * Convert the UUID strings and compare them.
     */
    RTUUID UuidReq;
    int rc = RTUuidFromStr(&UuidReq, pszInterfaceUuid);
    if (RT_SUCCESS(rc))
    {
        if (!RTUuidCompareStr(&UuidReq, RAWPCIFACTORY_UUID_STR))
        {
            ASMAtomicIncS32(&pGlobals->cFactoryRefs);
            return &pGlobals->RawPciFactory;
        }
    }
    else
        Log(("VBoxRawPci: rc=%Rrc, uuid=%s\n", rc, pszInterfaceUuid));

    return NULL;
}
Example #16
/**
 * Start the client service.
 */
bool org_virtualbox_SupDrvClient::start(IOService *pProvider)
{
    LogFlow(("org_virtualbox_SupDrvClient::start([%p], %p) (cur pid=%d proc=%p)\n",
             this, pProvider, RTProcSelf(), RTR0ProcHandleSelf() ));
    AssertMsgReturn((RTR0PROCESS)m_Task == RTR0ProcHandleSelf(),
                    ("%p %p\n", m_Task, RTR0ProcHandleSelf()),
                    false);

    if (IOUserClient::start(pProvider))
    {
        m_pProvider = OSDynamicCast(org_virtualbox_SupDrv, pProvider);
        if (m_pProvider)
        {
            Assert(!m_pSession);

            /*
             * Create a new session.
             */
            int rc = supdrvCreateSession(&g_DevExt, true /* fUser */, false /*fUnrestricted*/, &m_pSession);
            if (RT_SUCCESS(rc))
            {
                m_pSession->fOpened = false;
                /* The Uid, Gid and fUnrestricted fields are set on open. */

                /*
                 * Insert it into the hash table, checking that there isn't
                 * already one for this process first. (One session per proc!)
                 */
                unsigned iHash = SESSION_HASH(m_pSession->Process);
                RTSpinlockAcquire(g_Spinlock);

                PSUPDRVSESSION pCur = g_apSessionHashTab[iHash];
                if (pCur && pCur->Process != m_pSession->Process)
                {
                    do pCur = pCur->pNextHash;
                    while (pCur && pCur->Process != m_pSession->Process);
                }
                if (!pCur)
                {
                    m_pSession->pNextHash = g_apSessionHashTab[iHash];
                    g_apSessionHashTab[iHash] = m_pSession;
                    m_pSession->pvSupDrvClient = this;
                    ASMAtomicIncS32(&g_cSessions);
                    rc = VINF_SUCCESS;
                }
                else
                    rc = VERR_ALREADY_LOADED;

                RTSpinlockReleaseNoInts(g_Spinlock);
                if (RT_SUCCESS(rc))
                {
                    Log(("org_virtualbox_SupDrvClient::start: created session %p for pid %d\n", m_pSession, (int)RTProcSelf()));
                    return true;
                }

                LogFlow(("org_virtualbox_SupDrvClient::start: already got a session for this process (%p)\n", pCur));
                supdrvCloseSession(&g_DevExt, m_pSession);
            }

            m_pSession = NULL;
            LogFlow(("org_virtualbox_SupDrvClient::start: rc=%Rrc from supdrvCreateSession\n", rc));
        }
        else
            LogFlow(("org_virtualbox_SupDrvClient::start: %p isn't org_virtualbox_SupDrv\n", pProvider));
    }
    return false;
}
Example #17
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 * @param   pSrcPos             The source position of the lock operation
 *                              (for the lock validator).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jumps back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
Example #18
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    size_t i = cReqs;

    do
    {
        int rcSol = 0;
        size_t cReqsSubmit = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (i-- > 0)
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->PortNotifier.portnfy_port = pCtxInt->iPort;
            pReqInt->pCtxInt                   = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
        }

        if (cReqsSubmit)
        {
            rcSol = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcSol < 0))
            {
                if (rcSol == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcSol = aio_error(&pReqInt->AioCB);
                    if (rcSol == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcSol != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                    }
                }
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        if (cReqs)
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            /*
             * If there are still requests left we have a flush request.
             * lio_listio does not work with these requests, so
             * we have to use aio_fsync directly.
             */
            rcSol = aio_fsync(O_SYNC, &pReqInt->AioCB);
            if (RT_UNLIKELY(rcSol < 0))
            {
                RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                rc = RTErrConvertFromErrno(errno);
                break;
            }

            ASMAtomicIncS32(&pCtxInt->cRequests);
            cReqs--;
            pahReqs++;
        }
    } while (cReqs);

    return rc;
}
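For reference, a minimal stand-alone lio_listio submission in the same POSIX AIO style, using the synchronous LIO_WAIT mode for brevity (error handling trimmed; link with -lrt on some systems):

#include <aio.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
    int fd = open("/tmp/aio-demo.txt", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    if (fd < 0)
        return 1;

    char achBuf[] = "hello";
    struct aiocb AioCB;
    std::memset(&AioCB, 0, sizeof(AioCB));
    AioCB.aio_fildes     = fd;
    AioCB.aio_buf        = achBuf;
    AioCB.aio_nbytes     = sizeof(achBuf) - 1;
    AioCB.aio_lio_opcode = LIO_WRITE;

    struct aiocb *apList[1] = { &AioCB };
    if (lio_listio(LIO_WAIT, apList, 1, NULL) < 0)  /* LIO_WAIT: block until done */
        perror("lio_listio");
    else
        std::printf("wrote %zd bytes\n", aio_return(&AioCB));
    close(fd);
    return 0;
}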
Example #19
/**
 * Retain a VBoxFUSE node.
 *
 * @param   pNode   The node.
 */
static void vboxfuseNodeRetain(PVBOXFUSENODE pNode)
{
    int32_t cNewRefs = ASMAtomicIncS32(&pNode->cRefs);
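    /* The count must not have gone from 0 to 1 here: retaining a node with a
       zero reference count would resurrect an object already being destroyed. */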
    Assert(cNewRefs != 1);
}
Example #20
static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Quickly check whether it's signaled.
     */
    /** @todo this isn't fair if someone is already waiting on it.  They should
     *        have the first go at it!
     *  (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */
    if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
        return VINF_SUCCESS;

    /*
     * Convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        if (!cMillies)
            return VERR_TIMEOUT;
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    ASMAtomicIncS32(&pThis->cWaiters);

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       cMillies, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
        }
        else if (lrc == -ETIMEDOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (!fAutoResume)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }
        /* adjust the relative timeout */
        if (pTimeout)
        {
            int64_t i64Diff = u64End - RTTimeSystemNanoTS();
            if (i64Diff < 1000)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
            ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
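The loop above is the classic futex-based event: fSignalled is consumed with a compare-exchange and FUTEX_WAIT only blocks while the word is still 0. A bare-bones Linux-only sketch of both halves (no timeouts, interruption handling, or lock validation; passing the atomic's address to the syscall is a common but informal idiom):

#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static std::atomic<uint32_t> g_fSignalled{0};

/* Thin wrapper over the raw futex syscall; glibc provides no wrapper. */
static long futexOp(void *uaddr, int op, uint32_t val)
{
    return syscall(SYS_futex, uaddr, op, val, nullptr, nullptr, 0);
}

static void toyWait()
{
    uint32_t fWasSet = 1;
    /* Consume the signal if it is set; otherwise sleep while it is still 0. */
    while (!g_fSignalled.compare_exchange_strong(fWasSet, 0))
    {
        futexOp(&g_fSignalled, FUTEX_WAIT, 0);  /* returns on wake or EINTR */
        fWasSet = 1;                            /* CAS overwrote it on failure */
    }
}

static void toySignal()
{
    g_fSignalled.store(1);
    futexOp(&g_fSignalled, FUTEX_WAKE, 1);      /* wake one waiter */
}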
Example #21
void BusAssignmentManager::AddRef()
{
    ASMAtomicIncS32(&pState->cRefCnt);
}
Example #22
/**
 * Thread method to wait for XPCOM events and notify the SDL thread.
 *
 * @returns Error code.
 * @param   hThreadSelf  The thread handle (unused)
 * @param   pvUser  User specific parameter, the file descriptor
 *                  of the event queue socket
 */
DECLCALLBACK(int) xpcomEventThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    int eqFD = (intptr_t)pvUser;
    unsigned cErrors = 0;
    int rc;

    /* Wait with the processing till the main thread needs it. */
    RTSemEventWait(g_EventSemXPCOMQueueThread, 2500);

    do
    {
        fd_set fdset;
        FD_ZERO(&fdset);
        FD_SET(eqFD, &fdset);
        int n = select(eqFD + 1, &fdset, NULL, NULL, NULL);

        /* are there any events to process? */
        if ((n > 0) && !g_fTerminateXPCOMQueueThread)
        {
            /*
             * Wait until all XPCOM events are processed. 1s just for sanity.
             */
            int iWait = 1000;
            /*
             * Don't post an event if there is a pending XPCOM event to prevent an
             * overflow of the SDL event queue.
             */
            if (g_s32XPCOMEventsPending < 1)
            {
                /*
                 * Post the event and wait for it to be processed. If we don't wait,
                 * we'll flood the queue on SMP systems and when the main thread is busy.
                 * In the event of a push error, we'll yield the timeslice and retry.
                 */
                SDL_Event event = {0};
                event.type = SDL_USEREVENT;
                event.user.type = SDL_USER_EVENT_XPCOM_EVENTQUEUE;
                rc = SDL_PushEvent(&event);
                if (!rc)
                {
                    /* success */
                    ASMAtomicIncS32(&g_s32XPCOMEventsPending);
                    cErrors = 0;
                }
                else
                {
                    /* failure */
                    cErrors++;
                    if (!RTThreadYield())
                        RTThreadSleep(2);
                    iWait = (cErrors >= 10) ? RT_MIN(cErrors - 8, 50) : 0;
                }
            }
            else
                Log2(("not enqueueing SDL XPCOM event (%d)\n", g_s32XPCOMEventsPending));

            if (iWait)
                RTSemEventWait(g_EventSemXPCOMQueueThread, iWait);
        }
    } while (!g_fTerminateXPCOMQueueThread);
    return VINF_SUCCESS;
}
Example #23
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling, and is the lock order right?
     */
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        hThreadSelf = pCritSect->pValidatorRec
                                ? RTThreadSelfAutoAdopt()
                                : RTThreadSelf();
    int             rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
         rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
         if (RT_FAILURE(rc9))
             return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
Example #24
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways:
                             * glibc returns the error code that would otherwise be in errno, while
                             * Apple returns -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (  (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
Example #25
RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;
    int                     rc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Check context, disable preemption and save flags if necessary.
     */
    rc = rtSemSpinMutexEnter(&State, pThis);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Try take the ownership.
     */
    ASMAtomicIncS32(&pThis->cLockers);
    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
    if (!fRc)
    {
        uint32_t cSpins;

        /*
         * It's busy. Check if it's an attempt at nested access.
         */
        if (RT_UNLIKELY(pThis->hOwner == hSelf))
        {
            AssertMsgFailed(("%p attempt at nested access\n"));
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_NESTED;
        }

        /*
         * Return if we're in interrupt context and the semaphore isn't
         * configured to be interrupt safe.
         */
        if (rc == VINF_SEM_BAD_CONTEXT)
        {
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_BAD_CONTEXT;
        }

        /*
         * Ok, we have to wait.
         */
        if (State.fSpin)
        {
            for (cSpins = 0; ; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                /*
                 * "Yield" once in a while. This may lower our IRQL/PIL which
                 * may preempting us, and it will certainly stop the hammering
                 * of hOwner for a little while.
                 */
                if ((cSpins & 0x7f) == 0x1f)
                {
                    rtSemSpinMutexLeave(&State);
                    rtSemSpinMutexEnter(&State, pThis);
                    Assert(State.fSpin);
                }
            }
        }
        else
        {
            for (cSpins = 0;; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                if ((cSpins & 15) == 15) /* spin a bit before going sleep (again). */
                {
                    rtSemSpinMutexLeave(&State);

                    rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT);
                    ASMCompilerBarrier();
                    if (RT_SUCCESS(rc))
                        AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED);
                    else if (rc == VERR_INTERRUPTED)
                        AssertRC(rc);       /* shouldn't happen */
                    else
                    {
                        AssertRC(rc);
                        return rc;
                    }

                    rc = rtSemSpinMutexEnter(&State, pThis);
                    AssertRCReturn(rc, rc);
                    Assert(!State.fSpin);
                }
            }
        }
    }

    /*
     * We're the semaphore owner.
     */
    pThis->SavedState = State;
    Assert(pThis->hOwner == hSelf);
    return VINF_SUCCESS;
}
Example #26
 void addRef()
 {
     ASMAtomicIncS32(&mRefCnt);
 }
Example #27
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 * @param   pSrcPos             The source position of the lock operation
 *                              (for the lock validator).
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# endif
    }
    /* won't get here */
}
Example #28
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 * @param   enmCpuid    What to do / is idCpu valid.
 * @param   idCpu       Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
 *                      RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   idCpu2      Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   pcHits      Where to return the number of CPUs the worker was
 *                      executed on. Optional.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
                             RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT4 anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->idCpu2    = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;

        KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
        pArgs->idCpu2 = idCpu2;
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask bits and the processor numbers despite the warnings in the docs.
     * If someone knows a better way to get this done, please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);

        ASMAtomicIncS32(&pArgs->cRefs);
        ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();

        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    if (pcHits)
        *pcHits = pArgs->cHits;

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
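The cRefs field guards the shared argument block: the initiator holds one reference, each queued DPC takes one more before being inserted, and whoever drops the count to zero frees the block. A platform-neutral sketch of that hand-off, with std::thread standing in for the DPCs:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

struct Args
{
    std::atomic<int32_t> cRefs{1};   /* the initiator's reference */
    std::atomic<int>     cHits{0};   /* toy stand-in for pArgs->cHits */
};

static void release(Args *pArgs)
{
    if (pArgs->cRefs.fetch_sub(1) == 1)   /* dropped the last reference? */
        delete pArgs;
}

int main()
{
    Args *pArgs = new Args;
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; i++)
    {
        pArgs->cRefs.fetch_add(1);        /* one reference per queued "DPC" */
        workers.emplace_back([pArgs]
        {
            pArgs->cHits.fetch_add(1);    /* do the work ... */
            release(pArgs);               /* ... then drop the DPC's reference */
        });
    }
    for (std::thread &t : workers)
        t.join();
    std::printf("cHits=%d\n", pArgs->cHits.load());  /* safe: we still hold a ref */
    release(pArgs);                       /* the initiator's reference */
    return 0;
}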
Example #29
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0,  VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    do
    {
        int rcBSD = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                    pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
            pReqInt->pCtxInt                                = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcBSD < 0))
            {
                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcBSD = aio_error(&pReqInt->AioCB);
                    if (   rcBSD == -1
                        && errno == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcBSD != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
                        pReqInt->pCtxInt      = NULL;
                        pReqInt->cbTransfered = 0;
                    }
                }
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /* Check if we have a flush request now. */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                 rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
                 if (RT_UNLIKELY(rcBSD < 0))
                 {
                    if (errno == EAGAIN)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                        return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                    }
                    else
                    {
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(errno);
                        pReqInt->cbTransfered = 0;
                        return pReqInt->Rc;
                    }
                 }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                cReqs--;
                pahReqs++;
            }
        }
    } while (cReqs);

    return rc;
}