/**
 * Destruct a driver instance.
 *
 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
 * resources can be freed correctly.
 *
 * @param   pDrvIns     The driver instance data.
 */
static DECLCALLBACK(void) drvscsiDestruct(PPDMDRVINS pDrvIns)
{
    PDRVSCSI pThis = PDMINS_2_DATA(pDrvIns, PDRVSCSI);
    PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);

    if (pThis->hQueueRequests != NIL_RTREQQUEUE)
    {
        if (!drvscsiAsyncIOLoopNoPendingDummy(pThis, 100 /*ms*/))
            LogRel(("drvscsiDestruct#%u: previous dummy request is still pending\n", pDrvIns->iInstance));

        int rc = RTReqQueueDestroy(pThis->hQueueRequests);
        AssertMsgRC(rc, ("Failed to destroy queue rc=%Rrc\n", rc));
        pThis->hQueueRequests = NIL_RTREQQUEUE;
    }

    /* Free the VSCSI device and LUN handle. */
    if (pThis->hVScsiDevice)
    {
        VSCSILUN hVScsiLun;
        int rc = VSCSIDeviceLunDetach(pThis->hVScsiDevice, 0, &hVScsiLun);
        AssertRC(rc);

        Assert(hVScsiLun == pThis->hVScsiLun);
        rc = VSCSILunDestroy(hVScsiLun);
        AssertRC(rc);
        rc = VSCSIDeviceDestroy(pThis->hVScsiDevice);
        AssertRC(rc);

        pThis->hVScsiDevice = NULL;
        pThis->hVScsiLun    = NULL;
    }
}
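
The destructor above follows a pattern that recurs in most of these examples: tear down the resource, report an unexpected failure with AssertMsgRC, and reset the handle to its NIL value so a repeated destruct stays harmless. A minimal sketch of that idiom (the MYSTATE structure and myStateTerm helper are made up; only the IPRT calls are real):

#include <iprt/assert.h>
#include <iprt/timer.h>

typedef struct MYSTATE
{
    RTTIMERLR hStatTimer;   /* NIL_RTTIMERLR when not created */
} MYSTATE;

static void myStateTerm(MYSTATE *pThis)
{
    if (pThis->hStatTimer != NIL_RTTIMERLR)
    {
        int rc = RTTimerLRDestroy(pThis->hStatTimer);
        AssertMsgRC(rc, ("RTTimerLRDestroy failed: %Rrc\n", rc));
        pThis->hStatTimer = NIL_RTTIMERLR; /* makes a second call a no-op */
    }
}
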
Example #2
/**
 * Release all locks and free the allocated memory.
 *
 * @param   pVM     The cross context VM structure.
 * @thread  The Emulation Thread.
 */
void mmR3PagePoolTerm(PVM pVM)
{
    if (pVM->mm.s.pPagePoolR3)
    {
        /*
         * Unlock all memory held by subpools and free the memory.
         * (The MM Heap will free the memory used for internal stuff.)
         */
        Assert(!pVM->mm.s.pPagePoolR3->fLow);
        PMMPAGESUBPOOL  pSubPool = pVM->mm.s.pPagePoolR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3PageFreeEx(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3PageFreeEx(%p) failed with rc=%Rrc\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolR0 = NIL_RTR0PTR;
#endif
    }

    if (pVM->mm.s.pPagePoolLowR3)
    {
        /*
         * Free the memory.
         */
        Assert(pVM->mm.s.pPagePoolLowR3->fLow);
        PMMPAGESUBPOOL  pSubPool = pVM->mm.s.pPagePoolLowR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3LowFree(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3LowFree(%p) failed with rc=%Rrc\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolLowR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolLowR0 = NIL_RTR0PTR;
#endif
    }
}
Example #3
/**
 * Initializes the guest object.
 */
HRESULT Guest::init(Console *aParent)
{
    LogFlowThisFunc(("aParent=%p\n", aParent));

    ComAssertRet(aParent, E_INVALIDARG);

    /* Enclose the state transition NotReady->InInit->Ready */
    AutoInitSpan autoInitSpan(this);
    AssertReturn(autoInitSpan.isOk(), E_FAIL);

    unconst(mParent) = aParent;

    /* Confirm a successful initialization when it's the case */
    autoInitSpan.setSucceeded();

    ULONG aMemoryBalloonSize;
    HRESULT ret = mParent->machine()->COMGETTER(MemoryBalloonSize)(&aMemoryBalloonSize);
    if (ret == S_OK)
        mMemoryBalloonSize = aMemoryBalloonSize;
    else
        mMemoryBalloonSize = 0;                     /* Default is no ballooning */

    BOOL fPageFusionEnabled;
    ret = mParent->machine()->COMGETTER(PageFusionEnabled)(&fPageFusionEnabled);
    if (ret == S_OK)
        mfPageFusionEnabled = fPageFusionEnabled;
    else
        mfPageFusionEnabled = false;                /* Default is no page fusion */

    mStatUpdateInterval = 0;                    /* Default is not to report guest statistics at all */
    mCollectVMMStats = false;

    /* Clear statistics. */
    mNetStatRx = mNetStatTx = 0;
    mNetStatLastTs = RTTimeNanoTS();
    for (unsigned i = 0 ; i < GUESTSTATTYPE_MAX; i++)
        mCurrentGuestStat[i] = 0;
    mVmValidStats = pm::VMSTATMASK_NONE;

    mMagic = GUEST_MAGIC;
    int vrc = RTTimerLRCreate(&mStatTimer, 1000 /* ms */,
                              &Guest::staticUpdateStats, this);
    AssertMsgRC(vrc, ("Failed to create guest statistics update timer(%Rra)\n", vrc));

    try
    {
#ifdef VBOX_WITH_DRAG_AND_DROP
        m_pGuestDnD = new GuestDnD(this);
        AssertPtr(m_pGuestDnD);
#endif
    }
    catch(std::bad_alloc &)
    {
        return E_OUTOFMEMORY;
    }

    return S_OK;
}
Example #4
/**
 * The common thread main function.
 * This is called by rtThreadNativeMain().
 *
 * @returns The status code of the thread.
 *          pThread is dereferenced by the thread before returning!
 * @param   pThread         The thread structure.
 * @param   NativeThread    The native thread id.
 * @param   pszThreadName   The name of the thread (purely a dummy for backtrace).
 */
DECLCALLBACK(DECLHIDDEN(int)) rtThreadMain(PRTTHREADINT pThread, RTNATIVETHREAD NativeThread, const char *pszThreadName)
{
    int rc;
    NOREF(pszThreadName);
    rtThreadInsert(pThread, NativeThread);
    Log(("rtThreadMain: Starting: pThread=%p NativeThread=%RTnthrd Name=%s pfnThread=%p pvUser=%p\n",
         pThread, NativeThread, pThread->szName, pThread->pfnThread, pThread->pvUser));

    /*
     * Change the priority.
     */
    rc = rtThreadNativeSetPriority(pThread, pThread->enmType);
#ifdef IN_RING3
    AssertMsgRC(rc, ("Failed to set priority of thread %p (%RTnthrd / %s) to enmType=%d enmPriority=%d rc=%Rrc\n",
                     pThread, NativeThread, pThread->szName, pThread->enmType, g_enmProcessPriority, rc));
#else
    AssertMsgRC(rc, ("Failed to set priority of thread %p (%RTnthrd / %s) to enmType=%d rc=%Rrc\n",
                     pThread, NativeThread, pThread->szName, pThread->enmType, rc));
#endif

    /*
     * Call thread function and terminate when it returns.
     */
    rtThreadSetState(pThread, RTTHREADSTATE_RUNNING);
    rc = pThread->pfnThread(pThread, pThread->pvUser);

    /*
     * Paranoia checks for leftover resources.
     */
#ifdef RTSEMRW_STRICT
    int32_t cWrite = ASMAtomicReadS32(&pThread->cWriteLocks);
    Assert(!cWrite);
    int32_t cRead = ASMAtomicReadS32(&pThread->cReadLocks);
    Assert(!cRead);
#endif

    Log(("rtThreadMain: Terminating: rc=%d pThread=%p NativeThread=%RTnthrd Name=%s pfnThread=%p pvUser=%p\n",
         rc, pThread, NativeThread, pThread->szName, pThread->pfnThread, pThread->pvUser));
    rtThreadTerminate(pThread, rc);
    return rc;
}
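
rtThreadMain above is the internal trampoline; the pfnThread it calls is whatever the caller handed to the public API. A rough caller-side sketch (worker name and body are invented), assuming the standard RTThreadCreate/RTThreadWait interface:

#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/err.h>

/* The user thread function; its return value is what rtThreadMain passes to
 * rtThreadTerminate() and what RTThreadWait() reports back to the creator. */
static DECLCALLBACK(int) myWorker(RTTHREAD hThreadSelf, void *pvUser)
{
    NOREF(hThreadSelf); NOREF(pvUser);
    /* ... do the actual work ... */
    return VINF_SUCCESS;
}

static int myStartAndJoinWorker(void)
{
    RTTHREAD hThread;
    int rc = RTThreadCreate(&hThread, myWorker, NULL /*pvUser*/, 0 /*cbStack*/,
                            RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "my-worker");
    AssertMsgRCReturn(rc, ("RTThreadCreate failed: %Rrc\n", rc), rc);

    int rcThread = VINF_SUCCESS;
    rc = RTThreadWait(hThread, RT_INDEFINITE_WAIT, &rcThread);
    AssertMsgRC(rc, ("RTThreadWait failed: %Rrc\n", rc));
    return RT_SUCCESS(rc) ? rcThread : rc;
}
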
Example #5
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    Assert(pThread->Core.Key == pthread_self());
    Assert(enmType > RTTHREADTYPE_INVALID && enmType < RTTHREADTYPE_END);
    AssertMsg(g_pProcessPriority && g_pProcessPriority->aTypes[enmType].enmType == enmType,
              ("enmType=%d entry=%d\n", enmType, g_pProcessPriority->aTypes[enmType].enmType));

    /*
     * Get the current policy and params first since there are
     * opaque members in the param structure and we don't wish to
     * change the policy.
     */
    int iSchedPolicy = SCHED_OTHER;
    struct sched_param SchedParam = {0, {0,0,0,0} };
    int err = pthread_getschedparam((pthread_t)pThread->Core.Key, &iSchedPolicy, &SchedParam);
    if (!err)
    {
        int const iDesiredBasePriority = g_pProcessPriority->aTypes[enmType].iBasePriority;
        int       iPriority            = g_pProcessPriority->aTypes[enmType].iPriority;

        /*
         * First try with the given pthread priority number.
         * Then make adjustments in case we missed the desired base priority (interface
     * changed or whatever - it's using an obsolete Mach API).
         */
        SchedParam.sched_priority = iPriority;
        err = pthread_setschedparam((pthread_t)pThread->Core.Key, iSchedPolicy, &SchedParam);
        if (!err)
        {
            int i = 0;
            int iBasePriority = rtSchedDarwinGetBasePriority();

            while (!err && iBasePriority < iDesiredBasePriority && i++ < 256)
            {
                SchedParam.sched_priority = ++iPriority;
                err = pthread_setschedparam((pthread_t)pThread->Core.Key, iSchedPolicy, &SchedParam);
                iBasePriority = rtSchedDarwinGetBasePriority();
            }

            while (!err && iBasePriority > iDesiredBasePriority && i++ < 256)
            {
                SchedParam.sched_priority = --iPriority;
                err = pthread_setschedparam((pthread_t)pThread->Core.Key, iSchedPolicy, &SchedParam);
                iBasePriority = rtSchedDarwinGetBasePriority();
            }
        }
    }
    int rc = RTErrConvertFromErrno(err);
    AssertMsgRC(rc, ("rc=%Rrc err=%d iSchedPolicy=%d sched_priority=%d\n",
                     rc, err, iSchedPolicy, SchedParam.sched_priority));
    return rc;
}
static int drvscsihostAsyncIOLoopWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
{
    int rc;
    PDRVSCSIHOST pThis = PDMINS_2_DATA(pDrvIns, PDRVSCSIHOST);
    PRTREQ pReq;

    AssertReturn(pThis->hQueueRequests != NIL_RTREQQUEUE, VERR_INVALID_STATE);

    rc = RTReqQueueCall(pThis->hQueueRequests, &pReq, 10000 /* 10 sec. */, (PFNRT)drvscsihostAsyncIOLoopWakeupFunc, 0);
    AssertMsgRC(rc, ("Inserting request into queue failed rc=%Rrc\n", rc));

    return rc;
}
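
The wakeup above only queues a call; a dedicated I/O thread has to consume it. A sketch of what the consuming side can look like with the generic request queue API (the shutdown flag and helper names are invented; the real drvscsihost loop differs):

#include <iprt/req.h>
#include <iprt/assert.h>
#include <iprt/err.h>

/* Counterpart of the queued wakeup call: executed on the consumer thread. */
static DECLCALLBACK(int) myWakeupFunc(void)
{
    return VINF_SUCCESS; /* waking RTReqQueueProcess() is the whole point */
}

static int myRequestLoop(RTREQQUEUE hQueue, bool volatile *pfShutdown)
{
    while (!*pfShutdown)
    {
        /* Blocks until requests are queued, executes them, then returns. */
        int rc = RTReqQueueProcess(hQueue, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT)
        {
            AssertMsgRC(rc, ("RTReqQueueProcess failed: %Rrc\n", rc));
            return rc;
        }
    }
    return VINF_SUCCESS;
}
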
Example #7
/**
 * Uninitializes the instance and sets the ready flag to FALSE.
 * Called either from FinalRelease() or by the parent when it gets destroyed.
 */
void Guest::uninit()
{
    LogFlowThisFunc(("\n"));

    /* Enclose the state transition Ready->InUninit->NotReady */
    AutoUninitSpan autoUninitSpan(this);
    if (autoUninitSpan.uninitDone())
        return;

#ifdef VBOX_WITH_GUEST_CONTROL
    /* Scope write lock as much as possible. */
    {
        /*
         * Cleanup must be done *before* AutoUninitSpan to cancel all
         * outstanding waits in API functions (which hold AutoCaller
         * ref counts).
         */
        AutoWriteLock alock(this COMMA_LOCKVAL_SRC_POS);

        /* Notify left over callbacks that we are about to shutdown ... */
        CallbackMapIter it;
        for (it = mCallbackMap.begin(); it != mCallbackMap.end(); it++)
        {
            int rc2 = callbackNotifyEx(it->first, VERR_CANCELLED,
                                       Guest::tr("VM is shutting down, canceling uncompleted guest requests ..."));
            AssertRC(rc2);
        }

        /* Destroy left over callback data. */
        for (it = mCallbackMap.begin(); it != mCallbackMap.end(); it++)
            callbackDestroy(it->first);

        /* Clear process map (remove all callbacks). */
        mGuestProcessMap.clear();
    }
#endif

    /* Destroy stat update timer */
    int vrc = RTTimerLRDestroy(mStatTimer);
    AssertMsgRC(vrc, ("Failed to destroy guest statistics update timer (%Rra)\n", vrc));
    mStatTimer = NULL;
    mMagic     = 0;

#ifdef VBOX_WITH_DRAG_AND_DROP
    delete m_pGuestDnD;
    m_pGuestDnD = NULL;
#endif

    unconst(mParent) = NULL;
}
Example #8
/**
 * Uninitializes the instance and sets the ready flag to FALSE.
 * Called either from FinalRelease() or by the parent when it gets destroyed.
 */
void Guest::uninit()
{
    LogFlowThisFunc(("\n"));

    /* Enclose the state transition Ready->InUninit->NotReady */
    AutoUninitSpan autoUninitSpan(this);
    if (autoUninitSpan.uninitDone())
        return;

    /* Destroy stat update timer */
    int vrc = RTTimerLRDestroy(mStatTimer);
    AssertMsgRC(vrc, ("Failed to destroy guest statistics update timer (%Rra)\n", vrc));
    mStatTimer = NULL;
    mMagic     = 0;

#ifdef VBOX_WITH_GUEST_CONTROL
    LogFlowThisFunc(("Closing sessions (%RU64 total)\n",
                     mData.mGuestSessions.size()));
    GuestSessions::iterator itSessions = mData.mGuestSessions.begin();
    while (itSessions != mData.mGuestSessions.end())
    {
#ifdef DEBUG
        ULONG cRefs = itSessions->second->AddRef();
        LogFlowThisFunc(("pSession=%p, cRefs=%RU32\n", (GuestSession *)itSessions->second, cRefs > 0 ? cRefs - 1 : 0));
        itSessions->second->Release();
#endif
        itSessions->second->uninit();
        itSessions++;
    }
    mData.mGuestSessions.clear();
#endif

#ifdef VBOX_WITH_DRAG_AND_DROP
    if (m_pGuestDnD)
    {
        delete m_pGuestDnD;
        m_pGuestDnD = NULL;
    }
#endif

#ifdef VBOX_WITH_GUEST_CONTROL
    unconst(mEventSource).setNull();
#endif
    unconst(mParent) = NULL;

    LogFlowFuncLeave();
}
/**
 * Destruct a driver instance.
 *
 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
 * resources can be freed correctly.
 *
 * @param   pDrvIns     The driver instance data.
 */
static DECLCALLBACK(void) drvscsihostDestruct(PPDMDRVINS pDrvIns)
{
    PDRVSCSIHOST pThis = PDMINS_2_DATA(pDrvIns, PDRVSCSIHOST);
    PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);

    RTFileClose(pThis->hDeviceFile);
    pThis->hDeviceFile = NIL_RTFILE;

    if (pThis->pszDevicePath)
    {
        MMR3HeapFree(pThis->pszDevicePath);
        pThis->pszDevicePath = NULL;
    }

    if (pThis->hQueueRequests != NIL_RTREQQUEUE)
    {
        int rc = RTReqQueueDestroy(pThis->hQueueRequests);
        AssertMsgRC(rc, ("Failed to destroy queue rc=%Rrc\n", rc));
        pThis->hQueueRequests = NIL_RTREQQUEUE;
    }
}
Example #10
RTDECL(int) RTSemRWReleaseWrite(RTSEMRW hRWSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    AssertRCReturn(rc, rc);

    /*
     * Check if owner.
     */
    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (pThis->hWriter != hNativeSelf)
    {
        RTCritSectLeave(&pThis->CritSect);
        AssertMsgFailed(("Not read-write owner of rwsem %p.\n", hRWSem));
        return VERR_NOT_OWNER;
    }

#ifdef RTSEMRW_STRICT
    if (pThis->cWrites > 1 || !pThis->cWriterReads) /* don't check+release if VERR_WRONG_ORDER */
    {
        int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, pThis->cWrites == 1);
        if (RT_FAILURE(rc9))
        {
            RTCritSectLeave(&pThis->CritSect);
            return rc9;
        }
    }
#endif

    /*
     * Release ownership and remove ourselves from the writers count.
     */
    Assert(pThis->cWrites > 0);
    pThis->cWrites--;
    if (!pThis->cWrites)
    {
        if (RT_UNLIKELY(pThis->cWriterReads > 0))
        {
            pThis->cWrites++;
            RTCritSectLeave(&pThis->CritSect);
            AssertMsgFailed(("All recursive read locks need to be released prior to the final write lock! (%p)\n", pThis));
            return VERR_WRONG_ORDER;
        }

        pThis->hWriter = NIL_RTNATIVETHREAD;
    }

    /*
     * If no more writers are waiting, release the readers; otherwise signal the next writer.
     */
    if (!pThis->cWritesWaiting)
    {
        rc = RTSemEventMultiSignal(pThis->ReadEvent);
        AssertMsgRC(rc, ("RTSemEventMultiSignal failed for rwsem %p, rc=%Rrc.\n", hRWSem, rc));
        pThis->fNeedResetReadEvent = true;
    }
    else
    {
        rc = RTSemEventSignal(pThis->WriteEvent);
        AssertMsgRC(rc, ("Failed to signal writers on rwsem %p, rc=%Rrc\n", hRWSem, rc));
    }
    RTCritSectLeave(&pThis->CritSect);

    return rc;
}
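
This and several of the later examples implement the generic read/write semaphore. From the caller's side the API looks roughly like the sketch below (error handling trimmed); note the ordering rule the release path above enforces: read locks taken recursively while holding the write lock must be dropped before the final RTSemRWReleaseWrite, or VERR_WRONG_ORDER is returned.

#include <iprt/semaphore.h>
#include <iprt/assert.h>
#include <iprt/err.h>

static int myUseRwSem(void)
{
    RTSEMRW hRWSem;
    int rc = RTSemRWCreate(&hRWSem);
    AssertMsgRCReturn(rc, ("RTSemRWCreate failed: %Rrc\n", rc), rc);

    rc = RTSemRWRequestWrite(hRWSem, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        /* A writer may recurse into read locks ... */
        rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);
        /* ... but has to drop them before the final write release. */
        if (RT_SUCCESS(rc))
            RTSemRWReleaseRead(hRWSem);
        RTSemRWReleaseWrite(hRWSem);
    }

    rc = RTSemRWDestroy(hRWSem);
    AssertMsgRC(rc, ("RTSemRWDestroy failed: %Rrc\n", rc));
    return rc;
}
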
Example #11
/**
 * \#PF Handler callback for virtual access handler ranges. (CSAM self-modifying
 * code monitor)
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode   CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) CSAMGCCodePageWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PPATMGCSTATE pPATMGCState;
    bool         fPatchCode = PATMIsPatchGCAddr(pVM, pRegFrame->eip);
    int          rc;
    PVMCPU       pVCpu = VMMGetCpu0(pVM);
    NOREF(uErrorCode);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);

#ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    REMFlushTBs(pVM);
#endif

    pPATMGCState = PATMGetGCState(pVM);
    Assert(pPATMGCState);

    Assert(pPATMGCState->fPIF || fPatchCode);
    /** When patch code is executing instructions that must complete, we must *never* interrupt it. */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("CSAMGCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", pRegFrame->eip));
        /** @note there are cases when pages previously used for code are now used for stack; patch generated code will fault (pushf).
         *  Just make the page r/w and continue.
         */
        /*
         * Make this particular page R/W.
         */
        rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
        AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
        ASMInvalidatePage((void *)(uintptr_t)pvFault);
        return VINF_SUCCESS;
    }

    uint32_t cpl;

    if (pRegFrame->eflags.Bits.u1VM)
        cpl = 3;
    else
        cpl = (pRegFrame->ss.Sel & X86_SEL_RPL);

    Log(("CSAMGCCodePageWriteHandler: code page write at %RGv original address %RGv (cpl=%d)\n", pvFault, (RTGCUINTPTR)pvRange + offRange, cpl));

    /* If user code is modifying one of our monitored pages, then we can safely make it r/w as it's no longer being used for supervisor code. */
    if (cpl != 3)
    {
        rc = PATMRCHandleWriteToPatchPage(pVM, pRegFrame, (RTRCPTR)((RTRCUINTPTR)pvRange + offRange), 4 /** @todo */);
        if (rc == VINF_SUCCESS)
            return rc;
        if (rc == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rc == VERR_PATCH_NOT_FOUND);
    }

    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);

    /* Note that pvFault might be a different address in case of aliases. So use pvRange + offset instead! */
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages] = (RTRCPTR)((RTRCUINTPTR)pvRange + offRange);
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)((RTRCUINTPTR)pvRange + offRange);
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Make this particular page R/W. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("CSAMGCCodePageWriteHandler: enabled r/w for page %RGv\n", pvFault));
    rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
    AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
    ASMInvalidatePage((void *)(uintptr_t)pvFault);

    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_SUCCESS;
}
Example #12
/**
 * Initializes the guest object.
 */
HRESULT Guest::init(Console *aParent)
{
    LogFlowThisFunc(("aParent=%p\n", aParent));

    ComAssertRet(aParent, E_INVALIDARG);

    /* Enclose the state transition NotReady->InInit->Ready */
    AutoInitSpan autoInitSpan(this);
    AssertReturn(autoInitSpan.isOk(), E_FAIL);

    unconst(mParent) = aParent;

    /* Confirm a successful initialization when it's the case */
    autoInitSpan.setSucceeded();

    ULONG aMemoryBalloonSize;
    HRESULT hr = mParent->i_machine()->COMGETTER(MemoryBalloonSize)(&aMemoryBalloonSize);
    if (hr == S_OK) /** @todo r=andy SUCCEEDED? */
        mMemoryBalloonSize = aMemoryBalloonSize;
    else
        mMemoryBalloonSize = 0; /* Default is no ballooning */

    BOOL fPageFusionEnabled;
    hr = mParent->i_machine()->COMGETTER(PageFusionEnabled)(&fPageFusionEnabled);
    if (hr == S_OK) /** @todo r=andy SUCCEEDED? */
        mfPageFusionEnabled = fPageFusionEnabled;
    else
        mfPageFusionEnabled = false; /* Default is no page fusion */

    mStatUpdateInterval = 0; /* Default is not to report guest statistics at all */
    mCollectVMMStats = false;

    /* Clear statistics. */
    mNetStatRx = mNetStatTx = 0;
    mNetStatLastTs = RTTimeNanoTS();
    for (unsigned i = 0 ; i < GUESTSTATTYPE_MAX; i++)
        mCurrentGuestStat[i] = 0;
    mVmValidStats = pm::VMSTATMASK_NONE;
    RT_ZERO(mCurrentGuestCpuUserStat);
    RT_ZERO(mCurrentGuestCpuKernelStat);
    RT_ZERO(mCurrentGuestCpuIdleStat);

    mMagic = GUEST_MAGIC;
    int vrc = RTTimerLRCreate(&mStatTimer, 1000 /* ms */,
                              &Guest::i_staticUpdateStats, this);
    AssertMsgRC(vrc, ("Failed to create guest statistics update timer (%Rrc)\n", vrc));

    hr = unconst(mEventSource).createObject();
    if (SUCCEEDED(hr))
        hr = mEventSource->init();

    mCpus = 1;

#ifdef VBOX_WITH_DRAG_AND_DROP
    try
    {
        GuestDnD::createInstance(this /* pGuest */);
        hr = unconst(mDnDSource).createObject();
        if (SUCCEEDED(hr))
            hr = mDnDSource->init(this /* pGuest */);
        if (SUCCEEDED(hr))
        {
            hr = unconst(mDnDTarget).createObject();
            if (SUCCEEDED(hr))
                hr = mDnDTarget->init(this /* pGuest */);
        }

        LogFlowFunc(("Drag and drop initialized with hr=%Rhrc\n", hr));
    }
    catch (std::bad_alloc &)
    {
        hr = E_OUTOFMEMORY;
    }
#endif

    LogFlowFunc(("hr=%Rhrc\n", hr));
    return hr;
}
Example #13
RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
{
    struct RTSEMRWINTERNAL *pThis = hRWSem;

    /*
     * Validate handle.
     */
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if busy.
     */
    int rc = RTCritSectTryEnter(&pThis->CritSect);
    if (RT_SUCCESS(rc))
    {
        if (!pThis->cReads && !pThis->cWrites)
        {
            /*
             * Make it invalid and unusable.
             */
            ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMRW_MAGIC);
            pThis->cReads = ~0;

            /*
             * Do actual cleanup. None of these can now fail.
             */
            rc = RTSemEventMultiDestroy(pThis->ReadEvent);
            AssertMsgRC(rc, ("RTSemEventMultiDestroy failed! rc=%Rrc\n", rc));
            pThis->ReadEvent = NIL_RTSEMEVENTMULTI;

            rc = RTSemEventDestroy(pThis->WriteEvent);
            AssertMsgRC(rc, ("RTSemEventDestroy failed! rc=%Rrc\n", rc));
            pThis->WriteEvent = NIL_RTSEMEVENT;

            RTCritSectLeave(&pThis->CritSect);
            rc = RTCritSectDelete(&pThis->CritSect);
            AssertMsgRC(rc, ("RTCritSectDelete failed! rc=%Rrc\n", rc));

#ifdef RTSEMRW_STRICT
            RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
            RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
#endif
            RTMemFree(pThis);
            rc = VINF_SUCCESS;
        }
        else
        {
            rc = VERR_SEM_BUSY;
            RTCritSectLeave(&pThis->CritSect);
        }
    }
    else
    {
        AssertMsgRC(rc, ("RTCritSectTryEnter failed! rc=%Rrc\n", rc));
        rc = VERR_SEM_BUSY;
    }

    return rc;
}
static int drvscsihostProcessRequestOne(PDRVSCSIHOST pThis, PPDMSCSIREQUEST pRequest)
{
    int rc = VINF_SUCCESS;
    unsigned uTxDir;

    LogFlowFunc(("Entered\n"));

#ifdef DEBUG
    drvscsihostDumpScsiRequest(pRequest);
#endif

    /* We only implement LUN 0; answer other LUNs with minimal INQUIRY data or an error. */
    if (pRequest->uLogicalUnit != 0)
    {
        switch (pRequest->pbCDB[0])
        {
            case SCSI_INQUIRY:
            {
                SCSIINQUIRYDATA ScsiInquiryReply;

                memset(&ScsiInquiryReply, 0, sizeof(ScsiInquiryReply));

                ScsiInquiryReply.u5PeripheralDeviceType = SCSI_INQUIRY_DATA_PERIPHERAL_DEVICE_TYPE_UNKNOWN;
                ScsiInquiryReply.u3PeripheralQualifier = SCSI_INQUIRY_DATA_PERIPHERAL_QUALIFIER_NOT_CONNECTED_NOT_SUPPORTED;
                drvscsihostScatterGatherListCopyFromBuffer(pRequest, &ScsiInquiryReply, sizeof(SCSIINQUIRYDATA));
                drvscsihostCmdOk(pRequest);
                break;
            }
            default:
                AssertMsgFailed(("Command not implemented for attached device\n"));
                drvscsiCmdError(pRequest, SCSI_SENSE_ILLEGAL_REQUEST, SCSI_ASC_NONE);
        }
    }
    else
    {
#if defined(RT_OS_LINUX)
        sg_io_hdr_t ScsiIoReq;
        sg_iovec_t  *paSG = NULL;

        /* Setup SCSI request. */
        memset(&ScsiIoReq, 0, sizeof(sg_io_hdr_t));
        ScsiIoReq.interface_id = 'S';

        if (pRequest->uDataDirection == PDMSCSIREQUESTTXDIR_UNKNOWN)
            uTxDir = drvscsihostGetTransferDirectionFromCommand(pRequest->pbCDB[0]);
        else
            uTxDir = pRequest->uDataDirection;

        if (uTxDir == PDMSCSIREQUESTTXDIR_NONE)
            ScsiIoReq.dxfer_direction = SG_DXFER_NONE;
        else if (uTxDir == PDMSCSIREQUESTTXDIR_TO_DEVICE)
            ScsiIoReq.dxfer_direction = SG_DXFER_TO_DEV;
        else if (uTxDir == PDMSCSIREQUESTTXDIR_FROM_DEVICE)
            ScsiIoReq.dxfer_direction = SG_DXFER_FROM_DEV;
        else
            AssertMsgFailed(("Invalid transfer direction %u\n", uTxDir));

        ScsiIoReq.cmd_len     = pRequest->cbCDB;
        ScsiIoReq.mx_sb_len   = pRequest->cbSenseBuffer;
        ScsiIoReq.dxfer_len   = pRequest->cbScatterGather;

        if (pRequest->cScatterGatherEntries > 0)
        {
            if (pRequest->cScatterGatherEntries == 1)
            {
                ScsiIoReq.iovec_count = 0;
                ScsiIoReq.dxferp      = pRequest->paScatterGatherHead[0].pvSeg;
            }
            else
            {
                ScsiIoReq.iovec_count = pRequest->cScatterGatherEntries;

                paSG = (sg_iovec_t *)RTMemAllocZ(pRequest->cScatterGatherEntries * sizeof(sg_iovec_t));
                AssertReturn(paSG, VERR_NO_MEMORY);

                for (unsigned i = 0; i < pRequest->cScatterGatherEntries; i++)
                {
                    paSG[i].iov_base = pRequest->paScatterGatherHead[i].pvSeg;
                    paSG[i].iov_len  = pRequest->paScatterGatherHead[i].cbSeg;
                }
                ScsiIoReq.dxferp = paSG;
            }
        }

        ScsiIoReq.cmdp    = pRequest->pbCDB;
        ScsiIoReq.sbp     = pRequest->pbSenseBuffer;
        ScsiIoReq.timeout = UINT_MAX;
        ScsiIoReq.flags  |= SG_FLAG_DIRECT_IO;

        /* Issue command. */
        rc = ioctl(RTFileToNative(pThis->hDeviceFile), SG_IO, &ScsiIoReq);
        if (rc < 0)
        {
            AssertMsgFailed(("Ioctl failed with rc=%d\n", rc));
        }

        /* Request processed successfully. */
        Log(("Command successfully processed\n"));
        if (ScsiIoReq.iovec_count > 0)
            RTMemFree(paSG);
#endif
    }
    /* Notify device that request finished. */
    rc = pThis->pDevScsiPort->pfnSCSIRequestCompleted(pThis->pDevScsiPort, pRequest, SCSI_STATUS_OK, false, VINF_SUCCESS);
    AssertMsgRC(rc, ("Notifying device above failed rc=%Rrc\n", rc));

    return rc;
}
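
The Linux branch above drives the host device through the SG_IO ioctl. For comparison, a stripped-down standalone INQUIRY through the same interface could look like the following (hypothetical helper, no IPRT, minimal error handling; assumes cbBuf <= 255):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <unistd.h>

static int sgInquiry(const char *pszDev, unsigned char *pbBuf, unsigned cbBuf)
{
    unsigned char abCdb[6] = { 0x12 /* INQUIRY */, 0, 0, 0, (unsigned char)cbBuf, 0 };
    unsigned char abSense[32];
    sg_io_hdr_t   Hdr;

    int fd = open(pszDev, O_RDWR);
    if (fd < 0)
        return -1;

    memset(&Hdr, 0, sizeof(Hdr));
    Hdr.interface_id    = 'S';
    Hdr.cmd_len         = sizeof(abCdb);
    Hdr.cmdp            = abCdb;
    Hdr.dxfer_direction = SG_DXFER_FROM_DEV;   /* data flows from the device */
    Hdr.dxferp          = pbBuf;
    Hdr.dxfer_len       = cbBuf;
    Hdr.sbp             = abSense;
    Hdr.mx_sb_len       = sizeof(abSense);
    Hdr.timeout         = 5000;                /* milliseconds */

    int rc = ioctl(fd, SG_IO, &Hdr);
    close(fd);
    return rc;                                 /* 0 on success, -1 on error */
}
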
Example #15
DECL_FORCE_INLINE(int) rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9;
        if (pThis->hWriter != NIL_RTNATIVETHREAD && pThis->hWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows read access.
     * Do not block further readers if there is a writer waiting, as
     * that will break/deadlock reader recursion.
     */
    if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
        && (   !pThis->cWritesWaiting
            ||  pThis->cReads)
#endif
       )
    {
        pThis->cReads++;
        Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (pThis->hWriter == hNativeSelf)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
        if (RT_FAILURE(rc9))
        {
            RTCritSectLeave(&pThis->CritSect);
            return rc9;
        }
#endif

        pThis->cWriterReads++;
        Assert(pThis->cWriterReads > 0);

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for reading.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                /* Recompute the remaining wait time from the elapsed time. */
                cMillies = cMilliesInitial - (RTMSINTERVAL)(tsDelta / 1000000);
                if (cMillies > cMilliesInitial) /* underflow: already timed out */
                    cMillies = 1;
            }
        }
#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                   cMillies, RTTHREADSTATE_RW_READ, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventMultiWaitNoResume(pThis->ReadEvent, cMillies);
        else
            rcWait = rc = RTSemEventMultiWait(pThis->ReadEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT) /* handle timeout below */
        {
            AssertMsgRC(rc, ("RTSemEventMultiWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did before the loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
            && (   !pThis->cWritesWaiting
                ||  pThis->cReads)
#endif
           )
        {
            pThis->cReads++;
            Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /* failed */
    return rc;
}
Example #16
RTDECL(int) RTSemRWReleaseRead(RTSEMRW hRWSem)
{
    struct RTSEMRWINTERNAL *pThis = hRWSem;

    /*
     * Validate handle.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_SUCCESS(rc))
    {
        if (pThis->hWriter == NIL_RTNATIVETHREAD)
        {
#ifdef RTSEMRW_STRICT
            rc = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
            if (RT_SUCCESS(rc))
#endif
            {
                if (RT_LIKELY(pThis->cReads > 0))
                {
                    pThis->cReads--;

                    /* Kick off a writer if appropriate. */
                    if (    pThis->cWritesWaiting > 0
                        &&  !pThis->cReads)
                    {
                        rc = RTSemEventSignal(pThis->WriteEvent);
                        AssertMsgRC(rc, ("Failed to signal writers on rwsem %p, rc=%Rrc\n", hRWSem, rc));
                    }
                }
                else
                {
                    AssertFailed();
                    rc = VERR_NOT_OWNER;
                }
            }
        }
        else
        {
            RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
            if (pThis->hWriter == hNativeSelf)
            {
                if (pThis->cWriterReads > 0)
                {
#ifdef RTSEMRW_STRICT
                    rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
                    if (RT_SUCCESS(rc))
#endif
                    {
                        pThis->cWriterReads--;
                    }
                }
                else
                {
                    AssertFailed();
                    rc = VERR_NOT_OWNER;
                }
            }
            else
            {
                AssertFailed();
                rc = VERR_NOT_OWNER;
            }
        }

        RTCritSectLeave(&pThis->CritSect);
    }
    else
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));

    return rc;
}
Example #17
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis   = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart         = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows write access.
     */
    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (    !pThis->cReads
        &&  (   (   !pThis->cWrites
                 && (   !pThis->cWritesWaiting /* play fair if we can wait */
                     || !cMillies)
                )
             || pThis->hWriter == hNativeSelf
            )
       )
    {
        /*
         * Reset the reader event semaphore if necessary.
         */
        if (pThis->fNeedResetReadEvent)
        {
            pThis->fNeedResetReadEvent = false;
            rc = RTSemEventMultiReset(pThis->ReadEvent);
            AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
        }

        pThis->cWrites++;
        pThis->hWriter = hNativeSelf;
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, pThis->cWrites == 1);
#endif
        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    /*
     * Signal writer presence.
     */
    if (cMillies != 0)
        pThis->cWritesWaiting++;

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for writing.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                /* Recompute the remaining wait time from the elapsed time. */
                cMillies = cMilliesInitial - (RTMSINTERVAL)(tsDelta / 1000000);
                if (cMillies > cMilliesInitial) /* underflow: already timed out */
                    cMillies = 1;
            }
        }

#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                 cMillies, RTTHREADSTATE_RW_WRITE, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventWaitNoResume(pThis->WriteEvent, cMillies);
        else
            rcWait = rc = RTSemEventWait(pThis->WriteEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (RT_UNLIKELY(RT_FAILURE_NP(rc) && rc != VERR_TIMEOUT)) /* timeouts are handled below */
        {
            AssertMsgRC(rc, ("RTSemEventWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (RT_UNLIKELY(pThis->u32Magic != RTSEMRW_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did prior to this loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (!pThis->cReads && (!pThis->cWrites || pThis->hWriter == hNativeSelf))
        {
            /*
             * Reset the reader event semaphore if necessary.
             */
            if (pThis->fNeedResetReadEvent)
            {
                pThis->fNeedResetReadEvent = false;
                rc = RTSemEventMultiReset(pThis->ReadEvent);
                AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
            }

            pThis->cWrites++;
            pThis->hWriter = hNativeSelf;
            pThis->cWritesWaiting--;
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /*
     * Timeout/error case, clean up.
     */
    if (pThis->u32Magic == RTSEMRW_MAGIC)
    {
        RTCritSectEnter(&pThis->CritSect);
        /* Adjust this counter, whether we got the critsect or not. */
        pThis->cWritesWaiting--;
        RTCritSectLeave(&pThis->CritSect);
    }
    return rc;
}