Example #1
/**
 * Rendezvous callback that will be called once.
 *
 * @returns VBox strict status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU of the calling EMT.
 * @param   pvUser              Pointer to a VMCPUID with the requester's ID.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3SharedModuleRegRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    VMCPUID idCpu = *(VMCPUID *)pvUser;

    /* Execute on the VCPU that issued the original request to make sure we're in the right cr3 context. */
    if (pVCpu->idCpu != idCpu)
    {
        Assert(pVM->cCpus > 1);
        return VINF_SUCCESS;
    }


    /* Flush all pending handy page operations before changing any shared page assignments. */
    int rc = PGMR3PhysAllocateHandyPages(pVM);
    AssertRC(rc);

    /*
     * Take the PGM lock here in ring-3 as the ring-0 path invoked below
     * cannot deal with busy locks.
     */
    LogFlow(("pgmR3SharedModuleRegRendezvous: start (%d)\n", pVM->pgm.s.cSharedPages));

    pgmLock(pVM);
    pgmR3PhysAssertSharedPageChecksums(pVM);
    rc = GMMR3CheckSharedModules(pVM);
    pgmR3PhysAssertSharedPageChecksums(pVM);
    pgmUnlock(pVM);
    AssertLogRelRC(rc);

    LogFlow(("pgmR3SharedModuleRegRendezvous: done (%d)\n", pVM->pgm.s.cSharedPages));
    return rc;
}
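
For context, a rendezvous callback like this is normally driven through VMMR3EmtRendezvous. Below is a minimal caller sketch; the dispatch flag and the function name pgmR3SharedModuleCheckSketch are assumptions, the point being how the requester's VCPU ID travels to the callback via pvUser.

/* Hypothetical caller: kick off the rendezvous, handing the requester's
   VCPU ID to the callback so only that EMT performs the actual check.
   Passing a stack address is fine since the rendezvous is synchronous. */
static int pgmR3SharedModuleCheckSketch(PVM pVM, PVMCPU pVCpu)
{
    VMCPUID idCpu = pVCpu->idCpu; /* read by the callback via pvUser */
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
                              pgmR3SharedModuleRegRendezvous, &idCpu);
}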
Example #2
/**
 * Yield the critical section if someone is waiting on it.
 *
 * When yielding, we'll leave the critical section and try to make sure the
 * other waiting threads get a chance to enter before we reclaim it.
 *
 * @retval  true if yielded.
 * @retval  false if not yielded.
 * @param   pCritSect           The critical section.
 */
VMMR3DECL(bool) PDMR3CritSectYield(PPDMCRITSECT pCritSect)
{
    AssertPtrReturn(pCritSect, false);
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));

    /* No recursion allowed here. */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    AssertReturn(cNestings == 1, false);

    int32_t const cLockers  = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
    if (cLockers < cNestings)
        return false;

#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
#endif
    PDMCritSectLeave(pCritSect);

    /*
     * If we're lucky, one of the waiters has already entered the lock.
     * We spin a little bit in the hope that this happens, so we can avoid
     * the yield detour.
     */
    if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
    {
        int cLoops = 20;
        while (   cLoops > 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers)  >= 0)
        {
            ASMNopPause();
            cLoops--;
        }
        if (cLoops == 0)
            RTThreadYield();
    }

#ifdef PDMCRITSECT_STRICT
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_IGNORED,
                                   SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
#else
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
#endif
    AssertLogRelRC(rc);
    return true;
}
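
A usage sketch for the yield helper: a long-running batch that holds the critical section but periodically offers it to waiters. The helper devProcessOneItem and the batch function are hypothetical; only the enter/yield/leave pattern is the point.

/* Hypothetical: process a large batch while letting waiters in now and then. */
static void devProcessBatchSketch(PPDMCRITSECT pCritSect, uint32_t cItems)
{
    PDMCritSectEnter(pCritSect, VERR_IGNORED); /* ring-3: blocks, rcBusy unused */
    for (uint32_t i = 0; i < cItems; i++)
    {
        devProcessOneItem(i);                  /* hypothetical work item */
        if ((i & 127) == 127)                  /* every 128 items... */
            PDMR3CritSectYield(pCritSect);     /* cheap when nobody is waiting */
    }
    PDMCritSectLeave(pCritSect);
}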
Example #3
/**
 * Rendezvous callback that will be called once.
 *
 * @returns VBox strict status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU of the calling EMT.
 * @param   pvUser              The new g_aHaltMethods index.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    PUVM        pUVM = pVM->pUVM;
    uintptr_t   i    = (uintptr_t)pvUser;
    Assert(i < RT_ELEMENTS(g_aHaltMethods));
    NOREF(pVCpu);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /* Assert that the failure fallback is where we expect. */
    Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
    Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);

    /*
     * Init the new one.
     */
    int rc = VINF_SUCCESS;
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        rc = g_aHaltMethods[i].pfnInit(pUVM);
        if (RT_FAILURE(rc))
        {
            /* Fall back on the bootstrap method. This requires no
               init/term (see assertion above), and will always work. */
            AssertLogRelRC(rc);
            i = 0;
        }
    }

    /*
     * Commit it.
     */
    pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);

    return rc;
}
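
This callback too is presumably dispatched through the EMT rendezvous machinery. A plausible sketch of the call site follows; the flag choice is an assumption based on the "called once" doc comment above.

/* Hypothetical dispatch: run the halt-method switch exactly once, with the
   EMTs quiesced, passing the g_aHaltMethods index through pvUser. */
static int vmR3SwitchHaltMethodSketch(PVM pVM, uintptr_t i)
{
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
                              vmR3SetHaltMethodCallback, (void *)i);
}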
Example #4
/**
 * Frees allocated pages, for bailing out on failure.
 *
 * This will not call VMSetError on failure but will use AssertLogRel instead.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pAllocReq   The allocation request to undo.
 */
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
{
    uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
    PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
    AssertLogRelReturnVoid(pReq);

    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = cb;
    pReq->enmAccount = pAllocReq->enmAccount;
    pReq->cPages = pAllocReq->cPages;
    uint32_t iPage = pAllocReq->cPages;
    while (iPage-- > 0)
    {
        Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
        pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
    }

    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    AssertLogRelRC(rc);

    RTMemTmpFree(pReq);
}
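
A sketch of the bail-out pattern this helper exists for: allocate, attempt a follow-up step, and undo the allocation if that step fails. somePostAllocStep is a hypothetical stand-in, and GMMR3AllocatePagesPerform is assumed to be the matching allocation call.

/* Hypothetical error path: undo a successful allocation when a later step fails. */
static int allocateWithUndoSketch(PVM pVM, PGMMALLOCATEPAGESREQ pAllocReq)
{
    int rc = GMMR3AllocatePagesPerform(pVM, pAllocReq);
    if (RT_SUCCESS(rc))
    {
        rc = somePostAllocStep(pVM, pAllocReq);      /* hypothetical */
        if (RT_FAILURE(rc))
            GMMR3FreeAllocatedPages(pVM, pAllocReq); /* logs and asserts, no VMSetError */
    }
    return rc;
}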
Example #5
/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM    pUVM = pUVCpu->pUVM;
    int     rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create.
             */
            if (    (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                &&  pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests that need servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (See the checks after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF ones) can potentially
         * resume or start the VM; in that case we'll get a change in the VM
         * status indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM)
        {
            PVM     pVM   = pUVM->pVM;
            PVMCPU  pVCpu = &pVM->aCpus[idCpu];
            if (    pVM->enmVMState == VMSTATE_RUNNING
                &&  VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                if (EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION)
                    vmR3SetGuruMeditation(pVM);
            }
        }

    } /* forever */


    /*
     * Cleanup and exit.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (   idCpu == 0
        && pUVM->pVM)
    {
        PVM pVM = pUVM->pVM;
        vmR3SetTerminated(pVM);
        pUVM->pVM = NULL;

        /** @todo SMP: This isn't 100% safe. We should wait for the other
         *        threads to finish before destroying the VM. */
        int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
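
The WithId suffix suggests the registered thread procedure is a thin wrapper that unpacks the per-VCPU structure. A minimal sketch of such a wrapper, assuming the conventional pvUser-carries-pUVCpu pattern:

/* Assumed thread entry point: recover the per-VCPU structure and delegate,
   passing the VCPU ID along purely so it shows up in backtraces. */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvUser)
{
    PUVMCPU pUVCpu = (PUVMCPU)pvUser;
    return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
}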
Example #6
/**
 * Populate DBGF_AS_RC with PATM symbols.
 *
 * Called by dbgfR3AsLazyPopulate when DBGF_AS_RC or DBGF_AS_RC_AND_GC_GLOBAL is
 * accessed for the first time.
 *
 * @param   pVM         The cross context VM structure.
 * @param   hDbgAs      The DBGF_AS_RC address space handle.
 */
VMMR3_INT_DECL(void) PATMR3DbgPopulateAddrSpace(PVM pVM, RTDBGAS hDbgAs)
{
    AssertReturnVoid(VM_IS_RAW_MODE_ENABLED(pVM));

    /*
     * Add a fake debug module for the PATMGCSTATE structure.
     */
    RTDBGMOD hDbgMod;
    int rc = RTDbgModCreate(&hDbgMod, "patmgcstate", sizeof(PATMGCSTATE), 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uVMFlags,                  "uVMFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uPendingAction,            "uPendingAction");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uPatchCalls,               "uPatchCalls");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uScratch,                  "uScratch");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEFlags,               "uIretEFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretCS,                   "uIretCS");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEIP,                  "uIretEIP");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Psp,                       "Psp");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, fPIF,                      "fPIF");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCPtrInhibitInterrupts,    "GCPtrInhibitInterrupts");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallPatchTargetAddr,     "GCCallPatchTargetAddr");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallReturnAddr,          "GCCallReturnAddr");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEAX,              "Restore.uEAX");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uECX,              "Restore.uECX");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEDI,              "Restore.uEDI");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.eFlags,            "Restore.eFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uFlags,            "Restore.uFlags");

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pGCStateGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
        RTDbgModRelease(hDbgMod);
    }

    /*
     * Add something for the stats so we get some kind of symbols for
     * references to them while disassembling patches.
     */
    rc = RTDbgModCreate(&hDbgMod, "patmstats", PATM_STAT_MEMSIZE, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        ADD_FUNC(hDbgMod, pVM->patm.s.pStatsGC, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, "PATMMemStatsStart");

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pStatsGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
        RTDbgModRelease(hDbgMod);
    }

    /*
     * Add a fake debug module for the patches and stack.
     */
    rc = RTDbgModCreate(&hDbgMod, "patches", pVM->patm.s.cbPatchMem + PATM_STACK_TOTAL_SIZE + PAGE_SIZE, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        pVM->patm.s.hDbgModPatchMem = hDbgMod;
        patmR3DbgAddPatches(pVM, hDbgMod);

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pPatchMemGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
    }
}
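
The ADD_MEMBER and ADD_FUNC helpers are not part of this excerpt. A plausible reconstruction on top of RTDbgModSymbolAdd, with the exact offset and size handling treated as assumptions:

/* Assumed helpers: register a struct member, or a function range within the
   module, as a symbol in the fake debug module (segment 0, offset-based). */
#define ADD_MEMBER(a_hDbgMod, a_Struct, a_Member, a_pszName) \
    do { \
        int rcAdd = RTDbgModSymbolAdd(a_hDbgMod, a_pszName, 0 /*iSeg*/, \
                                      RT_OFFSETOF(a_Struct, a_Member), \
                                      RT_SIZEOFMEMB(a_Struct, a_Member), \
                                      0 /*fFlags*/, NULL /*piOrdinal*/); \
        AssertRC(rcAdd); \
    } while (0)

#define ADD_FUNC(a_hDbgMod, a_BaseRCPtr, a_FuncRCPtr, a_cbFunc, a_pszName) \
    do { \
        int rcAdd = RTDbgModSymbolAdd(a_hDbgMod, a_pszName, 0 /*iSeg*/, \
                                      (a_FuncRCPtr) - (a_BaseRCPtr), \
                                      a_cbFunc, 0 /*fFlags*/, NULL /*piOrdinal*/); \
        AssertRC(rcAdd); \
    } while (0)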