Code Example #1
File: SELMRC.cpp Project: LastRitter/vbox-haiku
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);

    LogFlow(("selmRCGuestTSSWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));

    /*
     * Try emulate the access.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        rc = VINF_SUCCESS;

        /*
         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
         * then check if any of these has changed.
         */
        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
        if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
            &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
            &&  (    pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
                 ||  pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
           )
        {
            Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
            pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
        /* Handle misaligned TSS in a safe manner (just in case). */
        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
                 && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
        {
            struct
            {
                uint32_t esp0;
                uint16_t ss0;
                uint16_t padding_ss0;
            } s;
            AssertCompileSize(s, 8);
            rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
            if (    rc == VINF_SUCCESS
                &&  (    s.esp0 !=  pVM->selm.s.Tss.esp1
                     ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
               )
            {
                Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
                pVM->selm.s.Tss.esp1 = s.esp0;
                pVM->selm.s.Tss.ss1  = s.ss0 | 1;
                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
            }
        }

        /*
         * If VME is enabled we need to check if the interrupt redirection bitmap
         * needs updating.
         */
        if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
            &&  (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
        {
            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
            {
                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
                {
                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                }
                else
                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
            }
            else
            {
                /** @todo not sure how the partial case is handled; probably not allowed */
                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
                if (   offIntRedirBitmap <= offRange
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
                {
                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));

                    /** @todo only update the changed part. */
                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    {
                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                        if (rc != VINF_SUCCESS)
                            break;
                    }
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                }
            }
        }

        /* Return to ring-3 for a full resync if any of the above fails... (?) */
        if (rc != VINF_SUCCESS)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    }
    else
    {
        Assert(RT_FAILURE(rc));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rc;
}
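
A note on the snippet above: the raw-mode "ring compression" trick stores the guest's ring-0 stack selector in the shadow TSS ss1 field with its RPL forced to 1 (pGuestTss->ss0 | 1), and every comparison undoes that bias (ss1 & ~1). Below is a minimal standalone sketch of just that adjustment in plain C; it is not VirtualBox code and the selector value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t guestSs0  = 0x0010;        /* guest's ring-0 stack selector, RPL = 0 (made up) */
    uint16_t shadowSs1 = guestSs0 | 1;  /* stored for ring-1 execution, RPL = 1 */

    /* Change detection mirrors the handler: strip the RPL bias before comparing. */
    int fChanged = guestSs0 != (uint16_t)(shadowSs1 & ~1);
    printf("shadow ss1=%#x changed=%d\n", shadowSs1, fChanged);
    return 0;
}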
Code Example #2
File: vm.c Project: skarpenko/phlox
/* software page fault handler */
static status_t vm_soft_page_fault(addr_t addr, bool is_write, bool is_exec, bool is_user)
{
    vm_address_space_t *aspace;
    vm_mapping_t *mapping;
    vm_upage_t *upage;
    vm_page_t *page;
    unsigned long irqstate;
    status_t err;

    /* get the faulting address space */
    if(is_kernel_address(addr)) {
        aspace = vm_get_kernel_aspace();
    } else {
        aspace = vm_get_current_user_aspace();
        if(!aspace)
            panic("vm_soft_page_fault: no user address space!\n");
    }

    /* increment address space faults counter */
    atomic_inc((atomic_t*)&aspace->faults_count);

    /** TODO: spinlocks are a temporary solution here. **/

    /* acquire lock before touching address space */
    irqstate = spin_lock_irqsave(&aspace->lock);

    /* get the faulting mapping */
    err = vm_aspace_get_mapping(aspace, addr, &mapping);
    if(err != NO_ERROR)
        panic("vm_soft_page_fault: can't get mapping at address %x, err = %x!\n", addr, err);

    /* this page fault handler deals only with mapped objects */
    if(mapping->type != VM_MAPPING_TYPE_OBJECT)
        panic("vm_soft_page_fault: wrong mapping type!\n");

    /* lock mapped object */
    spin_lock(&mapping->object->lock);

    /* get the universal page that maps its data */
    err = vm_object_get_or_add_upage(mapping->object,
                                     addr - mapping->start + mapping->offset,
                                     &upage);
    if(err != NO_ERROR)
        panic("vm_soft_page_fault: can't get upage, err = %x!\n", err);

    /* allocate new physical page or just map existing page */
    if(upage->state == VM_UPAGE_STATE_UNWIRED) {
        /* The upage is not wired to a physical page,
         * so allocate a new one.
         */
        page = vm_page_alloc(VM_PAGE_STATE_CLEAR);
        if(page == NULL)
           panic("vm_soft_page_fault: out of physical memory!\n");
        /* stick physical page into upage */
        upage->state = VM_UPAGE_STATE_RESIDENT;
        upage->ppn = page->ppn;
    } else if(upage->state == VM_UPAGE_STATE_RESIDENT) {
        /* The upage already has a resident physical page;
         * just look it up for mapping.
         */
        page = vm_page_lookup(upage->ppn);
        if(page == NULL)
            panic("vm_soft_page_fault: wrong physical page number!\n");
    } else {
        /* all other upage states are not supported for now */
        panic("vm_soft_page_fault: invalid universal page state!\n");
        page = NULL; /* keep compiler happy */
    }

    /* now unlock object */
    spin_unlock(&mapping->object->lock);

    /* ... and lock translation map */
    aspace->tmap.ops->lock(&aspace->tmap);

    /* map page into address space */
    aspace->tmap.ops->map(&aspace->tmap, addr, PAGE_ADDRESS(page->ppn), mapping->protect);

    /* unlock translation map */
    aspace->tmap.ops->unlock(&aspace->tmap);

    /* .. and finally unlock address space */
    spin_unlock_irqrstor(&aspace->lock, irqstate);

    /* return address space to the kernel */
    vm_put_aspace(aspace);

    /* page fault handled successfully */
    return NO_ERROR;
}
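
The handler above is a classic demand-paging path: resolve the mapping, get or create the universal page (upage) backing the faulting offset, then either wire a fresh zeroed frame or reuse the resident one, and finally enter it into the translation map. A minimal sketch of that state decision follows; the types and the allocator callback are hypothetical stand-ins for Phlox's vm_upage_t and vm_page_alloc(VM_PAGE_STATE_CLEAR).

#include <stddef.h>

enum upage_state { UPAGE_UNWIRED, UPAGE_RESIDENT };      /* stand-in states */

struct upage {
    enum upage_state state;
    unsigned long    ppn;    /* physical page number once resident */
};

#define PPN_INVALID ((unsigned long)-1)

/* Return the ppn to map for this upage, wiring a zeroed frame on first touch. */
static unsigned long resolve_upage(struct upage *up,
                                   unsigned long (*alloc_zeroed_frame)(void))
{
    if (up->state == UPAGE_UNWIRED) {
        unsigned long ppn = alloc_zeroed_frame();
        if (ppn == PPN_INVALID)
            return PPN_INVALID;          /* out of physical memory */
        up->ppn   = ppn;
        up->state = UPAGE_RESIDENT;      /* stick the frame into the upage */
    }
    return up->ppn;                      /* resident: reuse the existing frame */
}

The real handler additionally holds the mapped object's lock across this decision, so two CPUs cannot both wire the same upage.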
Code Example #3
File: PATMGuest.cpp Project: bayasist/vbox
/**
 * Check Windows XP sysenter heuristics and install a patch.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    GC Instruction pointer for sysenter
 * @param   pPatchRec   Patch structure
 *
 */
int PATMPatchSysenterXP(PVM pVM, RTGCPTR32 pInstrGC, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO  pPatch = &pPatchRec->patch;
    uint8_t     uTemp[16];
    RTGCPTR32   lpfnKiFastSystemCall, lpfnKiIntSystemCall = 0; /* (initializing it to shut up warning.) */
    int         rc, i;
    PVMCPU      pVCpu = VMMGetCpu0(pVM);

    Assert(sizeof(uTemp) > sizeof(uFnKiIntSystemCall));
    Assert(sizeof(uTemp) > sizeof(uFnKiFastSystemCall));

    /* Guest OS specific patch; check heuristics first */

    /* check the epilog of KiFastSystemCall */
    lpfnKiFastSystemCall = pInstrGC - 2;
    rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, lpfnKiFastSystemCall, sizeof(uFnKiFastSystemCall));
    if (    RT_FAILURE(rc)
        ||  memcmp(uFnKiFastSystemCall, uTemp, sizeof(uFnKiFastSystemCall)))
    {
        return VERR_PATCHING_REFUSED;
    }

    /* Now search for KiIntSystemCall */
    for (i = 0; i < 64; i++)
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, pInstrGC + i, sizeof(uFnKiIntSystemCall));
        if (RT_FAILURE(rc))
        {
            break;
        }
        if (!memcmp(uFnKiIntSystemCall, uTemp, sizeof(uFnKiIntSystemCall)))
        {
            lpfnKiIntSystemCall = pInstrGC + i;
            /* Found it! */
            break;
        }
    }
    if (i == 64)
    {
        Log(("KiIntSystemCall not found!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    if (PAGE_ADDRESS(lpfnKiFastSystemCall) != PAGE_ADDRESS(lpfnKiIntSystemCall))
    {
        Log(("KiFastSystemCall and KiIntSystemCall not in the same page!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    /* Make a copy of the guest code bytes that will be overwritten. */
    rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aPrivInstr, pPatch->pPrivInstrGC, SIZEOF_NEARJUMP32);
    AssertRC(rc);

    /* Now we simply jump from the fast version to the 'old and slow' system call */
    uTemp[0] = 0xE9;
    *(RTGCPTR32 *)&uTemp[1] = lpfnKiIntSystemCall - (pInstrGC + SIZEOF_NEARJUMP32);
    rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, pInstrGC, uTemp, SIZEOF_NEARJUMP32);
    if (RT_FAILURE(rc))
    {
        Log(("PGMPhysSimpleDirtyWriteGCPtr failed with rc=%Rrc!!\n", rc));
        return VERR_PATCHING_REFUSED;
    }

#ifdef LOG_ENABLED
    Log(("Sysenter Patch code ----------------------------------------------------------\n"));
    PATMP2GLOOKUPREC cacheRec;
    RT_ZERO(cacheRec);
    cacheRec.pPatch = pPatch;

    patmr3DisasmCodeStream(pVM, pInstrGC, pInstrGC, patmr3DisasmCallback, &cacheRec);
    /* Free leftover lock if any. */
    if (cacheRec.Lock.pvMap)
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
    Log(("Sysenter Patch code ends -----------------------------------------------------\n"));
#endif

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;
}
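
The patch itself is nothing more than a 5-byte near jump from the sysenter stub to KiIntSystemCall: opcode 0xE9 followed by a 32-bit displacement counted from the end of the jump, which is exactly what the uTemp[] assembly above builds. Here is a self-contained sketch of that encoding, assuming SIZEOF_NEARJUMP32 is 5 and using invented addresses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NEARJUMP32_SIZE 5   /* 0xE9 + 4-byte rel32; stand-in for SIZEOF_NEARJUMP32 */

static void encode_jmp_rel32(uint8_t abBuf[NEARJUMP32_SIZE], uint32_t uSrc, uint32_t uDst)
{
    abBuf[0] = 0xE9;                                 /* JMP rel32 */
    uint32_t rel = uDst - (uSrc + NEARJUMP32_SIZE);  /* relative to the next instruction */
    memcpy(&abBuf[1], &rel, sizeof(rel));
}

int main(void)
{
    uint8_t abJmp[NEARJUMP32_SIZE];
    encode_jmp_rel32(abJmp, 0x77f75a00 /* made-up source */, 0x77f75b40 /* made-up target */);
    for (unsigned i = 0; i < NEARJUMP32_SIZE; i++)
        printf("%02x ", abJmp[i]);
    printf("\n");
    return 0;
}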
Code Example #4
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t     const cpl            = CPUMGetGuestCPL(pVCpu);
    bool         const fPatchCode     = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE       pPATMGCState   = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases.  So,
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
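
The dummy-write test at the top of the handler is worth isolating: a write may be passed through untouched when it stays within a single page and the bytes being written are identical to what is already there. A minimal sketch with a stand-in for the PAGE_ADDRESS macro, assuming 4 KiB pages:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MY_PAGE_SHIFT 12   /* 4 KiB pages (assumption) */
#define MY_PAGE_ADDRESS(p) ((uintptr_t)(p) & ~(uintptr_t)((1u << MY_PAGE_SHIFT) - 1))

/* True when the write stays on one page and changes nothing. */
static bool is_dummy_write(const void *pvPtr, const void *pvBuf, size_t cbBuf)
{
    return MY_PAGE_ADDRESS(pvPtr) == MY_PAGE_ADDRESS((const uint8_t *)pvPtr + cbBuf - 1)
        && memcmp(pvPtr, pvBuf, cbBuf) == 0;
}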