Example no. 1
/**
 * Disables all host calls, except certain fatal ones.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
    if (ASMAtomicUoIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1)
    {
        /** @todo it might make more sense to just disable logging here, then we
         * won't flush away important bits... but that goes both ways really. */
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = true;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
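The flags save/disable/restore idiom above recurs throughout these examples; here is a minimal sketch of it in isolation (a generic illustration, not taken from the source):

#include <iprt/asm-amd64-x86.h>

/* ASMIntDisableFlags() returns the old EFLAGS/RFLAGS value with interrupts
   now disabled; ASMSetFlags() restores the saved value verbatim, so
   interrupts come back on only if they were on before. */
static void doWithInterruptsDisabledSketch(void)
{
    RTCCUINTREG fSaved = ASMIntDisableFlags();
    /* ... work that must not be interrupted or preempted ... */
    ASMSetFlags(fSaved);
}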
Example no. 2
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;

    /*
     * Disable interrupts to prevent preemption while we grab the per-CPU spin lock.
     * Note! Only take the lock on the first call or we end up spinning forever.
     */
    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();
    RTCPUID     idCpu       = RTMpCpuId();
    if (RT_LIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks))) /* the in-range case is the expected one */
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion < UINT32_MAX / 2);
        if (++g_aPreemptHacks[idCpu].cRecursion == 1)
        {
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
                lck_spin_lock(pSpinLock);
            else
                AssertFailed();
        }
    }
    ASMSetFlags(fSavedFlags);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
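A hedged usage sketch for the function above: callers pair RTThreadPreemptDisable() with RTThreadPreemptRestore() around the critical region, handing both the same stack-allocated state (the function name below is hypothetical):

#include <iprt/thread.h>

static void pinToCurrentCpuSketch(void)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);
    /* ... code that must not migrate between CPUs ... */
    RTThreadPreemptRestore(&State);
}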
Example no. 3
VOID EFIAPI
DebugPrint(IN UINTN ErrorLevel, IN CONST CHAR8 *Format, ...)
{
    CHAR8       szBuf[256];
    VA_LIST     va;
    UINTN       cch;
    RTCCUINTREG SavedFlags;

    /* No pool noise, please. */
    if (ErrorLevel == DEBUG_POOL)
        return;

    VA_START(va, Format);
    cch = AsciiVSPrint(szBuf, sizeof(szBuf), Format, va);
    VA_END(va);

    /* Make sure the string is terminated and doesn't end with a newline. */
    if (cch >= sizeof(szBuf))
        cch = sizeof(szBuf) - 1;
    while (cch > 0 && (szBuf[cch - 1] == '\n' || szBuf[cch - 1] == '\r'))
        cch--;
    szBuf[cch] = '\0';

    SavedFlags = ASMIntDisableFlags();

    VBoxPrintString("dbg/");
    VBoxPrintHex(ErrorLevel, sizeof(ErrorLevel));
    VBoxPrintChar(' ');
    VBoxPrintString(szBuf);
    VBoxPrintChar('\n');

    ASMSetFlags(SavedFlags);

}
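An illustrative call (the level and values are made up): any level other than DEBUG_POOL is emitted with a "dbg/<level> " prefix and exactly one trailing newline, however the format string ends.

DebugPrint(DEBUG_INFO, "MMIO bar %d at 0x%lx\n", 1, (UINT64)0xfe000000);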
Example no. 4
RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); RT_NOREF1(hThread);

    /*
     * Read the globals and check if they are useful.
     */
/** @todo Should we check KPRCB.InterruptRequest and KPRCB.DpcInterruptRequested (older kernels).  */
    uint32_t const offQuantumEnd     = g_offrtNtPbQuantumEnd;
    uint32_t const cbQuantumEnd      = g_cbrtNtPbQuantumEnd;
    uint32_t const offDpcQueueDepth  = g_offrtNtPbDpcQueueDepth;
    if (!offQuantumEnd && !cbQuantumEnd && !offDpcQueueDepth)
        return false;
    Assert((offQuantumEnd && cbQuantumEnd) || (!offQuantumEnd && !cbQuantumEnd));

    /*
     * Disable interrupts so we won't be messed around.
     */
    bool            fPending;
    RTCCUINTREG     fSavedFlags  = ASMIntDisableFlags();

#ifdef RT_ARCH_X86
    PKPCR       pPcr   = (PKPCR)__readfsdword(RT_OFFSETOF(KPCR,SelfPcr));
    uint8_t    *pbPrcb = (uint8_t *)pPcr->Prcb;

#elif defined(RT_ARCH_AMD64)
    /* HACK ALERT! The offset is from windbg/vista64. */
    PKPCR       pPcr   = (PKPCR)__readgsqword(RT_OFFSETOF(KPCR,Self));
    uint8_t    *pbPrcb = (uint8_t *)pPcr->CurrentPrcb;

#else
# error "port me"
#endif

    /* Check QuantumEnd. */
    if (cbQuantumEnd == 1)
    {
        uint8_t volatile *pbQuantumEnd = (uint8_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pbQuantumEnd == TRUE;
    }
    else if (cbQuantumEnd == sizeof(uint32_t))
    {
        uint32_t volatile *pu32QuantumEnd = (uint32_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pu32QuantumEnd != 0;
    }
    else
        fPending = false;

    /* Check DpcQueueDepth. */
    if (    !fPending
        &&  offDpcQueueDepth)
    {
        uint32_t volatile *pu32DpcQueueDepth = (uint32_t volatile *)(pbPrcb + offDpcQueueDepth);
        fPending = *pu32DpcQueueDepth > 0;
    }

    ASMSetFlags(fSavedFlags);
    return fPending;
}
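A hedged usage sketch: a long ring-0 job polls for pending preemption and backs off so the scheduler can run (zeroOnePage() is a hypothetical worker, not part of the source):

#include <iprt/thread.h>

static void zeroPagesSketch(uint32_t cPages)
{
    while (cPages-- > 0)
    {
        zeroOnePage();                              /* hypothetical worker */
        if (RTThreadPreemptIsPending(NIL_RTTHREAD))
            break;                                  /* give the CPU back */
    }
}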
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        for (;;)
        {
            uint32_t fIntSaved = ASMIntDisableFlags();
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    pThis->fIntSaved = fIntSaved;
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            /* Enable interrupts while we sleep. */
            ASMSetFlags(fIntSaved);
            critical_exit();
            DELAY(1);
        }
    }
    else
    {
        for (;;)
        {
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            critical_exit();
            DELAY(1);
        }
    }
}
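A hedged usage sketch: RTSpinlockAcquire() pairs with RTSpinlockRelease(), which clears fLocked and, for interrupt-safe locks, restores the saved flags (the counter below is just illustrative protected data):

#include <iprt/spinlock.h>

static void updateSharedCounterSketch(RTSPINLOCK hSpinlock, uint32_t volatile *pcCounter)
{
    RTSpinlockAcquire(hSpinlock);
    *pcCounter += 1;                /* the data the lock protects */
    RTSpinlockRelease(hSpinlock);
}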
/**
 * Worker for supdrvOSMsrProberModify.
 */
static DECLCALLBACK(void) supdrvLnxMsrProberModifyOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPMSRPROBER               pReq    = (PSUPMSRPROBER)pvUser1;
    register uint32_t           uMsr    = pReq->u.In.uMsr;
    bool const                  fFaster = pReq->u.In.enmOp == SUPMSRPROBEROP_MODIFY_FASTER;
    uint64_t                    uBefore;
    uint64_t                    uWritten;
    uint64_t                    uAfter;
    int                         rcBefore, rcWrite, rcAfter, rcRestore;
    RTCCUINTREG                 fOldFlags;

    /* Initialize result variables. */
    uBefore = uWritten = uAfter    = 0;
    rcWrite = rcAfter  = rcRestore = -EIO;

    /*
     * Do the job.
     */
    fOldFlags = ASMIntDisableFlags();
    ASMCompilerBarrier(); /* paranoia */
    if (!fFaster)
        ASMWriteBackAndInvalidateCaches();

    rcBefore = rdmsrl_safe(uMsr, &uBefore);
    if (rcBefore >= 0)
    {
        register uint64_t uRestore = uBefore;
        uWritten  = uRestore;
        uWritten &= pReq->u.In.uArgs.Modify.fAndMask;
        uWritten |= pReq->u.In.uArgs.Modify.fOrMask;

        rcWrite   = wrmsr_safe(uMsr, RT_LODWORD(uWritten), RT_HIDWORD(uWritten));
        rcAfter   = rdmsrl_safe(uMsr, &uAfter);
        rcRestore = wrmsr_safe(uMsr, RT_LODWORD(uRestore), RT_HIDWORD(uRestore));

        if (!fFaster)
        {
            ASMWriteBackAndInvalidateCaches();
            ASMReloadCR3();
            ASMNopPause();
        }
    }

    ASMCompilerBarrier(); /* paranoia */
    ASMSetFlags(fOldFlags);

    /*
     * Write out the results.
     */
    pReq->u.Out.uResults.Modify.uBefore    = uBefore;
    pReq->u.Out.uResults.Modify.uWritten   = uWritten;
    pReq->u.Out.uResults.Modify.uAfter     = uAfter;
    pReq->u.Out.uResults.Modify.fBeforeGp  = rcBefore  != 0;
    pReq->u.Out.uResults.Modify.fModifyGp  = rcWrite   != 0;
    pReq->u.Out.uResults.Modify.fAfterGp   = rcAfter   != 0;
    pReq->u.Out.uResults.Modify.fRestoreGp = rcRestore != 0;
    RT_ZERO(pReq->u.Out.uResults.Modify.afReserved);
}
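A worked instance of the modify step above: the value written is (uBefore & fAndMask) | fOrMask, so fAndMask clears bits and fOrMask sets them (the constants are made up):

static uint64_t msrModifySketch(void)
{
    uint64_t const uBefore  = UINT64_C(0xdeadbeef00000000);
    uint64_t const fAndMask = UINT64_C(0x00000000ffffffff); /* clear the top half */
    uint64_t const fOrMask  = UINT64_C(0x0000000000000001); /* force bit 0 on */
    return (uBefore & fAndMask) | fOrMask;                  /* == 0x1 */
}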
Example no. 7
/**
 * Try to undo the harm done by the init function.
 *
 * This may leave the system in an unstable state since we might have been
 * hijacking memory below 1MB that is in use by the kernel.
 */
void vmmR0TripleFaultHackTerm(void)
{
    /*
     * Restore overwritten memory.
     */
    if (   g_pvSavedLowCore
        && g_pbLowCore)
        memcpy(g_pbLowCore, g_pvSavedLowCore, PAGE_SIZE);

    if (g_pbPage0)
    {
        g_pbPage0[0x467+0] = RT_BYTE1(g_u32SavedVector);
        g_pbPage0[0x467+1] = RT_BYTE2(g_u32SavedVector);
        g_pbPage0[0x467+2] = RT_BYTE3(g_u32SavedVector);
        g_pbPage0[0x467+3] = RT_BYTE4(g_u32SavedVector);

        g_pbPage0[0x472+0] = RT_BYTE1(g_u16SavedCadIndicator);
        g_pbPage0[0x472+1] = RT_BYTE2(g_u16SavedCadIndicator);
    }

    /*
     * Fix the CMOS.
     */
    if (g_pvSavedLowCore)
    {
        uint32_t fSaved = ASMIntDisableFlags();

        ASMOutU8(0x70, 0x0f);
        ASMOutU8(0x71, 0x0a);

        ASMOutU8(0x70, 0x00);
        ASMInU8(0x71);

        ASMReloadCR3();
        ASMWriteBackAndInvalidateCaches();

        ASMSetFlags(fSaved);
    }

    /*
     * Release resources.
     */
    RTMemFree(g_pvSavedLowCore);
    g_pvSavedLowCore = NULL;

    RTR0MemObjFree(g_hMemLowCore, true /*fFreeMappings*/);
    g_hMemLowCore   = NIL_RTR0MEMOBJ;
    g_hMapLowCore   = NIL_RTR0MEMOBJ;
    g_pbLowCore     = NULL;
    g_HCPhysLowCore = NIL_RTHCPHYS;

    RTR0MemObjFree(g_hMemPage0, true /*fFreeMappings*/);
    g_hMemPage0     = NIL_RTR0MEMOBJ;
    g_hMapPage0     = NIL_RTR0MEMOBJ;
    g_pbPage0       = NULL;
}
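A sketch of the CMOS index/data idiom used above, assuming a standard PC/AT CMOS with the index register at port 0x70 and the data register at 0x71:

#include <iprt/asm-amd64-x86.h>

static uint8_t cmosReadSketch(uint8_t bReg)
{
    ASMOutU8(0x70, bReg);   /* select the CMOS register */
    return ASMInU8(0x71);   /* read its current value */
}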
Example no. 8
/**
 * Internal worker for getting the GIP CPU array index for the calling CPU.
 *
 * @returns Index into SUPGLOBALINFOPAGE::aCPUs or UINT16_MAX.
 * @param   pGip    The GIP.
 */
DECLINLINE(uint16_t) supGetGipCpuIndex(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu;
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very fast. */
        uint16_t cbLim = ASMGetIdtrLimit();
        uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us what we need and more. */
        uint32_t iCpuSet;
        ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction. */
        uint8_t idApic = ASMGetApicId();
        iGipCpu = pGip->aiCpuFromApicId[idApic];
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId() (interrupts are disabled around it to avoid host OS assertions about unsafe CPU number usage). */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif
    return iGipCpu;
}
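A hedged usage sketch: callers translate the index into an aCPUs entry, treating UINT16_MAX (or anything out of range) as "unknown CPU"; returning INT64_MAX mirrors the "no delta" convention seen elsewhere in these examples:

DECLINLINE(int64_t) supGetMyTscDeltaSketch(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu = supGetGipCpuIndex(pGip);
    if (iGipCpu < pGip->cCpus)
        return pGip->aCPUs[iGipCpu].i64TSCDelta;
    return INT64_MAX;
}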
Example no. 9
VOID EFIAPI
DebugAssert(IN CONST CHAR8 *FileName, IN UINTN LineNumber, IN CONST CHAR8 *Description)
{
    RTCCUINTREG SavedFlags = ASMIntDisableFlags();

    VBoxPrintString("EFI Assertion failed! File=");
    VBoxPrintString(FileName ? FileName : "<NULL>");
    VBoxPrintString(" line=0x");
    VBoxPrintHex(LineNumber, sizeof(LineNumber));
    VBoxPrintString("\nDescription: ");
    VBoxPrintString(Description ? Description : "<NULL>");

    ASMOutU8(EFI_PANIC_PORT, 2); /** @todo fix this. */

    ASMSetFlags(SavedFlags);
}
Example no. 10
VOID EFIAPI
DebugAssert(IN CONST CHAR8 *FileName, IN UINTN LineNumber, IN CONST CHAR8 *Description)
{
    RTCCUINTREG SavedFlags = ASMIntDisableFlags();

    ASMOutU8(EFI_PANIC_PORT, EFI_PANIC_CMD_START_MSG);
    VBoxPanicMsgString("EFI Assertion failed!"
                       "\nFile:  ");
    VBoxPanicMsgString(FileName ? FileName : "<NULL>");
    VBoxPanicMsgString("\nLine:  ");
    VBoxPanicMsgDecimalU32((uint32_t)LineNumber);
    VBoxPanicMsgString("\nEDescription: ");
    VBoxPanicMsgString(Description ? Description : "<NULL>");
    ASMOutU8(EFI_PANIC_PORT, EFI_PANIC_CMD_END_MSG);

    ASMSetFlags(SavedFlags);
}
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = ASMIntDisableFlags();
#endif
        mutex_enter(&pThis->Mtx);

        /*
         * Solaris 10 doesn't preserve the interrupt flag, but since we're at PIL_MAX we should be
         * fine and not get interrupts while lock is held. Re-disable interrupts to not upset
         * assertions & assumptions callers might have.
         */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMIntDisable();
        Assert(!ASMIntAreEnabled());
        pThis->fIntSaved = fIntSaved;   /* fIntSaved only exists on x86/amd64 */
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_enter(&pThis->Mtx);

#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
}
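A hedged sketch of the matching release path, derived from the acquire logic above rather than copied from the source:

RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved   = 0;
        mutex_exit(&pThis->Mtx);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);     /* restore what acquire saved */
#else
        RT_NOREF(fIntSaved);
#endif
    }
    else
        mutex_exit(&pThis->Mtx);
}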
Example no. 12
/**
 * Our own log worker function; avoids the dbg/00000xxx prefix and makes it
 * clear which log statements we added.
 *
 * @param   pszFormat           Format string. EFI style!
 * @param   ...                 Arguments referenced in the format string.
 */
VOID EFIAPI
VBoxLogWorker(const char *pszFormat, ...)
{
    CHAR8       szBuf[384];
    VA_LIST     va;
    RTCCUINTREG SavedFlags;

    /* Format it. */
    VA_START(va, pszFormat);
    AsciiVSPrint(szBuf, sizeof(szBuf), pszFormat, va);
    VA_END(va);
    szBuf[sizeof(szBuf) - 1] = '\0';

    /* Output the log string. */
    SavedFlags = ASMIntDisableFlags();

    VBoxPrintString(szBuf);
    VBoxPrintChar('\n');

    ASMSetFlags(SavedFlags);
}
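An illustrative call (the message is made up; %a is the EFI Print specifier for ASCII strings):

VBoxLogWorker("VBoxDxe: %a: mode %d selected", __FUNCTION__, 3);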
Example no. 13
/**
 * Counterpart to VMMRZCallRing3Disable(); re-enables host calls.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
    if (ASMAtomicUoDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0)
    {
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
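A hedged usage sketch: the disable/enable pair brackets EMT code during which a ring-3 call (and hence a log flush) must not happen:

static void emtNoRing3Sketch(PVMCPU pVCpu)
{
    VMMRZCallRing3Disable(pVCpu);
    /* ... code that must not longjmp to ring-3 ... */
    VMMRZCallRing3Enable(pVCpu);
}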
Example no. 14
/**
 * The slow case for SUPReadTsc where we need to apply deltas.
 *
 * Must only be called when deltas are applicable, so please do not call it
 * directly.
 *
 * @returns TSC with delta applied.
 * @param   pGip        Pointer to the GIP.
 *
 * @remarks May be called with interrupts disabled in ring-0!  This is why the
 *          ring-0 code doesn't attempt to figure the delta.
 *
 * @internal
 */
SUPDECL(uint64_t) SUPReadTscWithDelta(PSUPGLOBALINFOPAGE pGip)
{
    uint64_t            uTsc;
    uint16_t            iGipCpu;
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
    AssertCompile(RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx) >= RTCPUSET_MAX_CPUS);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO);

    /*
     * Read the TSC and get the corresponding aCPUs index.
     */
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us all we need, no loops/cli. */
        uint32_t iCpuSet;
        uTsc      = ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very quick, but we need to loop. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint16_t cbLim = ASMGetIdtrLimit();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
            {
                uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
                iCpuSet &= RTCPUSET_MAX_CPUS - 1;
                iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction, requires looping. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint8_t idApic = ASMGetApicId();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetApicId() == idApic))
            {
                iGipCpu = pGip->aiCpuFromApicId[idApic];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(), no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU, no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags(); /* Interrupts are already disabled, but play it safe. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif

    /*
     * If the delta is valid, apply it.
     */
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
    {
        int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
        if (RT_LIKELY(iTscDelta != INT64_MAX))
            return uTsc - iTscDelta;

#ifdef IN_RING3
        /*
         * The delta needs calculating, call supdrv to get the TSC.
         */
        int rc = SUPR3ReadTsc(&uTsc, NULL);
        if (RT_SUCCESS(rc))
            return uTsc;
        AssertMsgFailed(("SUPR3ReadTsc -> %Rrc\n", rc));
        uTsc = ASMReadTSC();
#endif /* IN_RING3 */
    }

    /*
     * This shouldn't happen, especially not in ring-3 and raw-mode context.
     * But if it does, return something that's half useful.
     */
    AssertMsgFailed(("iGipCpu=%d (%#x) cCpus=%d fGetGipCpu=%#x\n", iGipCpu, iGipCpu, pGip->cCpus, pGip->fGetGipCpu));
    return uTsc;
}
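A hedged sketch of the assumed fast-path caller (the real SUPReadTsc may differ): take the slow path only when the GIP says deltas matter, matching the Assert at the top of the function above:

DECLINLINE(uint64_t) supReadTscSketch(PSUPGLOBALINFOPAGE pGip)
{
    if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
        return ASMReadTSC();    /* deltas negligible, plain RDTSC is fine */
    return SUPReadTscWithDelta(pGip);
}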
Example no. 15
DECLHIDDEN(int) rtR0InitNative(void)
{
    /*
     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
     */
    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
    if (RT_SUCCESS(rc))
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /*
         * Detect whether spl*() is preserving the interrupt flag or not.
         * This is a problem on S10.
         */
        RTCCUINTREG uOldFlags = ASMIntDisableFlags();
        int iOld = splr(DISP_LEVEL);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        splx(iOld);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        ASMSetFlags(uOldFlags);
#else
        /* PORTME: See if the amd64/x86 problem applies to this architecture. */
#endif
        /*
         * Mandatory: Preemption offsets.
         */
        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
            goto errorbail;
        }
        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n",    g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n",  g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n",     g_offrtSolThreadId, g_offrtSolThreadId);
        cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n",    g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
        cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n",   g_offrtSolThreadLock, g_offrtSolThreadLock);
        cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n",   g_offrtSolThreadProc, g_offrtSolThreadProc);

        /*
         * Mandatory: CPU cross call infrastructure. Refer to the-solaris-kernel.h for details.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            if (ncpus > IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
        }
        else
        {
            g_frtSolOldIPI = true;
            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
            {
                g_frtSolOldIPIUlong = true;
                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
            }
            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
                        IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
        }

        /*
         * Mandatory: Thread-context hooks.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx",  NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx   = (void *)removectx;
        }
        else
        {
            g_frtSolOldThreadCtx = true;
            g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx_old   = (void *)removectx;
        }

        /*
         * Optional: Timeout hooks.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
                                   (void **)&g_pfnrtR0Sol_timeout_generic);
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
                                   (void **)&g_pfnrtR0Sol_untimeout_generic);
        if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
        {
            static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
            bool iMissingFn = g_pfnrtR0Sol_timeout_generic != NULL; /* index of the missing entry in s_apszFn */
            cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
            g_pfnrtR0Sol_timeout_generic   = NULL;
            g_pfnrtR0Sol_untimeout_generic = NULL;
        }
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
                                   (void **)&g_pfnrtR0Sol_cyclic_reprogram);

        /*
         * Optional: Querying page no-relocation support.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
                                   (void **)&g_pfnrtR0Sol_page_noreloc_supported);

        /*
         * Weak binding failures: contig_free
         */
        if (g_pfnrtR0Sol_contig_free == NULL)
        {
            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
                                            (void **)&g_pfnrtR0Sol_contig_free);
            if (RT_FAILURE(rc))
            {
                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
                goto errorbail;
            }
        }

        g_frtSolInitDone = true;
        return VINF_SUCCESS;
    }
    else
    {
        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
        return rc;
    }

errorbail:
    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
    return rc;
}
/**
 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
 *
 * This will check the current context and see if it's usable for acquiring
 * the mutex, and if so disable preemption and interrupts as required.
 *
 * @returns VINF_SUCCESS or VERR_SEM_BAD_CONTEXT.
 * @param   pState      Output structure.
 * @param   pThis       The spin mutex instance.
 */
static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
{
#ifndef RT_OS_WINDOWS
    RTTHREADPREEMPTSTATE const StateInit = RTTHREADPREEMPTSTATE_INITIALIZER;
#endif
    int rc  = VINF_SUCCESS;

    /** @todo Later #1: When entering in interrupt context and we're not able to
     *        wake up threads from it, we could try switch the lock into pure
     *        spinlock mode. This would require that there are no other threads
     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     *        flag is set.
     *
     *        Later #2: Similarly, it is possible to turn on the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     *        semaphore ownership at interrupt time. We might want to try delay the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE even, since we're fine if we get it...
     */

#ifdef RT_OS_WINDOWS
    /*
     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     */
    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
        return VERR_SEM_BAD_CONTEXT;

    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
        pState->fSpin = true;
    else
    {
        pState->fSpin = false;
        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
    }

#elif defined(RT_OS_SOLARIS)
    /*
     * Solaris:  RTSemEventSignal will do bad stuff on S10 if interrupts are disabled.
     */
    if (!ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2)
    /*
     * OSes on which RTSemEventSignal can be called from any context.
     */
    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#else /* PORTME: Check for context where we cannot wake up threads. */
    /*
     * Default: ASSUME thread can be woken up if interrupts are enabled and
     *          we're not in an interrupt context.
     *          ASSUME that we can go to sleep if preemption is enabled.
     */
    if (    RTThreadIsInInterrupt(NIL_RTTHREAD)
        ||  !ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);
#endif

    /*
     * Disable interrupts if necessary.
     */
    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
    if (pState->fValidFlags)
        pState->fSavedFlags = ASMIntDisableFlags();
    else
        pState->fSavedFlags = 0;

    return rc;
}
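A hedged sketch of the matching leave helper implied by rtSemSpinMutexEnter (the real one may differ): undo in reverse order, restoring interrupts only when they were actually disabled:

static void rtSemSpinMutexLeaveSketch(RTSEMSPINMUTEXSTATE *pState)
{
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);       /* re-enable interrupts */
    RTThreadPreemptRestore(&pState->PreemptState);
}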
Example no. 17
/**
 * Initializes the triple fault / boot hack.
 *
 * Always call vmmR0TripleFaultHackTerm to clean up, even when this call fails.
 *
 * @returns VBox status code.
 */
int vmmR0TripleFaultHackInit(void)
{
    /*
     * Map the first page.
     */
    int rc = RTR0MemObjEnterPhys(&g_hMemPage0, 0, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    AssertRCReturn(rc, rc);
    rc = RTR0MemObjMapKernel(&g_hMapPage0, g_hMemPage0, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    AssertRCReturn(rc, rc);
    g_pbPage0 = (uint8_t *)RTR0MemObjAddress(g_hMapPage0);
    LogRel(("0040:0067 = %04x:%04x\n", RT_MAKE_U16(g_pbPage0[0x467+2],  g_pbPage0[0x467+3]),  RT_MAKE_U16(g_pbPage0[0x467+0],  g_pbPage0[0x467+1]) ));

    /*
     * Allocate some "low core" memory.  If that fails, just grab some memory.
     */
    //rc = RTR0MemObjAllocPhys(&g_hMemLowCore, PAGE_SIZE, _1M - 1);
    //__debugbreak();
    rc = RTR0MemObjEnterPhys(&g_hMemLowCore, 0x7000, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    AssertRCReturn(rc, rc);
    rc = RTR0MemObjMapKernel(&g_hMapLowCore, g_hMemLowCore, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    AssertRCReturn(rc, rc);
    g_pbLowCore = (uint8_t *)RTR0MemObjAddress(g_hMapLowCore);
    g_HCPhysLowCore = RTR0MemObjGetPagePhysAddr(g_hMapLowCore, 0);
    LogRel(("Low core at %RHp mapped at %p\n", g_HCPhysLowCore, g_pbLowCore));

    /*
     * Save memory we'll be overwriting.
     */
    g_pvSavedLowCore = RTMemAlloc(PAGE_SIZE);
    AssertReturn(g_pvSavedLowCore, VERR_NO_MEMORY);
    memcpy(g_pvSavedLowCore, g_pbLowCore, PAGE_SIZE);

    g_u32SavedVector = RT_MAKE_U32_FROM_U8(g_pbPage0[0x467], g_pbPage0[0x467+1], g_pbPage0[0x467+2], g_pbPage0[0x467+3]);
    g_u16SavedCadIndicator = RT_MAKE_U16(g_pbPage0[0x472], g_pbPage0[0x472+1]);

    /*
     * Install the code.
     */
    size_t cbCode = (uintptr_t)&vmmR0TripleFaultHackEnd - (uintptr_t)&vmmR0TripleFaultHackStart;
    AssertLogRelReturn(cbCode <= PAGE_SIZE, VERR_OUT_OF_RANGE);
    memcpy(g_pbLowCore, &vmmR0TripleFaultHackStart, cbCode);

    g_pbPage0[0x467+0] = 0x00;
    g_pbPage0[0x467+1] = 0x70;
    g_pbPage0[0x467+2] = 0x00;
    g_pbPage0[0x467+3] = 0x00;

    g_pbPage0[0x472+0] = 0x34;
    g_pbPage0[0x472+1] = 0x12;

    /*
     * Configure the status port and cmos shutdown command.
     */
    uint32_t fSaved = ASMIntDisableFlags();

    ASMOutU8(0x70, 0x0f);
    ASMOutU8(0x71, 0x0a);

    ASMOutU8(0x70, 0x05);
    ASMInU8(0x71);

    ASMReloadCR3();
    ASMWriteBackAndInvalidateCaches();

    ASMSetFlags(fSaved);

#if 1 /* For testing & debugging. */
    vmmR0TripleFaultHackTripleFault();
#endif

    return VINF_SUCCESS;
}
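A minimal sketch of the warm-reset vector idiom used above, assuming the standard PC/AT conventions: the BDA dword at 0040:0067 holds the resume far pointer, and CMOS register 0x0F is the shutdown status byte where 0x0A means "jump via 40:67 without EOI":

static void setWarmResetVectorSketch(uint8_t *pbPage0, uint16_t uSeg, uint16_t uOff)
{
    pbPage0[0x467+0] = RT_BYTE1(uOff);  /* far pointer: offset low byte first, */
    pbPage0[0x467+1] = RT_BYTE2(uOff);
    pbPage0[0x467+2] = RT_BYTE1(uSeg);  /* ... then the segment. */
    pbPage0[0x467+3] = RT_BYTE2(uSeg);

    ASMOutU8(0x70, 0x0f);               /* select the shutdown status byte */
    ASMOutU8(0x71, 0x0a);               /* resume via the 40:67 vector */
}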
/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR being written.
 * @param   pRange      The range this MSR belongs to.
 * @param   uRawValue   The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    PVM     pVM  = pVCpu->CTX_SUFF(pVM);
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
            bool fEnable = RT_BOOL(uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
#ifdef IN_RING0
            gimR0KvmUpdateSystemTime(pVM, pVCpu);
            return VINF_CPUM_R3_MSR_WRITE;
#elif defined(IN_RC)
            Assert(pVM->cCpus == 1);
            if (fEnable)
            {
                RTCCUINTREG fEFlags  = ASMIntDisableFlags();
                pKvmCpu->uTsc        = TMCpuTickGetNoCheck(pVCpu) | UINT64_C(1);
                pKvmCpu->uVirtNanoTS = TMVirtualGetNoCheck(pVM)   | UINT64_C(1);
                ASMSetFlags(fEFlags);
            }
            return VINF_CPUM_R3_MSR_WRITE;
#else /* IN_RING3 */
            if (!fEnable)
            {
                gimR3KvmDisableSystemTime(pVM);
                pKvmCpu->u64SystemTimeMsr = uRawValue;
                return VINF_SUCCESS;
            }

            /* Is the system-time struct. already enabled? If so, get flags that need preserving. */
            uint8_t fFlags = 0;
            GIMKVMSYSTEMTIME SystemTime;
            RT_ZERO(SystemTime);
            if (   MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr)
                && MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue) == pKvmCpu->GCPhysSystemTime)
            {
                int rc2 = PGMPhysSimpleReadGCPhys(pVM, &SystemTime, pKvmCpu->GCPhysSystemTime, sizeof(GIMKVMSYSTEMTIME));
                if (RT_SUCCESS(rc2))
                    pKvmCpu->fSystemTimeFlags = (SystemTime.fFlags & GIM_KVM_SYSTEM_TIME_FLAGS_GUEST_PAUSED);
            }

            /* Enable and populate the system-time struct. */
            pKvmCpu->u64SystemTimeMsr      = uRawValue;
            pKvmCpu->GCPhysSystemTime      = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue);
            pKvmCpu->u32SystemTimeVersion += 2;
            int rc = gimR3KvmEnableSystemTime(pVM, pVCpu);
            if (RT_FAILURE(rc))
            {
                pKvmCpu->u64SystemTimeMsr = 0;
                return VERR_CPUM_RAISE_GP_0;
            }
            return VINF_SUCCESS;
#endif
        }

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        (uint32_t)(uRawValue >> 32), (uint32_t)(uRawValue & UINT64_C(0xffffffff))));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
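The wall-clock alignment check above, in isolation: RT_ALIGN_64(x, 4) rounds x up to the next multiple of 4, so equality holds exactly when x is already 4-byte aligned:

AssertCompile(RT_ALIGN_64(UINT64_C(0x1000), 4) == UINT64_C(0x1000)); /* already aligned */
AssertCompile(RT_ALIGN_64(UINT64_C(0x1002), 4) == UINT64_C(0x1004)); /* rounded up */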
#include <iprt/asm-amd64-x86.h>


/*********************************************************************************************************************************
*   External Symbols                                                                                                             *
*********************************************************************************************************************************/
extern uint16_t     BS3_DATA_NM(g_apfnBs3TrapHandlers_c16)[256];
extern uint32_t     BS3_DATA_NM(g_apfnBs3TrapHandlers_c32)[256];
extern uint64_t     BS3_DATA_NM(g_apfnBs3TrapHandlers_c64)[256];


#undef Bs3TrapSetHandlerEx
BS3_CMN_DEF(void, Bs3TrapSetHandlerEx,(uint8_t iIdt, PFNBS3TRAPHANDLER16 pfnHandler16,
                                       PFNBS3TRAPHANDLER32 pfnHandler32, PFNBS3TRAPHANDLER64 pfnHandler64))
{
    RTCCUINTREG fFlags = ASMIntDisableFlags();
#if ARCH_BITS == 16
    /* Far real mode pointers as input. */
    g_apfnBs3TrapHandlers_c16[iIdt]  = (uint16_t)pfnHandler16;
    g_apfnBs3TrapHandlers_c32[iIdt]  = Bs3SelRealModeCodeToFlat((PFNBS3FARADDRCONV)pfnHandler32);
    g_apfnBs3TrapHandlers_c64[iIdt]  = Bs3SelRealModeCodeToFlat((PFNBS3FARADDRCONV)pfnHandler64);
#else
    /* Flat pointers as input. */
    g_apfnBs3TrapHandlers_c16[iIdt]  = (uint16_t)Bs3SelFlatCodeToProtFar16((uintptr_t)pfnHandler16);
    g_apfnBs3TrapHandlers_c32[iIdt]  = (uint32_t)(uintptr_t)pfnHandler32;
    g_apfnBs3TrapHandlers_c64[iIdt]  = (uintptr_t)pfnHandler64;
#endif
    ASMSetFlags(fFlags);
}
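A hedged usage sketch (the handler names are hypothetical): install a #UD handler, IDT entry 6, for all three code models at once:

Bs3TrapSetHandlerEx(6 /* #UD */, bs3MyUdHandler_c16, bs3MyUdHandler_c32, bs3MyUdHandler_c64);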