Example 1
VOID
FASTCALL
KiEnterV86Mode(IN ULONG_PTR StackFrameUnaligned)
{
    PKTHREAD Thread;
    PKV8086_STACK_FRAME StackFrame = (PKV8086_STACK_FRAME)(ROUND_UP(StackFrameUnaligned - 4, 16) + 4);
    PKTRAP_FRAME TrapFrame = &StackFrame->TrapFrame;
    PKV86_FRAME V86Frame = &StackFrame->V86Frame;
    PFX_SAVE_AREA NpxFrame = &StackFrame->NpxArea;

    ASSERT((ULONG_PTR)NpxFrame % 16 == 0);

    /* Build fake user-mode trap frame */
    TrapFrame->SegCs = KGDT_R0_CODE | RPL_MASK;
    TrapFrame->SegEs = TrapFrame->SegDs = TrapFrame->SegFs = TrapFrame->SegGs = 0;
    TrapFrame->ErrCode = 0;

    /* Get the current thread and remember its NPX save area */
    Thread = KeGetCurrentThread();
    V86Frame->ThreadStack = KiGetThreadNpxArea(Thread);

    /* Save TEB addresses */
    V86Frame->ThreadTeb = Thread->Teb;
    V86Frame->PcrTeb = KeGetPcr()->NtTib.Self;

    /* Save return EIP */
    TrapFrame->Eip = (ULONG_PTR)Ki386BiosCallReturnAddress;

    /* Save our stack (after the frames) */
    TrapFrame->Esi = StackFrameUnaligned;
    TrapFrame->Edi = (ULONG_PTR)_AddressOfReturnAddress() + 4;

    /* Sanitize EFlags and enable interrupts */
    TrapFrame->EFlags = __readeflags() & 0x60DD7;
    TrapFrame->EFlags |= EFLAGS_INTERRUPT_MASK;

    /* Fill out the rest of the frame */
    TrapFrame->HardwareSegSs = KGDT_R3_DATA | RPL_MASK;
    TrapFrame->HardwareEsp = 0x11FFE;
    TrapFrame->ExceptionList = EXCEPTION_CHAIN_END;
    TrapFrame->Dr7 = 0;

    /* Set some debug fields if trap debugging is enabled */
    KiFillTrapFrameDebug(TrapFrame);

    /* Disable interrupts */
    _disable();

    /* Copy the thread's NPX frame */
    RtlCopyMemory(NpxFrame, V86Frame->ThreadStack, sizeof(FX_SAVE_AREA));

    /* Clear exception list */
    KeGetPcr()->NtTib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Set new ESP0 */
    KeGetPcr()->TSS->Esp0 = (ULONG_PTR)&TrapFrame->V86Es;

    /* Set new initial stack */
    Thread->InitialStack = V86Frame;

    /* Set VDM TEB */
    Thread->Teb = (PTEB)TRAMPOLINE_TEB;
    KiSetTebBase(KeGetPcr(), (PVOID)TRAMPOLINE_TEB);

    /* Enable interrupts */
    _enable();

    /* Start VDM execution */
    NtVdmControl(VdmStartExecution, NULL);

    /* Exit to V86 mode */
    KiEoiHelper(TrapFrame);
}
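
Note on the frame alignment above: rounding (StackFrameUnaligned - 4) up to 16 and then adding the 4 back positions the KV8086_STACK_FRAME so that its NpxArea member lands on a 16-byte boundary, which the ASSERT verifies (the FX_SAVE_AREA used with FXSAVE needs 16-byte alignment). A minimal sketch of ROUND_UP, assuming the conventional power-of-two definition:

/* Sketch of a conventional power-of-two rounding macro; the kernel's actual
   ROUND_UP may be written differently but behaves the same for the 16-byte
   case used here. */
#define ROUND_UP(x, align)  (((x) + (align) - 1) & ~((align) - 1))
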
Example 2
ULONG_PTR
FASTCALL
KiExitV86Mode(IN PKTRAP_FRAME TrapFrame)
{
    PKV8086_STACK_FRAME StackFrame;
    PKTHREAD Thread;
    PKTRAP_FRAME PmTrapFrame;
    PKV86_FRAME V86Frame;
    PFX_SAVE_AREA NpxFrame;

    /* Get the stack frame back */
    StackFrame = CONTAINING_RECORD(TrapFrame->Esi, KV8086_STACK_FRAME, V86Frame);
    PmTrapFrame = &StackFrame->TrapFrame;
    V86Frame = &StackFrame->V86Frame;
    NpxFrame = &StackFrame->NpxArea;

    /* Copy the FPU frame back */
    Thread = KeGetCurrentThread();
    RtlCopyMemory(KiGetThreadNpxArea(Thread), NpxFrame, sizeof(FX_SAVE_AREA));

    /* Set initial stack back */
    Thread->InitialStack = (PVOID)((ULONG_PTR)V86Frame->ThreadStack + sizeof(FX_SAVE_AREA));

    /* Set ESP0 back in the KTSS */
    KeGetPcr()->TSS->Esp0 = (ULONG_PTR)&PmTrapFrame->V86Es;

    /* Restore TEB addresses */
    Thread->Teb = V86Frame->ThreadTeb;
    KiSetTebBase(KeGetPcr(), V86Frame->ThreadTeb);

    /* Enable interrupts and return a pointer to the trap frame */
    _enable();
    return (ULONG_PTR)PmTrapFrame;
}
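
The CONTAINING_RECORD step above recovers the whole KV8086_STACK_FRAME from the address of its V86Frame member saved in the trap frame. For reference, the standard Windows/ReactOS macro expands roughly as:

/* Subtract the member's offset from the member's address to get back to the
   start of the enclosing structure. */
#define CONTAINING_RECORD(address, type, field) \
    ((type *)((PCHAR)(address) - (ULONG_PTR)(&((type *)0)->field)))
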
Example 3
DECLSPEC_NORETURN
VOID
FASTCALL
KiTrap07Handler(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread, NpxThread;
    PFX_SAVE_AREA SaveArea, NpxSaveArea;
    ULONG Cr0;
    
    /* Save trap frame */
    KiEnterTrap(TrapFrame);

    /* Try to handle NPX delay load */
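    /* (Trap 0x07 is the x86 #NM "device not available" fault: it is raised
     * when a floating-point instruction executes while CR0.TS or CR0.EM is
     * set, which is how the kernel defers loading a thread's FPU state until
     * the thread actually uses the FPU.) */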
    while (TRUE)
    {
        /* Get the current thread */
        Thread = KeGetCurrentThread();

        /* Get the NPX frame */
        SaveArea = KiGetThreadNpxArea(Thread);

        /* Check if emulation is enabled */
        if (SaveArea->Cr0NpxState & CR0_EM)
        {
            /* Not implemented */
            UNIMPLEMENTED;
            while (TRUE);
        }
    
        /* Save CR0 and check NPX state */
        Cr0 = __readcr0();
        if (Thread->NpxState != NPX_STATE_LOADED)
        {
            /* Update CR0 */
            Cr0 &= ~(CR0_MP | CR0_EM | CR0_TS);
            __writecr0(Cr0);
        
            /* Get the NPX thread */
            NpxThread = KeGetCurrentPrcb()->NpxThread;
            if (NpxThread)
            {
                /* Get the NPX frame */
                NpxSaveArea = KiGetThreadNpxArea(NpxThread);
                
                /* Save FPU state */
                DPRINT("FIXME: Save FPU state: %p\n", NpxSaveArea);
                //Ke386SaveFpuState(NpxSaveArea);

                /* Mark the previous owner's NPX state as not loaded */
                NpxThread->NpxState = NPX_STATE_NOT_LOADED;
            }

            /* Load FPU state */
            //Ke386LoadFpuState(SaveArea);
        
            /* Update NPX state */
            Thread->NpxState = NPX_STATE_LOADED;
            KeGetCurrentPrcb()->NpxThread = Thread;
        
            /* Enable interrupts */
            _enable();
        
            /* Check if CR0 needs to be reloaded due to context switch */
            if (!SaveArea->Cr0NpxState) KiEoiHelper(TrapFrame);
        
            /* Otherwise, we need to reload CR0, disable interrupts */
            _disable();
        
            /* Reload CR0 */
            Cr0 = __readcr0();
            Cr0 |= SaveArea->Cr0NpxState;
            __writecr0(Cr0);
        
            /* Now restore interrupts and check for TS */
            _enable();
            if (Cr0 & CR0_TS) KiEoiHelper(TrapFrame);
        
            /* We're still here -- clear TS and try again */
            __writecr0(__readcr0() & ~CR0_TS);
            _disable();
        }
        else
        {
            /* This is an actual fault, not a lack of FPU state */
            break;
        }
    }
    
    /* TS should not be set */
    if (Cr0 & CR0_TS)
    {
        /*
         * If it's incorrectly set, then maybe the state is actually still valid
         * but we could've lost track of it due to a BIOS call.
         * Make sure MP is still set, which should verify the theory.
         */
        if (Cr0 & CR0_MP)
        {
            /* Indeed, the state is actually still valid, so clear TS */
            __writecr0(__readcr0() & ~CR0_TS);
            KiEoiHelper(TrapFrame);
        }
        
        /* Otherwise, something strange is going on */
        KeBugCheckWithTf(TRAP_CAUSE_UNKNOWN, 2, Cr0, 0, 0, TrapFrame);
    }
    
    /* It's not a delayed load, so process this trap as an NPX fault */
    KiNpxHandler(TrapFrame, Thread, SaveArea);
}
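
KiGetThreadNpxArea, used throughout this handler, resolves a thread's FPU save area. A sketch under the assumption, as on ReactOS x86, that the FX_SAVE_AREA is carved out at the top of the thread's kernel stack, immediately below InitialStack:

/* Sketch of the per-thread NPX area lookup */
#define KiGetThreadNpxArea(Thread) \
    ((PFX_SAVE_AREA)((ULONG_PTR)(Thread)->InitialStack - sizeof(FX_SAVE_AREA)))
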
Example 4
VOID
NTAPI
INIT_FUNCTION
KiInitMachineDependent(VOID)
{
    ULONG CpuCount;
    BOOLEAN FbCaching = FALSE;
    NTSTATUS Status;
    ULONG ReturnLength;
    ULONG i, Affinity, Sample = 0;
    PFX_SAVE_AREA FxSaveArea;
    ULONG MXCsrMask = 0xFFBF;
    ULONG Dummy;
    KI_SAMPLE_MAP Samples[4];
    PKI_SAMPLE_MAP CurrentSample = Samples;

    /* Check for large page support */
    if (KeFeatureBits & KF_LARGE_PAGE)
    {
        /* FIXME: Support this */
        DPRINT("Large Page support detected but not yet taken advantage of\n");
    }

    /* Check for global page support */
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Do an IPI to enable it on all CPUs */
        CpuCount = KeNumberProcessors;
        KeIpiGenericCall(Ki386EnableGlobalPage, (ULONG_PTR)&CpuCount);
    }

    /* Check for PAT and/or MTRR support */
    if (KeFeatureBits & (KF_PAT | KF_MTRR))
    {
        /* Query the HAL to make sure we can use it */
        Status = HalQuerySystemInformation(HalFrameBufferCachingInformation,
                                           sizeof(BOOLEAN),
                                           &FbCaching,
                                           &ReturnLength);
        if ((NT_SUCCESS(Status)) && (FbCaching))
        {
            /* We can't, disable it */
            KeFeatureBits &= ~(KF_PAT | KF_MTRR);
        }
    }

    /* Check for PAT support and enable it */
    if (KeFeatureBits & KF_PAT) KiInitializePAT();

    /* Assume no errata for now */
    SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] = 0;

    /* Check if we have an NPX */
    if (KeI386NpxPresent)
    {
        /* Loop every CPU */
        i = KeActiveProcessors;
        for (Affinity = 1; i; Affinity <<= 1)
        {
            /* Check if this is part of the set */
            if (i & Affinity)
            {
                /* Run on this CPU */
                i &= ~Affinity;
                KeSetSystemAffinityThread(Affinity);

                /* Detect FPU errata */
                if (KiIsNpxErrataPresent())
                {
                    /* Disable NPX support */
                    KeI386NpxPresent = FALSE;
                    SharedUserData->
                        ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] =
                        TRUE;
                    break;
                }
            }
        }
    }

    /* If there's no NPX, then we're emulating the FPU */
    SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_EMULATED] =
        !KeI386NpxPresent;

    /* Check if there's no NPX, so that we can disable associated features */
    if (!KeI386NpxPresent)
    {
        /* Remove NPX-related bits */
        KeFeatureBits &= ~(KF_XMMI64 | KF_XMMI | KF_FXSR | KF_MMX);

        /* Disable kernel flags */
        KeI386FxsrPresent = KeI386XMMIPresent = FALSE;

        /* Disable processor features that might've been set until now */
        SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] =
        SharedUserData->ProcessorFeatures[PF_XMMI64_INSTRUCTIONS_AVAILABLE]   =
        SharedUserData->ProcessorFeatures[PF_XMMI_INSTRUCTIONS_AVAILABLE]     =
        SharedUserData->ProcessorFeatures[PF_3DNOW_INSTRUCTIONS_AVAILABLE]    =
        SharedUserData->ProcessorFeatures[PF_MMX_INSTRUCTIONS_AVAILABLE] = 0;
    }

    /* Check for CR4 support */
    if (KeFeatureBits & KF_CR4)
    {
        /* Do an IPI call to enable the Debug Exceptions */
        CpuCount = KeNumberProcessors;
        KeIpiGenericCall(Ki386EnableDE, (ULONG_PTR)&CpuCount);
    }

    /* Check if FXSR was found */
    if (KeFeatureBits & KF_FXSR)
    {
        /* Do an IPI call to enable the FXSR */
        CpuCount = KeNumberProcessors;
        KeIpiGenericCall(Ki386EnableFxsr, (ULONG_PTR)&CpuCount);

        /* Check if XMM was found too */
        if (KeFeatureBits & KF_XMMI)
        {
            /* Do an IPI call to enable XMMI exceptions */
            CpuCount = KeNumberProcessors;
            KeIpiGenericCall(Ki386EnableXMMIExceptions, (ULONG_PTR)&CpuCount);

            /* FIXME: Implement and enable XMM Page Zeroing for Mm */

            /* Patch the RtlPrefetchMemoryNonTemporal routine to enable it */
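            /* (0x90 is the x86 NOP opcode; this overwrites what is assumed to
             * be a one-byte RET stub at the start of the routine, enabling the
             * non-temporal prefetch path now that SSE support is confirmed.) */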
            *(PCHAR)RtlPrefetchMemoryNonTemporal = 0x90;
        }
    }

    /* Check for, and enable SYSENTER support */
    KiRestoreFastSyscallReturnState();

    /* Loop every CPU */
    i = KeActiveProcessors;
    for (Affinity = 1; i; Affinity <<= 1)
    {
        /* Check if this is part of the set */
        if (i & Affinity)
        {
            /* Run on this CPU */
            i &= ~Affinity;
            KeSetSystemAffinityThread(Affinity);

            /* Reset MHz to 0 for this CPU */
            KeGetCurrentPrcb()->MHz = 0;

            /* Check if we can use RDTSC */
            if (KeFeatureBits & KF_RDTSC)
            {
                /* Start sampling loop */
                for (;;)
                {
                    /* Do a dummy CPUID to start the sample */
                    CPUID(0, &Dummy, &Dummy, &Dummy, &Dummy);

                    /* Fill out the starting data */
                    CurrentSample->PerfStart = KeQueryPerformanceCounter(NULL);
                    CurrentSample->TSCStart = __rdtsc();
                    CurrentSample->PerfFreq.QuadPart = -50000;

                    /* Sleep for this sample */
                    KeDelayExecutionThread(KernelMode,
                                           FALSE,
                                           &CurrentSample->PerfFreq);

                    /* Do another dummy CPUID */
                    CPUID(0, &Dummy, &Dummy, &Dummy, &Dummy);

                    /* Fill out the ending data */
                    CurrentSample->PerfEnd =
                        KeQueryPerformanceCounter(&CurrentSample->PerfFreq);
                    CurrentSample->TSCEnd = __rdtsc();

                    /* Calculate the differences */
                    CurrentSample->PerfDelta = CurrentSample->PerfEnd.QuadPart -
                                               CurrentSample->PerfStart.QuadPart;
                    CurrentSample->TSCDelta = CurrentSample->TSCEnd -
                                              CurrentSample->TSCStart;

                    /* Compute CPU Speed */
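                    /* (TSCDelta / PerfDelta is TSC ticks per performance-counter
                     * tick; multiplying by PerfFreq, in counts per second, gives
                     * TSC ticks per second, and dividing by 1,000,000 yields MHz.) */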
                    CurrentSample->MHz = (ULONG)((CurrentSample->TSCDelta *
                                                  CurrentSample->
                                                  PerfFreq.QuadPart + 500000) /
                                                 (CurrentSample->PerfDelta *
                                                  1000000));

                    /* Check if this isn't the first sample */
                    if (Sample)
                    {
                        /* Check if we got a good precision within 1MHz */
                        if ((CurrentSample->MHz == CurrentSample[-1].MHz) ||
                            (CurrentSample->MHz == CurrentSample[-1].MHz + 1) ||
                            (CurrentSample->MHz == CurrentSample[-1].MHz - 1))
                        {
                            /* We did, stop sampling */
                            break;
                        }
                    }

                    /* Move on */
                    CurrentSample++;
                    Sample++;

                    if (Sample == sizeof(Samples) / sizeof(Samples[0]))
                    {
                        /* Restart */
                        CurrentSample = Samples;
                        Sample = 0;
                    }
                }

                /* Save the CPU Speed */
                KeGetCurrentPrcb()->MHz = CurrentSample[-1].MHz;
            }

            /* Check if we have MTRR */
            if (KeFeatureBits & KF_MTRR)
            {
                /* Then manually initialize MTRR for the CPU */
                KiInitializeMTRR(i ? FALSE : TRUE);
            }

            /* Check if we have AMD MTRR and initialize it for the CPU */
            if (KeFeatureBits & KF_AMDK6MTRR) KiAmdK6InitializeMTRR();

            /* Check if this is a buggy Pentium and apply the fixup if so */
            if (KiI386PentiumLockErrataPresent) KiI386PentiumLockErrataFixup();

            /* Check if the CPU supports FXSR */
            if (KeFeatureBits & KF_FXSR)
            {
                /* Get the current thread NPX state */
                FxSaveArea = KiGetThreadNpxArea(KeGetCurrentThread());

                /* Clear initial MXCsr mask */
                FxSaveArea->U.FxArea.MXCsrMask = 0;

                /* Save the current NPX State */
                Ke386SaveFpuState(FxSaveArea);

                /* If FXSAVE wrote back a nonzero mask, the CPU supplied its own */
                if (FxSaveArea->U.FxArea.MXCsrMask != 0)
                {
                    /* Then use whatever it's holding */
                    MXCsrMask = FxSaveArea->U.FxArea.MXCsrMask;
                }

                /* Check if nobody set the kernel-wide mask */
                if (!KiMXCsrMask)
                {
                    /* Then use the one we calculated above */
                    KiMXCsrMask = MXCsrMask;
                }
                else
                {
                    /* Was it set to the same value we found now? */
                    if (KiMXCsrMask != MXCsrMask)
                    {
                        /* No, something is definitely wrong */
                        KeBugCheckEx(MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED,
                                     KF_FXSR,
                                     KiMXCsrMask,
                                     MXCsrMask,
                                     0);
                    }
                }

                /* Now set the kernel mask */
                KiMXCsrMask &= MXCsrMask;
            }
        }
    }

    /* Return affinity back to where it was */
    KeRevertToUserAffinityThread();

    /* NT allows limiting the duration of an ISR with a registry key */
    if (KiTimeLimitIsrMicroseconds)
    {
        /* FIXME: TODO */
        DPRINT1("ISR Time Limit not yet supported\n");
    }

    /* Set CR0 features based on detected CPU */
    KiSetCR0Bits();
}
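
The per-processor loops above share one idiom: walk every set bit of the active-processor mask and bind the boot thread to that CPU with KeSetSystemAffinityThread before doing per-CPU work. A standalone sketch of the idiom (hypothetical helper, not part of the kernel):

/* Visit each CPU whose bit is set in an affinity mask */
static VOID ForEachActiveProcessor(KAFFINITY ActiveMask)
{
    KAFFINITY Remaining = ActiveMask;
    KAFFINITY Bit;

    for (Bit = 1; Remaining; Bit <<= 1)
    {
        if (Remaining & Bit)
        {
            /* Mark this CPU handled, then run the per-CPU work on it */
            Remaining &= ~Bit;
            /* KeSetSystemAffinityThread(Bit); ... per-CPU work ... */
        }
    }
}
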