Example 1
ULONG AllocatePortFromRange( PPORT_SET PortSet, ULONG Lowest, ULONG Highest ) {
    ULONG AllocatedPort;
    KIRQL OldIrql;

    /* Reject ranges outside the ports this set oversees */
    if ((Lowest < PortSet->StartingPort) ||
        (Highest >= PortSet->StartingPort + PortSet->PortsToOversee))
    {
        return (ULONG)-1;
    }

    /* Rebase the range onto bit indices within the bitmap */
    Lowest -= PortSet->StartingPort;
    Highest -= PortSet->StartingPort;

    KeAcquireSpinLock( &PortSet->Lock, &OldIrql );

    /* The third argument is only a search hint, so the bit found may lie
       below Lowest; only the upper bound is verified here */
    AllocatedPort = RtlFindClearBits( &PortSet->ProtoBitmap, 1, Lowest );
    if( AllocatedPort != (ULONG)-1 && AllocatedPort <= Highest ) {
        RtlSetBit( &PortSet->ProtoBitmap, AllocatedPort );
        AllocatedPort += PortSet->StartingPort;
        KeReleaseSpinLock( &PortSet->Lock, OldIrql );
        return htons(AllocatedPort);
    }
    KeReleaseSpinLock( &PortSet->Lock, OldIrql );

    return (ULONG)-1;
}
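
The matching release path is not shown above. A minimal sketch, assuming the same PORT_SET layout and a port value produced by one of these allocators (so still byte-swapped by htons), might look like this:

VOID DeallocatePort( PPORT_SET PortSet, ULONG Port ) {
    KIRQL OldIrql;

    /* Undo the htons() applied at allocation, then rebase to a bit index */
    Port = ntohs((USHORT)Port);
    Port -= PortSet->StartingPort;

    KeAcquireSpinLock( &PortSet->Lock, &OldIrql );
    RtlClearBit( &PortSet->ProtoBitmap, Port );  /* mark the port free again */
    KeReleaseSpinLock( &PortSet->Lock, OldIrql );
}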
Example 2
ULONG AllocateAnyPort( PPORT_SET PortSet ) {
    ULONG AllocatedPort;
    KIRQL OldIrql;

    KeAcquireSpinLock( &PortSet->Lock, &OldIrql );

    /* Find a single clear bit anywhere in the bitmap, starting at index 0 */
    AllocatedPort = RtlFindClearBits( &PortSet->ProtoBitmap, 1, 0 );
    if( AllocatedPort != (ULONG)-1 ) {
        RtlSetBit( &PortSet->ProtoBitmap, AllocatedPort );
        AllocatedPort += PortSet->StartingPort;
        KeReleaseSpinLock( &PortSet->Lock, OldIrql );
        return htons(AllocatedPort);
    }
    KeReleaseSpinLock( &PortSet->Lock, OldIrql );

    return (ULONG)-1;
}
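
Both allocators assume a PORT_SET whose spin lock and ProtoBitmap were initialized beforehand. A setup sketch under that assumption (the ProtoBitBuffer field, function name, and pool tag are illustrative):

BOOLEAN PortsStartup( PPORT_SET PortSet, ULONG StartingPort, ULONG PortCount ) {
    /* RtlInitializeBitMap does not allocate storage: reserve one ULONG per
       32 ports and keep the buffer alive as long as the bitmap is in use */
    ULONG SizeInBytes = ((PortCount + 31) / 32) * sizeof(ULONG);

    PortSet->ProtoBitBuffer = ExAllocatePoolWithTag( NonPagedPool, SizeInBytes, 'PtrP' );
    if (!PortSet->ProtoBitBuffer) return FALSE;

    PortSet->StartingPort = StartingPort;
    PortSet->PortsToOversee = PortCount;
    KeInitializeSpinLock( &PortSet->Lock );

    RtlInitializeBitMap( &PortSet->ProtoBitmap, PortSet->ProtoBitBuffer, PortCount );
    RtlClearAllBits( &PortSet->ProtoBitmap );  /* every port starts out free */
    return TRUE;
}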
Example 3
VOID test_bitmap()
{
	Pool		pool;	// assumed to declare an RTL_BITMAP member named Bitmap
	ULONG		Bitmap_Size, Index;
	PULONG		Bitmap_Buffer;

	// Initialize: round the buffer size up to whole ULONGs, since
	// RtlInitializeBitMap expects ULONG-aligned storage
	Bitmap_Size = 13; // Number of bits
	Bitmap_Buffer = ExAllocatePoolWithTag (
		NonPagedPool,
		(ULONG)(((Bitmap_Size/8+1)/sizeof(ULONG) + 1)* sizeof(ULONG)),
		BITMAP_TAG
	);
	if (Bitmap_Buffer == NULL)
		return;
	RtlInitializeBitMap(
		&pool.Bitmap, 
		(PULONG)(Bitmap_Buffer),
		(ULONG)(Bitmap_Size)
	);
	RtlClearAllBits(&pool.Bitmap);

	for (Index = 0; Index < 10; Index++)
		RtlSetBit(&pool.Bitmap, Index);
	if (RtlAreBitsSet(&pool.Bitmap, 0, 10) == TRUE)
		DbgPrint("bitmap: bit[0..9] is set\r\n");

	if (RtlCheckBit(&pool.Bitmap, 10))
		DbgPrint("bitmap: bit[10] is set\r\n");
	if (RtlCheckBit(&pool.Bitmap, 1024)) // Warning: out-of-range index; RtlCheckBit does no validation and may read garbage (it can return 1 here)
		DbgPrint("bitmap: bit[1024] is set\r\n");

	Index = 0;
	do
	{
		Index = RtlFindClearBitsAndSet (
			&pool.Bitmap,
			1, //NumberToFind
			Index //HintIndex
		);
		DbgPrint("%d\n", Index);
	} while (Index != (ULONG)-1);

	// Free
	ExFreePoolWithTag(pool.Bitmap.Buffer, BITMAP_TAG);
}
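
The out-of-range read above is easy to guard against, because the bitmap header records its own size. A minimal bounds-checked wrapper (the helper name is hypothetical) could be:

// Hypothetical helper: validate the index first, since RtlCheckBit
// itself performs no range checking on the bitmap.
BOOLEAN SafeCheckBit(PRTL_BITMAP Bitmap, ULONG BitNumber)
{
	if (BitNumber >= Bitmap->SizeOfBitMap)
		return FALSE;	// treat out-of-range as "not set"
	return RtlCheckBit(Bitmap, BitNumber) ? TRUE : FALSE;
}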
Example 4
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for a zero-size request or an overflow that wrapped the page count to zero
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables the expansion needs
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first PTE and the end of the range, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages; // one past the last PTE, despite the name
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page of the allocation
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
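
The EndOfPagedPoolBitmap bookkeeping pays off when the allocation is freed: given only the base address, the free path can recover the allocation length by scanning for the end bit. A simplified sketch of that lookup (the helper name is hypothetical and uses only names from the function above; the real free routine also clears both bitmaps and releases the PTEs):

/* Sketch: recover the page count of a paged pool allocation from its base
   address, relying on the bitmaps maintained by MiAllocatePoolPages above. */
ULONG
MiGetPagedPoolAllocationSize(IN PVOID BaseVa)
{
    ULONG i, End;

    /* Convert the address back into its bit index in the allocation map */
    i = (ULONG)(((ULONG_PTR)BaseVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT);

    /* Walk forward until the bit flagging the last page of the allocation */
    End = i;
    while (!RtlCheckBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

    /* The allocation spans [i, End] inclusive */
    return End - i + 1;
}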
Example 5
/// <summary>
/// Setup VMCS fields
/// </summary>
/// <param name="VpData">Virtual CPU data</param>
VOID VmxSetupVMCS( IN PVCPU VpData )
{
    PKPROCESSOR_STATE state = &VpData->HostState;
    VMX_GDTENTRY64 vmxGdtEntry = { 0 };
    VMX_VM_ENTER_CONTROLS vmEnterCtlRequested = { 0 };
    VMX_VM_EXIT_CONTROLS vmExitCtlRequested = { 0 };
    VMX_PIN_BASED_CONTROLS vmPinCtlRequested = { 0 };
    VMX_CPU_BASED_CONTROLS vmCpuCtlRequested = { 0 };
    VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2Requested = { 0 };

    // As we enter back into the guest, make sure it executes in x64 mode as well.
    vmEnterCtlRequested.Fields.IA32eModeGuest = TRUE;

    // If any interrupts were pending upon entering the hypervisor, acknowledge
    // them when we exit, and make sure the host runs in x64 mode at all times.
    vmExitCtlRequested.Fields.AcknowledgeInterruptOnExit = TRUE;
    vmExitCtlRequested.Fields.HostAddressSpaceSize = TRUE;

    // In order for our choice of supporting RDTSCP and XSAVES/XRSTORS below to
    // actually mean something, we have to request secondary controls. We also
    // want to activate the MSR bitmap to keep MSR accesses from being trapped.
    vmCpuCtlRequested.Fields.UseMSRBitmaps = TRUE;
    vmCpuCtlRequested.Fields.ActivateSecondaryControl = TRUE;
    //vmCpuCtlRequested.Fields.UseTSCOffseting = TRUE;
    //vmCpuCtlRequested.Fields.RDTSCExiting = TRUE;

    // VPID caches must be invalidated on CR3 change
    if(g_Data->Features.VPID)
        vmCpuCtlRequested.Fields.CR3LoadExiting = TRUE;

    // Enable support for RDTSCP and XSAVES/XRSTORS in the guest. Windows 10
    // makes use of both of these instructions if the CPU supports them. By
    // using VmxpAdjustMsr, these options will be ignored if this processor
    // does not actually support the instructions to begin with.
    vmCpuCtl2Requested.Fields.EnableRDTSCP = TRUE;
    vmCpuCtl2Requested.Fields.EnableXSAVESXSTORS = TRUE;

    // Begin by setting the link pointer to the required value for 4KB VMCS.
    __vmx_vmwrite( VMCS_LINK_POINTER, MAXULONG64 );

    __vmx_vmwrite(
        PIN_BASED_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_PINBASED_CTLS )], vmPinCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        CPU_BASED_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_PROCBASED_CTLS )], vmCpuCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        SECONDARY_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_PROCBASED_CTLS2 )], vmCpuCtl2Requested.All ) 
        );
    __vmx_vmwrite(
        VM_EXIT_CONTROLS, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_EXIT_CTLS )], vmExitCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        VM_ENTRY_CONTROLS, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_ENTRY_CTLS )], vmEnterCtlRequested.All ) 
        );

    // Load the MSR bitmap. Unlike other bitmaps, not having an MSR bitmap
    // traps all MSRs, so we have to allocate an (almost) empty one.
    PUCHAR bitMapReadLow = g_Data->MSRBitmap;       // 0x00000000 - 0x00001FFF
    PUCHAR bitMapReadHigh = bitMapReadLow + 1024;   // 0xC0000000 - 0xC0001FFF

    RTL_BITMAP bitMapReadLowHeader = { 0 };
    RTL_BITMAP bitMapReadHighHeader = { 0 };
    RtlInitializeBitMap( &bitMapReadLowHeader, (PULONG)bitMapReadLow, 1024 * 8 );
    RtlInitializeBitMap( &bitMapReadHighHeader, (PULONG)bitMapReadHigh, 1024 * 8 );

    RtlSetBit( &bitMapReadLowHeader, MSR_IA32_FEATURE_CONTROL );    // MSR_IA32_FEATURE_CONTROL
    RtlSetBit( &bitMapReadLowHeader,  MSR_IA32_DEBUGCTL );          // MSR_DEBUGCTL
    RtlSetBit( &bitMapReadHighHeader, MSR_LSTAR - 0xC0000000 );     // MSR_LSTAR

    // VMX MSRs
    for (ULONG i = MSR_IA32_VMX_BASIC; i <= MSR_IA32_VMX_VMFUNC; i++)
        RtlSetBit( &bitMapReadLowHeader, i );

    __vmx_vmwrite( MSR_BITMAP, MmGetPhysicalAddress( g_Data->MSRBitmap ).QuadPart );

    // Exception bitmap
    ULONG ExceptionBitmap = 0;
    //ExceptionBitmap |= 1 << VECTOR_DEBUG_EXCEPTION;
    ExceptionBitmap |= 1 << VECTOR_BREAKPOINT_EXCEPTION;

    __vmx_vmwrite( EXCEPTION_BITMAP, ExceptionBitmap );

    // CS (Ring 0 Code)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegCs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_CS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_CS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_CS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_CS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_CS_SELECTOR, state->ContextFrame.SegCs & ~RPL_MASK );

    // SS (Ring 0 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegSs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_SS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_SS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_SS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_SS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_SS_SELECTOR, state->ContextFrame.SegSs & ~RPL_MASK );

    // DS (Ring 3 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegDs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_DS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_DS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_DS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_DS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_DS_SELECTOR, state->ContextFrame.SegDs & ~RPL_MASK );

    // ES (Ring 3 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegEs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_ES_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_ES_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_ES_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_ES_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_ES_SELECTOR, state->ContextFrame.SegEs & ~RPL_MASK );

    // FS (Ring 3 Compatibility-Mode TEB)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegFs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_FS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_FS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_FS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_FS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_FS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_FS_SELECTOR, state->ContextFrame.SegFs & ~RPL_MASK );

    // GS (Ring 3 Data if in Compatibility-Mode, MSR-based in Long Mode)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegGs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_GS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_GS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_GS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_GS_BASE, state->SpecialRegisters.MsrGsBase );
    __vmx_vmwrite( HOST_GS_BASE, state->SpecialRegisters.MsrGsBase );
    __vmx_vmwrite( HOST_GS_SELECTOR, state->ContextFrame.SegGs & ~RPL_MASK );

    // Task Register (Ring 0 TSS)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->SpecialRegisters.Tr, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_TR_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_TR_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_TR_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_TR_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_TR_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_TR_SELECTOR, state->SpecialRegisters.Tr & ~RPL_MASK );

    // LDT
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->SpecialRegisters.Ldtr, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_LDTR_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_LDTR_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_LDTR_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_LDTR_BASE, vmxGdtEntry.Base );

    // GDT
    __vmx_vmwrite( GUEST_GDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Gdtr.Base );
    __vmx_vmwrite( GUEST_GDTR_LIMIT, state->SpecialRegisters.Gdtr.Limit );
    __vmx_vmwrite( HOST_GDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Gdtr.Base );

    // IDT
    __vmx_vmwrite( GUEST_IDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Idtr.Base );
    __vmx_vmwrite( GUEST_IDTR_LIMIT, state->SpecialRegisters.Idtr.Limit );
    __vmx_vmwrite( HOST_IDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Idtr.Base );

    // CR0
    __vmx_vmwrite( CR0_READ_SHADOW, state->SpecialRegisters.Cr0 );
    __vmx_vmwrite( HOST_CR0, state->SpecialRegisters.Cr0 );
    __vmx_vmwrite( GUEST_CR0, state->SpecialRegisters.Cr0 );

    // CR3 -- do not use the current process' address space for the host,
    // because we may be executing in an arbitrary user-mode process right now
    // as part of the DPC interrupt we execute in.
    __vmx_vmwrite( HOST_CR3, VpData->SystemDirectoryTableBase );
    __vmx_vmwrite( GUEST_CR3, state->SpecialRegisters.Cr3 );

    // CR4 -- mask off CR4.VMXE (bit 13, 0x2000) so the guest can neither see
    // nor change it; the read shadow reports it as clear
    __vmx_vmwrite( HOST_CR4, state->SpecialRegisters.Cr4 );
    __vmx_vmwrite( GUEST_CR4, state->SpecialRegisters.Cr4 );
    __vmx_vmwrite( CR4_GUEST_HOST_MASK, 0x2000 );
    __vmx_vmwrite( CR4_READ_SHADOW, state->SpecialRegisters.Cr4 & ~0x2000 );

    // Debug MSR and DR7
    __vmx_vmwrite( GUEST_IA32_DEBUGCTL, state->SpecialRegisters.DebugControl );
    __vmx_vmwrite( GUEST_DR7, state->SpecialRegisters.KernelDr7 );

    // Finally, load the guest stack, instruction pointer, and rflags, which
    // corresponds exactly to the location where RtlCaptureContext will return
    // to inside of VmxInitializeCPU.
    __vmx_vmwrite( GUEST_RSP, state->ContextFrame.Rsp );
    __vmx_vmwrite( GUEST_RIP, state->ContextFrame.Rip );
    __vmx_vmwrite( GUEST_RFLAGS, state->ContextFrame.EFlags );

    // Load the hypervisor entrypoint and stack. We give ourselves a standard
    // size kernel stack (24KB) and bias for the context structure that the
    // hypervisor entrypoint will push on the stack, avoiding the need for RSP
    // modifying instructions in the entrypoint. Note that the CONTEXT pointer
    // and thus the stack itself, must be 16-byte aligned for ABI compatibility
    // with AMD64 -- specifically, XMM operations will fail otherwise, such as
    // the ones that RtlCaptureContext will perform.
    NT_ASSERT( (KERNEL_STACK_SIZE - sizeof( CONTEXT )) % 16 == 0 );
    __vmx_vmwrite( HOST_RSP, (ULONG_PTR)VpData->VMMStack + KERNEL_STACK_SIZE - sizeof( CONTEXT ) );
    __vmx_vmwrite( HOST_RIP, (ULONG_PTR)VmxVMEntry );
}
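
The two read bitmaps above follow Intel's fixed MSR-bitmap layout: MSRs 0x00000000-0x00001FFF index the first 1KB region directly, while MSRs 0xC0000000-0xC0001FFF index the second 1KB region after subtracting 0xC0000000. A small sketch making that mapping explicit (the helper name is hypothetical; the layout matches the code above):

// Hypothetical helper: mark one MSR for read interception in a VMX MSR
// bitmap laid out as above (read-low at offset 0, read-high at offset 1024).
VOID SetMsrReadIntercept( PUCHAR MsrBitmap, ULONG Msr )
{
    RTL_BITMAP header = { 0 };

    if (Msr <= 0x00001FFF)
    {
        // Low MSRs: the bit index is the MSR number itself
        RtlInitializeBitMap( &header, (PULONG)MsrBitmap, 1024 * 8 );
        RtlSetBit( &header, Msr );
    }
    else if (Msr >= 0xC0000000 && Msr <= 0xC0001FFF)
    {
        // High MSRs: rebase into the second 1KB region
        RtlInitializeBitMap( &header, (PULONG)(MsrBitmap + 1024), 1024 * 8 );
        RtlSetBit( &header, Msr - 0xC0000000 );
    }
    // MSRs outside both ranges have no bitmap slot and always cause VM exits
}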
Example 6
VOID
MiZeroPageFile (
    IN PVOID Context
    )

/*++

Routine Description:

    This routine zeroes all inactive pagefile blocks in the specified paging
    file.

Arguments:

    Context - Supplies the information on which pagefile to zero and a zeroed
              page to use for the I/O.

Return Value:

    None. The routine is VOID; completion is signaled via the AllDone event
    supplied in the context.

Environment:

    Kernel mode, the caller must lock down PAGELK.

--*/

{
    PFN_NUMBER MaxPagesToWrite;
    PMMPFN Pfn1;
    PPFN_NUMBER Page;
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + MM_MAXIMUM_WRITE_CLUSTER];
    PMDL Mdl;
    NTSTATUS Status;
    KEVENT IoEvent;
    IO_STATUS_BLOCK IoStatus;
    KIRQL OldIrql;
    LARGE_INTEGER StartingOffset;
    ULONG count;
    ULONG i;
    PFN_NUMBER first;
    ULONG write;
    PKEVENT AllDone;
    SIZE_T NumberOfBytes;
    PMMPAGING_FILE PagingFile;
    PFN_NUMBER ZeroedPageFrame;
    PMM_ZERO_PAGEFILE_CONTEXT ZeroContext;

    ZeroContext = (PMM_ZERO_PAGEFILE_CONTEXT) Context;

    PagingFile = ZeroContext->PagingFile;
    ZeroedPageFrame = ZeroContext->ZeroedPageFrame;
    AllDone = ZeroContext->AllDone;

    ExFreePool (Context);

    NumberOfBytes = MmModifiedWriteClusterSize << PAGE_SHIFT;
    MaxPagesToWrite = NumberOfBytes >> PAGE_SHIFT;

    Mdl = (PMDL) MdlHack;
    Page = (PPFN_NUMBER)(Mdl + 1);

    KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);

    MmInitializeMdl (Mdl, NULL, PAGE_SIZE);

    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    Mdl->StartVa = NULL;


    for (i = 0; i < MaxPagesToWrite; i += 1) {
        *Page = ZeroedPageFrame;
        Page += 1;
    }

    count = 0;
    write = FALSE;

    SATISFY_OVERZEALOUS_COMPILER (first = 0);

    LOCK_PFN (OldIrql);

    for (i = 1; i < PagingFile->Size; i += 1) {

        if (RtlCheckBit (PagingFile->Bitmap, (ULONG) i) == 0) {

            //
            // Claim the pagefile location as the modified writer
            // may already be scanning.
            //

            RtlSetBit (PagingFile->Bitmap, (ULONG) i);

            if (count == 0) {
                first = i;
            }

            count += 1;

            if ((count == MaxPagesToWrite) || (i == PagingFile->Size - 1)) {
                write = TRUE;
            }
        }
        else {
            if (count != 0) {

                //
                // Issue a write.
                //

                write = TRUE;
            }
        }

        if (write) {

            UNLOCK_PFN (OldIrql);

            StartingOffset.QuadPart = (LONGLONG)first << PAGE_SHIFT;
            Mdl->ByteCount = count << PAGE_SHIFT;
            KeClearEvent (&IoEvent);

            Status = IoSynchronousPageWrite (PagingFile->File,
                                             Mdl,
                                             &StartingOffset,
                                             &IoEvent,
                                             &IoStatus);

            //
            // Ignore all I/O failures - there is nothing that can
            // be done at this point.
            //

            if (!NT_SUCCESS (Status)) {
                KeSetEvent (&IoEvent, 0, FALSE);
            }

            Status = KeWaitForSingleObject (&IoEvent,
                                            WrPageOut,
                                            KernelMode,
                                            FALSE,
                                            (PLARGE_INTEGER)&MmTwentySeconds);

            if (Status == STATUS_TIMEOUT) {

                //
                // The write did not complete in 20 seconds, assume
                // that the file systems are hung and return an error.
                //
                // Note the zero page (and any MDL system virtual address a
                // driver may have created) is leaked because we don't know
                // what the filesystem or storage stack might (still) be
                // doing to them.
                //

                Pfn1 = MI_PFN_ELEMENT (ZeroedPageFrame);

                LOCK_PFN (OldIrql);

                //
                // Increment the reference count on the zeroed page to ensure
                // it is never freed.
                //

                InterlockedIncrementPfn ((PSHORT)&Pfn1->u3.e2.ReferenceCount);

                RtlClearBits (PagingFile->Bitmap, (ULONG) first, count);

                break;
            }

            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
                MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
            }

            write = FALSE;
            LOCK_PFN (OldIrql);
            RtlClearBits (PagingFile->Bitmap, (ULONG) first, count);
            count = 0;
        }
    }

    UNLOCK_PFN (OldIrql);

    KeSetEvent (AllDone, 0, FALSE);
    return;
}
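
MiZeroPageFile consumes a small context that the caller allocates from pool and that the routine itself frees once it has captured the fields. A sketch of the launch side under those assumptions (the wrapper name and pool tag are illustrative; the real caller queues this work on system threads, which is why the completion event exists at all):

/* Sketch: hand one paging file to MiZeroPageFile and wait for completion. */
VOID
MiLaunchZeroPageFile (
    IN PMMPAGING_FILE PagingFile,
    IN PFN_NUMBER ZeroedPageFrame
    )
{
    KEVENT AllDoneEvent;
    PMM_ZERO_PAGEFILE_CONTEXT ZeroContext;

    KeInitializeEvent (&AllDoneEvent, NotificationEvent, FALSE);

    ZeroContext = ExAllocatePoolWithTag (NonPagedPool,
                                         sizeof (*ZeroContext),
                                         'fZmM');

    if (ZeroContext == NULL) {
        return;
    }

    ZeroContext->PagingFile = PagingFile;
    ZeroContext->ZeroedPageFrame = ZeroedPageFrame;
    ZeroContext->AllDone = &AllDoneEvent;

    //
    // MiZeroPageFile frees the context itself after capturing the fields.
    // Called synchronously here; the wait matters when it runs on a worker.
    //

    MiZeroPageFile ((PVOID) ZeroContext);

    KeWaitForSingleObject (&AllDoneEvent, WrPageOut, KernelMode, FALSE, NULL);
}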