Example #1
/*
 * @implemented
 */
DWORD
WINAPI
TlsAlloc(VOID)
{
    ULONG Index;

    RtlAcquirePebLock();

    /* Try to get regular TEB slot. */
    Index = RtlFindClearBitsAndSet(NtCurrentPeb()->TlsBitmap, 1, 0);
    if (Index == ~0U)
    {
        /* If it fails, try to find expansion TEB slot. */
        Index = RtlFindClearBitsAndSet(NtCurrentPeb()->TlsExpansionBitmap, 1, 0);
        if (Index != ~0U)
        {
            if (NtCurrentTeb()->TlsExpansionSlots == NULL)
            {
                NtCurrentTeb()->TlsExpansionSlots = HeapAlloc(RtlGetProcessHeap(),
                                                              HEAP_ZERO_MEMORY,
                                                              TLS_EXPANSION_SLOTS *
                                                              sizeof(PVOID));
            }

            if (NtCurrentTeb()->TlsExpansionSlots == NULL)
            {
                RtlClearBits(NtCurrentPeb()->TlsExpansionBitmap, Index, 1);
                Index = ~0;
                SetLastError(ERROR_NOT_ENOUGH_MEMORY);
            }
            else
            {
                /* Clear the value. */
                NtCurrentTeb()->TlsExpansionSlots[Index] = 0;
                Index += TLS_MINIMUM_AVAILABLE;
            }
        }
        else
        {
            SetLastError(ERROR_NO_MORE_ITEMS);
        }
    }
    else
    {
        /* Clear the value. */
        NtCurrentTeb()->TlsSlots[Index] = 0;
    }

    RtlReleasePebLock();

    return Index;
}
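All of the examples collected here follow the same core RTL bitmap pattern: initialize an RTL_BITMAP header over caller-owned ULONG storage, call RtlFindClearBitsAndSet to claim a run of clear bits, treat a return value of 0xFFFFFFFF as failure, and later release the run with RtlClearBits. The following is a minimal, self-contained sketch of that pattern; the names SLOT_COUNT, SlotMap, SlotMapInit, SlotAlloc and SlotFree are illustrative only (they do not come from any of the excerpts), and synchronization is deliberately left out.

#include <ntddk.h>

#define SLOT_COUNT 64

static RTL_BITMAP SlotMap;                          /* bitmap header (hypothetical) */
static ULONG SlotMapBuffer[(SLOT_COUNT + 31) / 32]; /* caller-owned, ULONG-sized backing storage */

VOID SlotMapInit(VOID)
{
    /* The header only points at the buffer; it does not copy or allocate it */
    RtlInitializeBitMap(&SlotMap, SlotMapBuffer, SLOT_COUNT);
    RtlClearAllBits(&SlotMap); /* clear bit == free slot */
}

ULONG SlotAlloc(VOID)
{
    /* Find one clear bit starting at hint 0 and set it; callers must
       serialize access to the bitmap themselves (omitted here) */
    return RtlFindClearBitsAndSet(&SlotMap, 1, 0); /* 0xFFFFFFFF if no clear bit exists */
}

VOID SlotFree(ULONG Index)
{
    if (Index != 0xFFFFFFFF)
        RtlClearBits(&SlotMap, Index, 1); /* mark the slot free again */
}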
Example #2
PFN_NUMBER
XenevtchnAllocIoPFN(void)
{
    KIRQL old_irql;
    ULONG page_nr;

    old_irql = acquire_irqsafe_lock(&io_hole_lock);
    page_nr = RtlFindClearBitsAndSet(&io_hole_in_use, 1, 0);
    release_irqsafe_lock(&io_hole_lock, old_irql);

    if (page_nr == 0xffffffff)
        return 0;
    else
        return (PFN_NUMBER)(page_nr + (io_hole_start.QuadPart >> PAGE_SHIFT));
}
Example #3
VOID test_bitmap()
{
	Pool		pool;
	ULONG		Bitmap_Size, Index;
	PULONG		Bitmap_Buffer;

	// Initialize
	Bitmap_Size = 13; // Number of bits
	Bitmap_Buffer = ExAllocatePoolWithTag (
		NonPagedPool,
		(ULONG)(((Bitmap_Size/8+1)/sizeof(ULONG) + 1)* sizeof(ULONG)), // round up to whole ULONGs
		BITMAP_TAG
	);
	if (Bitmap_Buffer == NULL) // allocation can fail; don't hand NULL to RtlInitializeBitMap
		return;
	RtlInitializeBitMap(
		&pool.Bitmap, 
		(PULONG)(Bitmap_Buffer),
		(ULONG)(Bitmap_Size)
	);
	RtlClearAllBits(&pool.Bitmap);

	for (Index = 0; Index < 10; Index++)
		RtlSetBit(&pool.Bitmap, Index);
	if (RtlAreBitsSet(&pool.Bitmap, 0, 10) == TRUE)
		DbgPrint("bitmap: bit[0..9] is set\r\n");

	if (RtlCheckBit(&pool.Bitmap, 10))
		DbgPrint("bitmap: bit[10] is set\r\n");
	if (RtlCheckBit(&pool.Bitmap, 1024)) // Warning: index 1024 is outside the 13-bit bitmap; this out-of-bounds read may return 1
		DbgPrint("bitmap: bit[1024] is set\r\n");

	Index = 0;
	do
	{
		Index = RtlFindClearBitsAndSet (
			&pool.Bitmap,
			1, //NumberToFind
			Index //HintIndex
		);
		DbgPrint("%d\n", Index);
	} while (Index != 0xFFFFFFFF); // RtlFindClearBitsAndSet returns 0xFFFFFFFF when no clear run is left

	// Free
	ExFreePoolWithTag(pool.Bitmap.Buffer, BITMAP_TAG);
}
Example #4
PVOID
XenevtchnAllocIoMemory(ULONG nr_bytes, PHYSICAL_ADDRESS *pa)
{
    KIRQL old_irql;
    ULONG page_nr;

    nr_bytes = (nr_bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

    old_irql = acquire_irqsafe_lock(&io_hole_lock);
    page_nr = RtlFindClearBitsAndSet(&io_hole_in_use,
                                     nr_bytes / PAGE_SIZE,
                                     0);
    release_irqsafe_lock(&io_hole_lock, old_irql);
    if (page_nr == 0xffffffff) {
        TraceWarning (("Filled the io hole!\n"));
        return NULL;
    } else {
        pa->QuadPart = io_hole_start.QuadPart + page_nr * PAGE_SIZE;
        return (PVOID)((ULONG_PTR)io_hole_va_start + page_nr * PAGE_SIZE);
    }
}
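Examples #2 and #4 only show the allocation side of the io-hole bitmap. Below is a hedged sketch of what the matching release path could look like, reusing the names from those excerpts (io_hole_in_use, io_hole_lock, io_hole_va_start, acquire_irqsafe_lock, release_irqsafe_lock); the function name XenevtchnFreeIoMemory and its exact signature are assumptions, not code from the original driver.

static void
XenevtchnFreeIoMemory(PVOID va, ULONG nr_bytes)
{
    KIRQL old_irql;
    ULONG page_nr;

    /* Round up to whole pages, mirroring the allocation side */
    nr_bytes = (nr_bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

    /* Translate the virtual address back into a page index within the io hole */
    page_nr = (ULONG)(((ULONG_PTR)va - (ULONG_PTR)io_hole_va_start) / PAGE_SIZE);

    /* Clear the same bits that RtlFindClearBitsAndSet claimed, under the same lock */
    old_irql = acquire_irqsafe_lock(&io_hole_lock);
    RtlClearBits(&io_hole_in_use, page_nr, nr_bytes / PAGE_SIZE);
    release_irqsafe_lock(&io_hole_lock, old_irql);
}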
Example #5
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the page bit count
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool enabled */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
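The paged-pool free path is not part of this excerpt, but it has to map a virtual address back to an allocation-map index before it can call RtlClearBits. A sketch of that inverse computation, assuming the same MmPagedPoolStart and MmPagedPoolInfo globals used above; the helper name MiVaToPagedPoolBitIndex is hypothetical.

ULONG
MiVaToPagedPoolBitIndex(PVOID BaseVa)
{
    /* Inverse of BaseVa = MmPagedPoolStart + (i << PAGE_SHIFT) above */
    return (ULONG)(((ULONG_PTR)BaseVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT);
}

/* The free side can then scan EndOfPagedPoolBitmap from that index to find the
   last page of the allocation, and clear the corresponding bits with
   RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, Index, SizeInPages). */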
Example #6
VOID
MiCheckPfn (
            VOID
            )

/*++

Routine Description:

    This routine checks each physical page in the PFN database to ensure
    it is in the proper state.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode, APCs disabled.

--*/

{
    PMMPFN Pfn1;
    PFN_NUMBER Link, Previous;
    ULONG i;
    PMMPTE PointerPte;
    KIRQL PreviousIrql;
    KIRQL OldIrql;
    USHORT ValidCheck[4];
    USHORT ValidPage[4];
    PMMPFN PfnX;

    ValidCheck[0] = ValidCheck[1] = ValidCheck[2] = ValidCheck[3] = 0;
    ValidPage[0] = ValidPage[1] = ValidPage[2] = ValidPage[3] = 0;

    if (CheckPfnBitMap == NULL) {
        MiCreateBitMap ( &CheckPfnBitMap, MmNumberOfPhysicalPages, NonPagedPool);
    }
    RtlClearAllBits (CheckPfnBitMap);

    //
    // Walk free list.
    //

    KeRaiseIrql (APC_LEVEL, &PreviousIrql);
    LOCK_PFN (OldIrql);

    Previous = MM_EMPTY_LIST;
    Link = MmFreePageListHead.Flink;
    for (i=0; i < MmFreePageListHead.Total; i++) {
        if (Link == MM_EMPTY_LIST) {
            DbgPrint("free list total count wrong\n");
            UNLOCK_PFN (OldIrql);
            KeLowerIrql (PreviousIrql);
            return;
        }
        RtlSetBits (CheckPfnBitMap, (ULONG)Link, 1L);
        Pfn1 = MI_PFN_ELEMENT(Link);
        if (Pfn1->u3.e2.ReferenceCount != 0) {
            DbgPrint("non zero reference count on free list\n");
            MiFormatPfn(Pfn1);

        }
        if (Pfn1->u3.e1.PageLocation != FreePageList) {
            DbgPrint("page location not freelist\n");
            MiFormatPfn(Pfn1);
        }
        if (Pfn1->u2.Blink != Previous) {
            DbgPrint("bad blink on free list\n");
            MiFormatPfn(Pfn1);
        }
        Previous = Link;
        Link = Pfn1->u1.Flink;

    }
    if (Link != MM_EMPTY_LIST) {
            DbgPrint("free list total count wrong\n");
            Pfn1 = MI_PFN_ELEMENT(Link);
            MiFormatPfn(Pfn1);
    }

    //
    // Walk zeroed list.
    //

    Previous = MM_EMPTY_LIST;
    Link = MmZeroedPageListHead.Flink;
    for (i=0; i < MmZeroedPageListHead.Total; i++) {
        if (Link == MM_EMPTY_LIST) {
            DbgPrint("zero list total count wrong\n");
            UNLOCK_PFN (OldIrql);
            KeLowerIrql (PreviousIrql);
            return;
        }
        RtlSetBits (CheckPfnBitMap, (ULONG)Link, 1L);
        Pfn1 = MI_PFN_ELEMENT(Link);
        if (Pfn1->u3.e2.ReferenceCount != 0) {
            DbgPrint("non zero reference count on zero list\n");
            MiFormatPfn(Pfn1);

        }
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) {
            DbgPrint("page location not zerolist\n");
            MiFormatPfn(Pfn1);
        }
        if (Pfn1->u2.Blink != Previous) {
            DbgPrint("bad blink on zero list\n");
            MiFormatPfn(Pfn1);
        }
        Previous = Link;
        Link = Pfn1->u1.Flink;

    }
    if (Link != MM_EMPTY_LIST) {
            DbgPrint("zero list total count wrong\n");
            Pfn1 = MI_PFN_ELEMENT(Link);
            MiFormatPfn(Pfn1);
    }

    //
    // Walk Bad list.
    //
    Previous = MM_EMPTY_LIST;
    Link = MmBadPageListHead.Flink;
    for (i=0; i < MmBadPageListHead.Total; i++) {
        if (Link == MM_EMPTY_LIST) {
            DbgPrint("Bad list total count wrong\n");
            UNLOCK_PFN (OldIrql);
            KeLowerIrql (PreviousIrql);
            return;
        }
        RtlSetBits (CheckPfnBitMap, (ULONG)Link, 1L);
        Pfn1 = MI_PFN_ELEMENT(Link);
        if (Pfn1->u3.e2.ReferenceCount != 0) {
            DbgPrint("non zero reference count on Bad list\n");
            MiFormatPfn(Pfn1);

        }
        if (Pfn1->u3.e1.PageLocation != BadPageList) {
            DbgPrint("page location not Badlist\n");
            MiFormatPfn(Pfn1);
        }
        if (Pfn1->u2.Blink != Previous) {
            DbgPrint("bad blink on Bad list\n");
            MiFormatPfn(Pfn1);
        }
        Previous = Link;
        Link = Pfn1->u1.Flink;

    }
    if (Link != MM_EMPTY_LIST) {
            DbgPrint("Bad list total count wrong\n");
            Pfn1 = MI_PFN_ELEMENT(Link);
            MiFormatPfn(Pfn1);
    }

    //
    // Walk Standby list.
    //

    Previous = MM_EMPTY_LIST;
    Link = MmStandbyPageListHead.Flink;
    for (i=0; i < MmStandbyPageListHead.Total; i++) {
        if (Link == MM_EMPTY_LIST) {
            DbgPrint("Standby list total count wrong\n");
            UNLOCK_PFN (OldIrql);
            KeLowerIrql (PreviousIrql);
            return;
        }
        RtlSetBits (CheckPfnBitMap, (ULONG)Link, 1L);
        Pfn1 = MI_PFN_ELEMENT(Link);
        if (Pfn1->u3.e2.ReferenceCount != 0) {
            DbgPrint("non zero reference count on Standby list\n");
            MiFormatPfn(Pfn1);

        }
        if (Pfn1->u3.e1.PageLocation != StandbyPageList) {
            DbgPrint("page location not Standbylist\n");
            MiFormatPfn(Pfn1);
        }
        if (Pfn1->u2.Blink != Previous) {
            DbgPrint("bad blink on Standby list\n");
            MiFormatPfn(Pfn1);
        }

        //
        // Check to see if referenced PTE is okay.
        //
        if (MI_IS_PFN_DELETED (Pfn1)) {
            DbgPrint("Invalid pteaddress in standby list\n");
            MiFormatPfn(Pfn1);

        } else {

            OldIrql = 99;
            if ((Pfn1->u3.e1.PrototypePte == 1) &&
                            (MmIsAddressValid (Pfn1->PteAddress))) {
                PointerPte = Pfn1->PteAddress;
            } else {
                PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame,
                                                   &OldIrql);
                PointerPte = (PMMPTE)((ULONG_PTR)PointerPte +
                                    MiGetByteOffset(Pfn1->PteAddress));
            }
            if (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (PointerPte) != Link) {
                DbgPrint("Invalid PFN - PTE address is wrong in standby list\n");
                MiFormatPfn(Pfn1);
                MiFormatPte(PointerPte);
            }
            if (PointerPte->u.Soft.Transition == 0) {
                DbgPrint("Pte not in transition for page on standby list\n");
                MiFormatPfn(Pfn1);
                MiFormatPte(PointerPte);
            }
            if (OldIrql != 99) {
                MiUnmapPageInHyperSpace (OldIrql);
                OldIrql = 99;
            }

        }

        Previous = Link;
        Link = Pfn1->u1.Flink;

    }
    if (Link != MM_EMPTY_LIST) {
            DbgPrint("Standby list total count wrong\n");
            Pfn1 = MI_PFN_ELEMENT(Link);
            MiFormatPfn(Pfn1);
    }

    //
    // Walk Modified list.
    //

    Previous = MM_EMPTY_LIST;
    Link = MmModifiedPageListHead.Flink;
    for (i=0; i < MmModifiedPageListHead.Total; i++) {
        if (Link == MM_EMPTY_LIST) {
            DbgPrint("Modified list total count wrong\n");
            UNLOCK_PFN (OldIrql);
            KeLowerIrql (PreviousIrql);
            return;
        }
        RtlSetBits (CheckPfnBitMap, (ULONG)Link, 1L);
        Pfn1 = MI_PFN_ELEMENT(Link);
        if (Pfn1->u3.e2.ReferenceCount != 0) {
            DbgPrint("non zero reference count on Modified list\n");
            MiFormatPfn(Pfn1);

        }
        if (Pfn1->u3.e1.PageLocation != ModifiedPageList) {
            DbgPrint("page location not Modifiedlist\n");
            MiFormatPfn(Pfn1);
        }
        if (Pfn1->u2.Blink != Previous) {
            DbgPrint("bad blink on Modified list\n");
            MiFormatPfn(Pfn1);
        }
        //
        // Check to see if referenced PTE is okay.
        //
        if (MI_IS_PFN_DELETED (Pfn1)) {
            DbgPrint("Invalid pteaddress in modified list\n");
            MiFormatPfn(Pfn1);

        } else {

            if ((Pfn1->u3.e1.PrototypePte == 1) &&
                            (MmIsAddressValid (Pfn1->PteAddress))) {
                PointerPte = Pfn1->PteAddress;
            } else {
                PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame, &OldIrql);
                PointerPte = (PMMPTE)((ULONG_PTR)PointerPte +
                                    MiGetByteOffset(Pfn1->PteAddress));
            }

            if (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (PointerPte) != Link) {
                DbgPrint("Invalid PFN - PTE address is wrong in modified list\n");
                MiFormatPfn(Pfn1);
                MiFormatPte(PointerPte);
            }
            if (PointerPte->u.Soft.Transition == 0) {
                DbgPrint("Pte not in transition for page on modified list\n");
                MiFormatPfn(Pfn1);
                MiFormatPte(PointerPte);
            }

            if (OldIrql != 99) {
                MiUnmapPageInHyperSpace (OldIrql);
                OldIrql = 99;
            }
        }

        Previous = Link;
        Link = Pfn1->u1.Flink;

    }
    if (Link != MM_EMPTY_LIST) {
            DbgPrint("Modified list total count wrong\n");
            Pfn1 = MI_PFN_ELEMENT(Link);
            MiFormatPfn(Pfn1);
    }
    //
    // All non active pages have been scanned.  Locate the
    // active pages and make sure they are consistent.
    //

    //
    // set bit zero as page zero is reserved for now
    //

    RtlSetBits (CheckPfnBitMap, 0L, 1L);

    Link = RtlFindClearBitsAndSet (CheckPfnBitMap, 1L, 0);
    while (Link != 0xFFFFFFFF) {
        Pfn1 = MI_PFN_ELEMENT (Link);

        //
        // Make sure the PTE address is okay
        //

        if ((Pfn1->PteAddress >= (PMMPTE)HYPER_SPACE)
                && (Pfn1->u3.e1.PrototypePte == 0)) {
            DbgPrint("pfn with illegal pte address\n");
            MiFormatPfn(Pfn1);
            break;
        }

        if (Pfn1->PteAddress < (PMMPTE)PTE_BASE) {
            DbgPrint("pfn with illegal pte address\n");
            MiFormatPfn(Pfn1);
            break;
        }

#if defined(_IA64_)

        //
        // ignore PTEs mapped to IA64 kernel BAT.
        //

        if (MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(Pfn1->PteAddress))) {

            goto NoCheck;
        }
#endif // _IA64_

#ifdef _ALPHA_

        //
        // ignore ptes mapped to ALPHA's 32-bit superpage.
        //

        if ((Pfn1->PteAddress > (PMMPTE)(ULONG_PTR)0xc0100000) &&
            (Pfn1->PteAddress < (PMMPTE)(ULONG_PTR)0xc0180000)) {

            goto NoCheck;
        }
#endif //ALPHA

        //
        // Check to make sure the referenced PTE is for this page.
        //

        if ((Pfn1->u3.e1.PrototypePte == 1) &&
                            (MmIsAddressValid (Pfn1->PteAddress))) {
            PointerPte = Pfn1->PteAddress;
        } else {
            PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame, &OldIrql);
            PointerPte = (PMMPTE)((ULONG_PTR)PointerPte +
                                    MiGetByteOffset(Pfn1->PteAddress));
        }

        if (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte) != Link) {
            DbgPrint("Invalid PFN - PTE address is wrong in active list\n");
            MiFormatPfn(Pfn1);
            MiFormatPte(PointerPte);
        }
        if (PointerPte->u.Hard.Valid == 0) {
            //
            // if the page is a page table page it could be out of
            // the working set yet a transition page is keeping it
            // around in memory (ups the share count).
            //

            if ((Pfn1->PteAddress < (PMMPTE)PDE_BASE) ||
                (Pfn1->PteAddress > (PMMPTE)PDE_TOP)) {

                DbgPrint("Pte not valid for page on active list\n");
                MiFormatPfn(Pfn1);
                MiFormatPte(PointerPte);
            }
        }

        if (Pfn1->u3.e2.ReferenceCount != 1) {
            DbgPrint("refcount not 1\n");
            MiFormatPfn(Pfn1);
        }


        //
        // Check to make sure the PTE count for the frame is okay.
        //

        if (Pfn1->u3.e1.PrototypePte == 1) {
            PfnX = MI_PFN_ELEMENT(Pfn1->PteFrame);
            for (i = 0; i < 4; i++) {
                if (ValidPage[i] == 0) {
                    ValidPage[i] = (USHORT)Pfn1->PteFrame;
                }
                if (ValidPage[i] == (USHORT)Pfn1->PteFrame) {
                    ValidCheck[i] += 1;
                    break;
                }
            }
        }
        if (OldIrql != 99) {
            MiUnmapPageInHyperSpace (OldIrql);
            OldIrql = 99;
        }

#if defined(_ALPHA_) || defined(_IA64_)
NoCheck:
#endif
        Link = RtlFindClearBitsAndSet (CheckPfnBitMap, 1L, 0);

    }

    for (i = 0; i < 4; i++) {
        if (ValidPage[i] == 0) {
            break;
        }
        PfnX = MI_PFN_ELEMENT(ValidPage[i]);
    }

    UNLOCK_PFN (OldIrql);
    KeLowerIrql (PreviousIrql);
    return;

}
Example #7
PVOID
NTAPI
GdiPoolAllocate(
    PGDI_POOL pPool)
{
    PGDI_POOL_SECTION pSection;
    ULONG ulIndex, cjOffset, ulPageBit;
    PLIST_ENTRY ple;
    PVOID pvAlloc, pvBaseAddress;
    SIZE_T cjSize;
    NTSTATUS status;

    /* Disable APCs and acquire the pool lock */
    KeEnterCriticalRegion();
    ExAcquirePushLockExclusive(&pPool->pushlock);

    /* Check if we have a ready section */
    if (!IsListEmpty(&pPool->leReadyList))
    {
        /* Get a free section */
        ple = pPool->leReadyList.Flink;
        pSection = CONTAINING_RECORD(ple, GDI_POOL_SECTION, leReadyLink);
        if (pSection->cAllocCount >= pPool->cSlotsPerSection)
        {
            DPRINT1("pSection->cAllocCount=%lu, pPool->cSlotsPerSection=%lu\n",
                    pSection->cAllocCount, pPool->cSlotsPerSection);
            DBG_DUMP_EVENT_LIST(&pPool->slhLog);
            ASSERT(FALSE);
        }
        ASSERT(pSection->cAllocCount < pPool->cSlotsPerSection);
    }
    else
    {
        /* No, check if we have something on the empty list */
        if (!IsListEmpty(&pPool->leEmptyList))
        {
            /* Yes, remove it from the empty list */
            ple = RemoveHeadList(&pPool->leEmptyList);
            pSection = CONTAINING_RECORD(ple, GDI_POOL_SECTION, leInUseLink);
            pPool->cEmptySections--;
            ASSERT(pSection->cAllocCount == 0);
        }
        else
        {
            /* No, allocate a new section */
            pSection = GdiPoolAllocateSection(pPool);
            if (!pSection)
            {
                DPRINT1("Couldn't allocate a section\n");
                pvAlloc = NULL;
                goto done;
            }
        }

        /* Insert it into the in-use and ready list */
        InsertHeadList(&pPool->leInUseList, &pSection->leInUseLink);
        InsertHeadList(&pPool->leReadyList, &pSection->leReadyLink);
    }

    /* Find and set a single bit */
    ulIndex = RtlFindClearBitsAndSet(&pSection->bitmap, 1, 0);
    ASSERT(ulIndex != MAXULONG);

    /* Calculate the allocation address */
    cjOffset = ulIndex * pPool->cjAllocSize;
    pvAlloc = (PVOID)((ULONG_PTR)pSection->pvBaseAddress + cjOffset);

    /* Check if memory is committed */
    ulPageBit = 1 << (cjOffset / PAGE_SIZE);
    ulPageBit |= 1 << ((cjOffset + pPool->cjAllocSize - 1) / PAGE_SIZE);
    if ((pSection->ulCommitBitmap & ulPageBit) != ulPageBit)
    {
        /* Commit the pages */
        pvBaseAddress = PAGE_ALIGN(pvAlloc);
        cjSize = ADDRESS_AND_SIZE_TO_SPAN_PAGES(pvAlloc, pPool->cjAllocSize) * PAGE_SIZE;
        status = ZwAllocateVirtualMemory(NtCurrentProcess(),
                                         &pvBaseAddress,
                                         0,
                                         &cjSize,
                                         MEM_COMMIT,
                                         PAGE_READWRITE);
        if (!NT_SUCCESS(status))
        {
            pvAlloc = NULL;
            goto done;
        }

        pSection->ulCommitBitmap |= ulPageBit;
    }

    /* Increase alloc count */
    pSection->cAllocCount++;
    ASSERT(RtlNumberOfSetBits(&pSection->bitmap) == pSection->cAllocCount);
    DBG_LOGEVENT(&pPool->slhLog, EVENT_ALLOCATE, pvAlloc);

    /* Check if section is now busy */
    if (pSection->cAllocCount == pPool->cSlotsPerSection)
    {
        /* Remove the section from the ready list */
        RemoveEntryList(&pSection->leReadyLink);
    }

done:
    /* Release the pool lock and enable APCs */
    ExReleasePushLockExclusive(&pPool->pushlock);
    KeLeaveCriticalRegion();

    DPRINT("GdiPoolallocate: %p\n", pvAlloc);
    return pvAlloc;
}
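GdiPoolAllocate above only claims a slot; as a rough, hedged sketch, the release side would clear the same bit and update the allocation count. Only the structure fields and the RTL/Ex calls are taken from the excerpt; the function name GdiPoolFreeSlot is an assumption, and list maintenance (moving sections between the ready, in-use and empty lists) is omitted.

VOID
NTAPI
GdiPoolFreeSlot(
    PGDI_POOL pPool,
    PGDI_POOL_SECTION pSection,
    PVOID pvAlloc)
{
    ULONG ulIndex;

    /* Disable APCs and acquire the pool lock, as in GdiPoolAllocate */
    KeEnterCriticalRegion();
    ExAcquirePushLockExclusive(&pPool->pushlock);

    /* Recover the slot index from the address and clear its bit */
    ulIndex = (ULONG)(((ULONG_PTR)pvAlloc - (ULONG_PTR)pSection->pvBaseAddress)
                      / pPool->cjAllocSize);
    RtlClearBits(&pSection->bitmap, ulIndex, 1);
    pSection->cAllocCount--;

    ExReleasePushLockExclusive(&pPool->pushlock);
    KeLeaveCriticalRegion();
}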
Example #8
/** 
* NtfsAllocateClusters 
* Allocates a run of clusters. The run allocated might be smaller than DesiredClusters.
*/
NTSTATUS
NtfsAllocateClusters(PDEVICE_EXTENSION DeviceExt,
                     ULONG FirstDesiredCluster,
                     ULONG DesiredClusters, 
                     PULONG FirstAssignedCluster, 
                     PULONG AssignedClusters)
{
    NTSTATUS Status;
    PFILE_RECORD_HEADER BitmapRecord;
    PNTFS_ATTR_CONTEXT DataContext;
    ULONGLONG BitmapDataSize;
    PUCHAR BitmapData;
    ULONGLONG FreeClusters = 0;
    RTL_BITMAP Bitmap;
    ULONG AssignedRun;
    ULONG LengthWritten;

    DPRINT1("NtfsAllocateClusters(%p, %lu, %lu, %p, %p)\n", DeviceExt, FirstDesiredCluster, DesiredClusters, FirstAssignedCluster, AssignedClusters);

    BitmapRecord = ExAllocateFromNPagedLookasideList(&DeviceExt->FileRecLookasideList);
    if (BitmapRecord == NULL)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = ReadFileRecord(DeviceExt, NTFS_FILE_BITMAP, BitmapRecord);
    if (!NT_SUCCESS(Status))
    {
        ExFreeToNPagedLookasideList(&DeviceExt->FileRecLookasideList, BitmapRecord);
        return Status;
    }

    Status = FindAttribute(DeviceExt, BitmapRecord, AttributeData, L"", 0, &DataContext, NULL);
    if (!NT_SUCCESS(Status))
    {
        ExFreeToNPagedLookasideList(&DeviceExt->FileRecLookasideList, BitmapRecord);
        return Status;
    }

    BitmapDataSize = AttributeDataLength(DataContext->pRecord);
    BitmapDataSize = min(BitmapDataSize, 0xffffffff);
    ASSERT((BitmapDataSize * 8) >= DeviceExt->NtfsInfo.ClusterCount);
    BitmapData = ExAllocatePoolWithTag(NonPagedPool, ROUND_UP(BitmapDataSize, DeviceExt->NtfsInfo.BytesPerSector), TAG_NTFS);
    if (BitmapData == NULL)
    {
        ReleaseAttributeContext(DataContext);
        ExFreeToNPagedLookasideList(&DeviceExt->FileRecLookasideList, BitmapRecord);
        return  STATUS_INSUFFICIENT_RESOURCES;
    }

    DPRINT1("Total clusters: %I64x\n", DeviceExt->NtfsInfo.ClusterCount);
    DPRINT1("Total clusters in bitmap: %I64x\n", BitmapDataSize * 8);
    DPRINT1("Diff in size: %I64d B\n", ((BitmapDataSize * 8) - DeviceExt->NtfsInfo.ClusterCount) * DeviceExt->NtfsInfo.SectorsPerCluster * DeviceExt->NtfsInfo.BytesPerSector);

    ReadAttribute(DeviceExt, DataContext, 0, (PCHAR)BitmapData, (ULONG)BitmapDataSize);

    RtlInitializeBitMap(&Bitmap, (PULONG)BitmapData, DeviceExt->NtfsInfo.ClusterCount);
    FreeClusters = RtlNumberOfClearBits(&Bitmap);

    if (FreeClusters < DesiredClusters)
    {
        ReleaseAttributeContext(DataContext);

        ExFreePoolWithTag(BitmapData, TAG_NTFS);
        ExFreeToNPagedLookasideList(&DeviceExt->FileRecLookasideList, BitmapRecord);
        return STATUS_DISK_FULL;
    }
    
    // TODO: Observe MFT reservation zone

    // Can we get one contiguous run?
    AssignedRun = RtlFindClearBitsAndSet(&Bitmap, DesiredClusters, FirstDesiredCluster);

    if (AssignedRun != 0xFFFFFFFF)
    {
        *FirstAssignedCluster = AssignedRun;
        *AssignedClusters = DesiredClusters;
    }
    else
    {
        // We can't get one contiguous run, so take the next clear run we can find
        *AssignedClusters = RtlFindNextForwardRunClear(&Bitmap, FirstDesiredCluster, FirstAssignedCluster);

        if (*AssignedClusters == 0)
        {
            // We couldn't find any run starting at FirstDesiredCluster
            *AssignedClusters = RtlFindLongestRunClear(&Bitmap, FirstAssignedCluster);
        }

        // Don't assign more clusters than were requested
        if (*AssignedClusters > DesiredClusters)
            *AssignedClusters = DesiredClusters;

        // Mark the run as in use so the change is reflected in the bitmap we write back
        RtlSetBits(&Bitmap, *FirstAssignedCluster, *AssignedClusters);
    }
                
    Status = WriteAttribute(DeviceExt, DataContext, 0, BitmapData, (ULONG)BitmapDataSize, &LengthWritten, BitmapRecord);
    
    ReleaseAttributeContext(DataContext);

    ExFreePoolWithTag(BitmapData, TAG_NTFS);
    ExFreeToNPagedLookasideList(&DeviceExt->FileRecLookasideList, BitmapRecord);

    return Status;
}
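As the header comment notes, NtfsAllocateClusters may return fewer clusters than requested. A hypothetical caller therefore has to loop until everything it needs has been assigned; in this sketch the name AllocateAllClusters and the AddRun bookkeeping comment are illustrative, not part of the driver.

NTSTATUS
AllocateAllClusters(PDEVICE_EXTENSION DeviceExt, ULONG NeededClusters)
{
    NTSTATUS Status = STATUS_SUCCESS;
    ULONG FirstCluster, Assigned;
    ULONG Hint = 0;

    while (NeededClusters != 0)
    {
        Status = NtfsAllocateClusters(DeviceExt, Hint, NeededClusters,
                                      &FirstCluster, &Assigned);
        if (!NT_SUCCESS(Status))
            break;                       /* e.g. STATUS_DISK_FULL */

        if (Assigned == 0)
            return STATUS_DISK_FULL;     /* defensive: avoid looping forever */

        /* AddRun(FirstCluster, Assigned); -- hypothetical per-run bookkeeping */
        NeededClusters -= Assigned;
        Hint = FirstCluster + Assigned;  /* continue searching after the last run */
    }

    return Status;
}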
Example #9
VOID
IoFreeMapRegisters(
   PADAPTER_OBJECT AdapterObject,
   PVOID MapRegisterBase,
   ULONG NumberOfMapRegisters
   )
/*++

Routine Description:

   This routine deallocates the map registers for the adapter.  If there are
   any adapters queued waiting for map registers, an attempt is made to
   allocate registers for the next entry in the queue.

Arguments:

   AdapterObject - The adapter object to where the map register should be
        returned.

   MapRegisterBase - The map register base of the registers to be deallocated.

   NumberOfMapRegisters - The number of registers to be deallocated.

Return Value:

   None

--*/
{
   PADAPTER_OBJECT MasterAdapter;
   LONG MapRegisterNumber;
   PWAIT_CONTEXT_BLOCK Wcb;
   PLIST_ENTRY Packet;
   IO_ALLOCATION_ACTION Action;
   KIRQL Irql;


    //
    // Begin by getting the address of the master adapter.
    //

    if (AdapterObject->MasterAdapter != NULL && MapRegisterBase != NULL) {

        MasterAdapter = AdapterObject->MasterAdapter;

    } else {

        //
        // There are no map registers to return.
        //

        return;
    }

   //
   // Strip no scatter/gather flag.
   //

   MapRegisterBase = (PVOID) ((ULONG) MapRegisterBase & ~NO_SCATTER_GATHER);

   MapRegisterNumber = (PTRANSLATION_ENTRY) MapRegisterBase -
        (PTRANSLATION_ENTRY) MasterAdapter->MapRegisterBase;

   //
   // Acquire the master adapter spinlock which locks the adapter queue and the
   // bit map for the map registers.
   //

   Irql = KfAcquireSpinLock(&MasterAdapter->SpinLock);

   //
   // Return the registers to the bit map.
   //

   RtlClearBits( MasterAdapter->MapRegisters,
                 MapRegisterNumber,
                 NumberOfMapRegisters
                 );

   //
   // Process any requests waiting for map registers in the adapter queue.
   // Requests are processed until a request cannot be satisfied or until
   // there are no more requests in the queue.
   //

   while(TRUE) {

      if ( IsListEmpty(&MasterAdapter->AdapterQueue) ){
         break;
      }

      Packet = RemoveHeadList( &MasterAdapter->AdapterQueue );
      AdapterObject = CONTAINING_RECORD( Packet,
                                         ADAPTER_OBJECT,
                                         AdapterQueue
                                         );
      Wcb = AdapterObject->CurrentWcb;

      //
      // Attempt to allocate map registers for this request. Use the previous
      // register base as a hint.
      //

      MapRegisterNumber = RtlFindClearBitsAndSet( MasterAdapter->MapRegisters,
                                               AdapterObject->NumberOfMapRegisters,
                                               MasterAdapter->NumberOfMapRegisters
                                               );

      if (MapRegisterNumber == -1) {

         //
         // There were not enough free map registers.  Put this request back on
         // the adapter queue where it came from.
         //

         InsertHeadList( &MasterAdapter->AdapterQueue,
                         &AdapterObject->AdapterQueue
                         );

         break;

      }

     KfReleaseSpinLock( &MasterAdapter->SpinLock, Irql );

     AdapterObject->MapRegisterBase = (PVOID) ((PTRANSLATION_ENTRY)
        MasterAdapter->MapRegisterBase + MapRegisterNumber);

     //
     // Set the no scatter/gather flag if scatter/gather not
     // supported.
     //

     if (!AdapterObject->ScatterGather) {

        AdapterObject->MapRegisterBase = (PVOID)
            ((ULONG) AdapterObject->MapRegisterBase | NO_SCATTER_GATHER);

     }

     //
     // Invoke the driver's execution routine now.
     //

      Action = Wcb->DeviceRoutine( Wcb->DeviceObject,
        Wcb->CurrentIrp,
        AdapterObject->MapRegisterBase,
        Wcb->DeviceContext );

      //
      // If the driver wishes to keep the map registers then set the number
      // allocated to zero and set the action to deallocate object.
      //

      if (Action == DeallocateObjectKeepRegisters) {
          AdapterObject->NumberOfMapRegisters = 0;
          Action = DeallocateObject;
      }

      //
      // If the driver would like to have the adapter deallocated,
      // then deallocate any map registers allocated and then release
      // the adapter object.
      //

      if (Action == DeallocateObject) {

             //
             // The map registers are deallocated here rather than in
             // IoFreeAdapterChannel.  This limits the number of times
             // this routine can be called recursively, possibly overflowing
             // the stack.  The worst case occurs if there is a pending
             // request for the adapter that uses map registers and whose
             // execution routine deallocates the adapter.  In that case, if there
             // are no requests in the master adapter queue, then IoFreeMapRegisters
             // will get called again.
             //

          if (AdapterObject->NumberOfMapRegisters != 0) {

             //
             // Deallocate the map registers and clear the count so that
             // IoFreeAdapterChannel will not deallocate them again.
             //

             Irql = KfAcquireSpinLock( &MasterAdapter->SpinLock );

             RtlClearBits( MasterAdapter->MapRegisters,
                           MapRegisterNumber,
                           AdapterObject->NumberOfMapRegisters
                           );

             AdapterObject->NumberOfMapRegisters = 0;

             KfReleaseSpinLock( &MasterAdapter->SpinLock, Irql );
          }

          IoFreeAdapterChannel( AdapterObject );
      }

      Irql = KfAcquireSpinLock( &MasterAdapter->SpinLock );

   }

   KfReleaseSpinLock( &MasterAdapter->SpinLock, Irql );
}
Example #10
VOID
IoFreeAdapterChannel(
    IN PADAPTER_OBJECT AdapterObject
    )

/*++

Routine Description:

    This routine is invoked to deallocate the specified adapter object.
    Any map registers that were allocated are also automatically deallocated.
    No checks are made to ensure that the adapter is really allocated to
    a device object.  However, if it is not, then kernel will bugcheck.

    If another device is waiting in the queue to allocate the adapter object
    it will be pulled from the queue and its execution routine will be
    invoked.

Arguments:

    AdapterObject - Pointer to the adapter object to be deallocated.

Return Value:

    None.

--*/

{
    PKDEVICE_QUEUE_ENTRY Packet;
    PWAIT_CONTEXT_BLOCK Wcb;
    PADAPTER_OBJECT MasterAdapter;
    BOOLEAN Busy = FALSE;
    IO_ALLOCATION_ACTION Action;
    KIRQL Irql;
    LONG MapRegisterNumber;

    //
    // Begin by getting the address of the master adapter.
    //

    MasterAdapter = AdapterObject->MasterAdapter;

    //
    // Pull requests off the adapter's device wait queue as long as the
    // adapter is free and there are sufficient map registers available.
    //

    while( TRUE ) {

       //
       // Begin by checking to see whether there are any map registers that
       // need to be deallocated.  If so, then deallocate them now.
       //

       if (AdapterObject->NumberOfMapRegisters != 0) {
           IoFreeMapRegisters( AdapterObject,
                               AdapterObject->MapRegisterBase,
                               AdapterObject->NumberOfMapRegisters
                               );
       }

       //
       // Simply remove the next entry from the adapter's device wait queue.
       // If one was successfully removed, allocate any map registers that it
       // requires and invoke its execution routine.
       //

       Packet = KeRemoveDeviceQueue( &AdapterObject->ChannelWaitQueue );
       if (Packet == NULL) {

           //
           // There are no more requests; break out of the loop.
           //

           break;
       }

       Wcb = CONTAINING_RECORD( Packet,
            WAIT_CONTEXT_BLOCK,
            WaitQueueEntry );

       AdapterObject->CurrentWcb = Wcb;
       AdapterObject->NumberOfMapRegisters = Wcb->NumberOfMapRegisters;

        //
        // Check to see whether this driver wishes to allocate any map
        // registers.  If so, then queue the device object to the master
        // adapter queue to wait for them to become available.  If the driver
        // wants map registers, ensure that this adapter has enough total
        // map registers to satisfy the request.
        //

        if (Wcb->NumberOfMapRegisters != 0 &&
            AdapterObject->MasterAdapter != NULL) {

            //
            // Lock the map register bit map and the adapter queue in the
            // master adapter object. The channel structure offset is used as
            // a hint for the register search.
            //

            Irql = KfAcquireSpinLock( &MasterAdapter->SpinLock );

            MapRegisterNumber = -1;

            if (IsListEmpty( &MasterAdapter->AdapterQueue)) {
               MapRegisterNumber = RtlFindClearBitsAndSet( MasterAdapter->MapRegisters,
                                                        Wcb->NumberOfMapRegisters,
                                                        0
                                                        );
            }
            if (MapRegisterNumber == -1) {

               //
               // There were not enough free map registers.  Queue this request
               // on the master adapter where it will wait until some registers
               // are deallocated.
               //

               InsertTailList( &MasterAdapter->AdapterQueue,
                               &AdapterObject->AdapterQueue
                               );
               Busy = TRUE;

            } else {

                AdapterObject->MapRegisterBase = ((PTRANSLATION_ENTRY)
                    MasterAdapter->MapRegisterBase + MapRegisterNumber);

                //
                // Set the no scatter/gather flag if scatter/gather not
                // supported.
                //

                if (!AdapterObject->ScatterGather) {

                    AdapterObject->MapRegisterBase = (PVOID)
                        ((ULONG) AdapterObject->MapRegisterBase | NO_SCATTER_GATHER);

                }
            }

            KfReleaseSpinLock( &MasterAdapter->SpinLock, Irql );

        } else {

            AdapterObject->MapRegisterBase = NULL;
            AdapterObject->NumberOfMapRegisters = 0;

        }

        //
        // If there were either enough map registers available or no map
        // registers needed to be allocated, invoke the driver's execution
        // routine now.
        //

        if (!Busy) {
            AdapterObject->CurrentWcb = Wcb;
            Action = Wcb->DeviceRoutine( Wcb->DeviceObject,
                Wcb->CurrentIrp,
                AdapterObject->MapRegisterBase,
                Wcb->DeviceContext );

            //
            // If the execution routine would like to have the adapter
            // deallocated, then release the adapter object.
            //

            if (Action == KeepObject) {

               //
               // This request wants to keep the channel a while so break
               // out of the loop.
               //

               break;

            }

            //
            // If the driver wants to keep the map registers then set the
            // number allocated to 0.  This keeps the deallocation routine
            // from deallocating them.
            //

            if (Action == DeallocateObjectKeepRegisters) {
                AdapterObject->NumberOfMapRegisters = 0;
            }

        } else {

           //
           // This request did not get the requested number of map registers, so
           // break out of the loop.
           //

           break;
        }
    }
}
Example #11
void
Test_RtlFindClearBitsAndSet(void)
{
    RTL_BITMAP BitMapHeader;
    ULONG *Buffer;

    Buffer = AllocateGuarded(2 * sizeof(*Buffer));
    Buffer[0] = 0x060F874D;
    Buffer[1] = 0x3F303F30;

    RtlInitializeBitMap(&BitMapHeader, Buffer, 0);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 0, 0), 0);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 0, 3), 0);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 1, 0), -1);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 1, 1), -1);
    ok_hex(Buffer[0], 0x060F874D);

    Buffer[0] = 0x060F874D;
    RtlInitializeBitMap(&BitMapHeader, Buffer, 8);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 1, 0), 1);
    ok_hex(Buffer[0], 0x60f874f);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 1, 1), 4);
    ok_hex(Buffer[0], 0x60f875f);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 1, 2), 5);
    ok_hex(Buffer[0], 0x60f877f);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 2, 0), -1);
    ok_hex(Buffer[0], 0x60f877f);

    Buffer[0] = 0x060F874D;
    RtlInitializeBitMap(&BitMapHeader, Buffer, 32);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 4, 0), 11);
    ok_hex(Buffer[0], 0x60fff4d);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 5, 0), 20);
    ok_hex(Buffer[0], 0x7ffff4d);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 4, 11), 27);
    ok_hex(Buffer[0], 0x7fffff4d);

    Buffer[0] = 0x060F874D;
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 4, 12), 20);
    ok_hex(Buffer[0], 0x6ff874d);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 2, 11), 11);
    ok_hex(Buffer[0], 0x6ff9f4d);
    ok_int(RtlFindClearBitsAndSet(&BitMapHeader, 2, 12), 13);
    ok_hex(Buffer[0], 0x6ffff4d);
    FreeGuarded(Buffer);
}
Example #12
ULONG NtdllBitmap::FindClearBitsAndSet( ULONG number_to_find, ULONG hint_index )
{
	assert(RtlFindClearBitsAndSet != NULL);
	return RtlFindClearBitsAndSet(this, number_to_find, hint_index);
}
Example #13
UINT_PTR FASTCALL
IntSetTimer( PWND Window,
                  UINT_PTR IDEvent,
                  UINT Elapse,
                  TIMERPROC TimerFunc,
                  INT Type)
{
  PTIMER pTmr;
  UINT Ret = IDEvent;
  LARGE_INTEGER DueTime;
  DueTime.QuadPart = (LONGLONG)(-5000000);

#if 0
  /* Windows NT/2k/XP behaviour */
  if (Elapse > MAX_ELAPSE_TIME)
  {
     TRACE("Adjusting uElapse\n");
     Elapse = 1;
  }
#else
  /* Windows XP SP2 and Windows Server 2003 behaviour */
  if (Elapse > MAX_ELAPSE_TIME)
  {
     TRACE("Adjusting uElapse\n");
     Elapse = MAX_ELAPSE_TIME;
  }
#endif

  /* Windows 2k/XP and Windows Server 2003 SP1 behaviour */
  if (Elapse < 10)
  {
     TRACE("Adjusting uElapse\n");
     Elapse = 10;
  }

  /* If a window is given and IDEvent is 0, SetTimer returns 1,
     but the timer itself is created with an ID of 0 */
  if ((Window) && (IDEvent == 0))
     Ret = 1;

  pTmr = FindTimer(Window, IDEvent, Type);

  if ((!pTmr) && (Window == NULL) && (!(Type & TMRF_SYSTEM)))
  {
      IntLockWindowlessTimerBitmap();

      IDEvent = RtlFindClearBitsAndSet(&WindowLessTimersBitMap, 1, HintIndex);

      if (IDEvent == (UINT_PTR) -1)
      {
         IntUnlockWindowlessTimerBitmap();
         ERR("Unable to find a free window-less timer id\n");
         EngSetLastError(ERROR_NO_SYSTEM_RESOURCES);
         ASSERT(FALSE);
         return 0;
      }

      IDEvent = NUM_WINDOW_LESS_TIMERS - IDEvent;
      Ret = IDEvent;

      IntUnlockWindowlessTimerBitmap();
  }

  if (!pTmr)
  {
     pTmr = CreateTimer();
     if (!pTmr) return 0;

     if (Window && (Type & TMRF_TIFROMWND))
        pTmr->pti = Window->head.pti->pEThread->Tcb.Win32Thread;
     else
     {
        if (Type & TMRF_RIT)
           pTmr->pti = ptiRawInput;
        else
           pTmr->pti = PsGetCurrentThreadWin32Thread();
     }

     pTmr->pWnd    = Window;
     pTmr->cmsCountdown = Elapse;
     pTmr->cmsRate = Elapse;
     pTmr->pfn     = TimerFunc;
     pTmr->nID     = IDEvent;
     pTmr->flags   = Type|TMRF_INIT;
  }
  else
  {
     pTmr->cmsCountdown = Elapse;
     pTmr->cmsRate = Elapse;
  }

  ASSERT(MasterTimer != NULL);
  // Start the timer thread!
  if (TimersListHead.Flink == TimersListHead.Blink) // There is only one timer
     KeSetTimer(MasterTimer, DueTime, NULL);

  return Ret;
}
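The excerpt above only allocates a window-less timer id; the cleanup path lives elsewhere in the timer code. A hedged sketch of what releasing such an id involves, reusing WindowLessTimersBitMap, NUM_WINDOW_LESS_TIMERS and the lock helpers from the excerpt (the function name IntReleaseWindowlessTimerId is an assumption):

VOID FASTCALL
IntReleaseWindowlessTimerId(UINT_PTR IDEvent)
{
    /* Undo the IDEvent = NUM_WINDOW_LESS_TIMERS - Index mapping used above */
    ULONG Index = NUM_WINDOW_LESS_TIMERS - (ULONG)IDEvent;

    IntLockWindowlessTimerBitmap();
    RtlClearBits(&WindowLessTimersBitMap, Index, 1);
    IntUnlockWindowlessTimerBitmap();
}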