/*
 * RemoveTimer
 *
 * Unlinks a timer from the global timer list and destroys its object.
 * Window-less, non-system timers additionally release their slot in the
 * window-less timer ID bitmap so the ID can be handed out again.
 *
 * pTmr    - timer to remove; NULL is tolerated (treated as failure).
 * Returns TRUE when the timer object was successfully deleted.
 */
static BOOL FASTCALL RemoveTimer(PTIMER pTmr)
{
    BOOL bDeleted = FALSE;

    if (pTmr)
    {
        /* Take the timer off the global list */
        RemoveEntryList(&pTmr->ptmrList);

        /* A window-less, non-system timer owns a bitmap slot; give it
           back. System timers are reusable, so they keep their slot. */
        if (pTmr->pWnd == NULL && !(pTmr->flags & TMRF_SYSTEM))
        {
            UINT_PTR IDEvent = NUM_WINDOW_LESS_TIMERS - pTmr->nID;

            IntLockWindowlessTimerBitmap();
            RtlClearBit(&WindowLessTimersBitMap, IDEvent);
            IntUnlockWindowlessTimerBitmap();
        }

        /* Drop our reference and destroy the timer object itself */
        UserDereferenceObject(pTmr);
        bDeleted = UserDeleteObject(UserHMGetHandle(pTmr), otTimer);
    }

    if (!bDeleted) ERR("Warning: Unable to delete timer\n");

    return bDeleted;
}
/*
 * MiFreePoolPages
 *
 * Frees the pages backing a pool allocation and returns how many pages
 * were released.
 *
 * NOTE(review): only the paged-pool path is visible in this chunk; the
 * unused locals (Pfn1, StartPfn, OldIrql, FreeEntry, ...) strongly suggest
 * the nonpaged-pool handling follows past this view — confirm against the
 * full file.
 */
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_COUNT FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;
    ULONG_PTR Offset;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
        i = (ULONG)(Offset >> PAGE_SHIFT);
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans. If it's
        // only one page, add it to the S-LIST instead of freeing it
        //
        NumberOfPages = End - i + 1;
        if ((NumberOfPages == 1) &&
            (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
        {
            /* Single-page free: recycle via the lookaside S-LIST to avoid
               the full unmap + bitmap update. Pages stay mapped. */
            InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
            return 1;
        }

        /* Delete the actual pages (unmaps and releases the PFNs) */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }
/*
 * GdiPoolFree
 *
 * Returns an allocation to its GDI pool. Locates the owning section by
 * scanning the in-use list, clears the allocation's bit in that section's
 * bitmap, and migrates the section between the ready/empty lists as its
 * occupancy changes. Asserts (and would bugcheck) if pvAlloc does not
 * belong to any in-use section of this pool.
 */
VOID
NTAPI
GdiPoolFree(
    PGDI_POOL pPool,
    PVOID pvAlloc)
{
    PLIST_ENTRY ple;
    PGDI_POOL_SECTION pSection = NULL;
    ULONG_PTR cjOffset;
    ULONG ulIndex;
    DPRINT("GdiPoolFree: %p\n", pvAlloc);

    /* Disable APCs and acquire the pool lock */
    KeEnterCriticalRegion();
    ExAcquirePushLockExclusive(&pPool->pushlock);

    /* Loop all used sections */
    for (ple = pPool->leInUseList.Flink;
         ple != &pPool->leInUseList;
         ple = ple->Flink)
    {
        /* Get the pointer to the section */
        pSection = CONTAINING_RECORD(ple, GDI_POOL_SECTION, leInUseLink);

        /* Calculate offset */
        cjOffset = (ULONG_PTR)pvAlloc - (ULONG_PTR)pSection->pvBaseAddress;

        /* Check if the allocation is from this section
           (unsigned subtraction: addresses below the base wrap to a huge
           value and fail this test as well) */
        if (cjOffset < pPool->cjSectionSize)
        {
            /* Calculate the index of the allocation */
            ulIndex = cjOffset / pPool->cjAllocSize;

            /* Mark it as free (must have been allocated — double free trips
               this assert) */
            ASSERT(RtlTestBit(&pSection->bitmap, ulIndex) == TRUE);
            RtlClearBit(&pSection->bitmap, ulIndex);

            /* Decrease allocation count */
            pSection->cAllocCount--;
            ASSERT(RtlNumberOfSetBits(&pSection->bitmap) == pSection->cAllocCount);
            DBG_LOGEVENT(&pPool->slhLog, EVENT_FREE, pvAlloc);

            /* Check if the section got valid now — it just dropped from
               completely full to having one free slot, so it becomes a
               candidate for future allocations */
            if (pSection->cAllocCount == pPool->cSlotsPerSection - 1)
            {
                /* Insert it into the ready list */
                InsertTailList(&pPool->leReadyList, &pSection->leReadyLink);
            }
            /* Check if it got empty now */
            else if (pSection->cAllocCount == 0)
            {
                /* Remove the section from the lists (it is on both the
                   in-use and the ready list at this point) */
                RemoveEntryList(&pSection->leInUseLink);
                RemoveEntryList(&pSection->leReadyLink);

                if (pPool->cEmptySections >= 1)
                {
                    /* Already caching an empty section — delete this one */
                    GdiPoolDeleteSection(pPool, pSection);
                }
                else
                {
                    /* Insert it into the empty list (cache one empty
                       section to avoid churn) */
                    InsertHeadList(&pPool->leEmptyList, &pSection->leInUseLink);
                    pPool->cEmptySections++;
                }
            }

            goto done;
        }
    }

    /* pvAlloc was not found in any in-use section */
    DbgPrint("failed to free. pvAlloc=%p, base=%p, size=%lx\n",
             pvAlloc, pSection ? pSection->pvBaseAddress : NULL,
             pPool->cjSectionSize);
    ASSERT(FALSE); // KeBugCheck()

done:
    /* Release the pool lock and enable APCs */
    ExReleasePushLockExclusive(&pPool->pushlock);
    KeLeaveCriticalRegion();
}
/*
 * MiFreePoolPages
 *
 * Frees the pages backing a pool allocation and returns how many pages
 * were released. (Earlier variant without the single-page S-LIST fast
 * path present in the other copy of this function in this file.)
 *
 * NOTE(review): only the paged-pool path is visible in this chunk; the
 * unused locals (Pfn1, StartPfn, OldIrql, FreeEntry, ...) strongly suggest
 * the nonpaged-pool handling follows past this view — confirm against the
 * full file.
 */
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;

        /* Delete the actual pages (unmaps and releases the PFNs) */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }