/*
 * FUNCTION: Walk the user-page LRU list and try to page out up to Target
 *           pages of user memory.
 * ARGUMENTS:
 *     Target       - Maximum number of pages to free.
 *     Priority     - Trim priority (currently unused).
 *     NrFreedPages - Receives the number of pages actually freed.
 * RETURNS: Always STATUS_SUCCESS; partial progress is reported via
 *          NrFreedPages rather than a failure status.
 */
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
    PFN_NUMBER CurrentPage;
    PFN_NUMBER NextPage;
    NTSTATUS Status;

    (*NrFreedPages) = 0;

    CurrentPage = MmGetLRUFirstUserPage();
    while (CurrentPage != 0 && Target > 0)
    {
        /* Fetch the successor first: paging out CurrentPage may unlink it
         * from the LRU list, invalidating a later lookup. */
        NextPage = MmGetLRUNextUserPage(CurrentPage);

        Status = MmPageOutPhysicalAddress(CurrentPage);
        if (NT_SUCCESS(Status))
        {
            DPRINT("Succeeded\n");
            Target--;
            (*NrFreedPages)++;
        }

        /* The LRU walk is circular: a successor at or before the current
         * PFN means we wrapped around. Bail out instead of spinning
         * forever over pages that refuse to be paged out. */
        if (NextPage != 0 && NextPage <= CurrentPage)
        {
            break;
        }
        CurrentPage = NextPage;
    }

    return STATUS_SUCCESS;
}
/*
 * FUNCTION: Scan the user-page LRU list once, attempting to page out
 *           memory until Target pages have been freed or the list is
 *           exhausted.
 * ARGUMENTS:
 *     Target       - Maximum number of pages to free.
 *     Priority     - Trim priority (currently unused).
 *     NrFreedPages - Receives the count of pages actually freed.
 * RETURNS: STATUS_SUCCESS unconditionally; the caller inspects
 *          NrFreedPages for the outcome.
 */
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
    PFN_NUMBER Page;
    PFN_NUMBER Successor;
    NTSTATUS Status;

    *NrFreedPages = 0;

    for (Page = MmGetLRUFirstUserPage(); Page != 0 && Target > 0; Page = Successor)
    {
        /* Attempt the page-out first; only successful evictions count
         * against the target. */
        Status = MmPageOutPhysicalAddress(Page);
        if (NT_SUCCESS(Status))
        {
            DPRINT("Succeeded\n");
            Target--;
            (*NrFreedPages)++;
        }

        /* The list is circular: once the successor PFN is not strictly
         * greater than the current one, we have wrapped around and the
         * single pass is complete. */
        Successor = MmGetLRUNextUserPage(Page);
        if (Successor <= Page)
        {
            break;
        }
    }

    return STATUS_SUCCESS;
}
NTSTATUS CcRosTrimCache ( ULONG Target, ULONG Priority, PULONG NrFreed) /* * FUNCTION: Try to free some memory from the file cache. * ARGUMENTS: * Target - The number of pages to be freed. * Priority - The priority of free (currently unused). * NrFreed - Points to a variable where the number of pages * actually freed is returned. */ { PLIST_ENTRY current_entry; PROS_VACB current; ULONG PagesFreed; KIRQL oldIrql; LIST_ENTRY FreeList; PFN_NUMBER Page; ULONG i; BOOLEAN FlushedPages = FALSE; DPRINT("CcRosTrimCache(Target %lu)\n", Target); InitializeListHead(&FreeList); *NrFreed = 0; retry: KeAcquireGuardedMutex(&ViewLock); current_entry = VacbLruListHead.Flink; while (current_entry != &VacbLruListHead) { current = CONTAINING_RECORD(current_entry, ROS_VACB, VacbLruListEntry); current_entry = current_entry->Flink; KeAcquireSpinLock(¤t->SharedCacheMap->CacheMapLock, &oldIrql); /* Reference the VACB */ CcRosVacbIncRefCount(current); /* Check if it's mapped and not dirty */ if (current->MappedCount > 0 && !current->Dirty) { /* We have to break these locks because Cc sucks */ KeReleaseSpinLock(¤t->SharedCacheMap->CacheMapLock, oldIrql); KeReleaseGuardedMutex(&ViewLock); /* Page out the VACB */ for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++) { Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT); MmPageOutPhysicalAddress(Page); } /* Reacquire the locks */ KeAcquireGuardedMutex(&ViewLock); KeAcquireSpinLock(¤t->SharedCacheMap->CacheMapLock, &oldIrql); } /* Dereference the VACB */ CcRosVacbDecRefCount(current); /* Check if we can free this entry now */ if (current->ReferenceCount == 0) { ASSERT(!current->Dirty); ASSERT(!current->MappedCount); RemoveEntryList(¤t->CacheMapVacbListEntry); RemoveEntryList(¤t->VacbLruListEntry); InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry); /* Calculate how many pages we freed for Mm */ PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target); Target -= PagesFreed; 
(*NrFreed) += PagesFreed; } KeReleaseSpinLock(¤t->SharedCacheMap->CacheMapLock, oldIrql); }