/*
* TsmiReadPatchChains
*
* Purpose:
*
* Read specified chains value from registry into PatchInfo.
*
* On success PatchInfo->Chains receives a paged-pool buffer (TSUGUMI_TAG)
* holding the KEY_VALUE_PARTIAL_INFORMATION data and PatchInfo->ChainsLength
* its size; the caller owns and must free the buffer. On failure no
* allocation is left behind.
*
*/
NTSTATUS TsmiReadPatchChains(
    _In_ HANDLE sKey,
    _In_ PUNICODE_STRING ParamName,
    _In_ VBOX_PATCH *PatchInfo
)
{
    KEY_VALUE_PARTIAL_INFORMATION keyinfo;
    ULONG ChainsLength = 0, bytesIO;
    NTSTATUS status;

    PAGED_CODE();

    if (sKey == NULL)
        return STATUS_INVALID_PARAMETER_1;

    if (ParamName == NULL)
        return STATUS_INVALID_PARAMETER_2;

    if (PatchInfo == NULL)
        return STATUS_INVALID_PARAMETER_3;

    //
    // Probe with a minimal buffer only to learn the required size
    // (returned in ChainsLength).
    //
    status = ZwQueryValueKey(sKey, ParamName, KeyValuePartialInformation,
        &keyinfo, sizeof(KEY_VALUE_PARTIAL_INFORMATION), &ChainsLength);

    if (NT_SUCCESS(status)) {
        return STATUS_BUFFER_TOO_SMALL; // The key value is empty. It should not success with zero-length buffer if there are some data;
    }

    // Anything other than "buffer too small" means the query itself failed.
    if ((status != STATUS_BUFFER_TOO_SMALL) && (status != STATUS_BUFFER_OVERFLOW)) {
        return status;
    }

    //
    // Allocate buffer for data with given size
    //
    PatchInfo->Chains = (PKEY_VALUE_PARTIAL_INFORMATION)ExAllocatePoolWithTagPriority(
        PagedPool, (SIZE_T)ChainsLength, TSUGUMI_TAG, NormalPoolPriority);
    if (PatchInfo->Chains == NULL)
        return STATUS_INSUFFICIENT_RESOURCES;

#ifdef _DEBUGMSG
    DbgPrint("[TSMI] ChainsLength=%lx\n", ChainsLength);
#endif //_DEBUGMSG

    RtlSecureZeroMemory(PatchInfo->Chains, ChainsLength);
    status = ZwQueryValueKey(sKey, ParamName, KeyValuePartialInformation,
        PatchInfo->Chains, ChainsLength, &bytesIO);

    if (NT_SUCCESS(status)) {
        PatchInfo->ChainsLength = ChainsLength;

#ifdef _DEBUGMSG
        TsmiListPatchChains(PatchInfo->Chains);
#endif //_DEBUGMSG

    }
    else {
        //
        // FIX: previously the buffer leaked here and PatchInfo->Chains was
        // left pointing at zeroed/garbage data. The second query can fail,
        // e.g. if the value was changed between the two calls.
        //
        ExFreePoolWithTag(PatchInfo->Chains, TSUGUMI_TAG);
        PatchInfo->Chains = NULL;
        PatchInfo->ChainsLength = 0;
    }

    return status;
}
// // Wrapper for IovAllocateIrp. Use special pool to allocate the IRP. // This is directly called from IoAllocateIrp. // PIRP IovAllocateIrp( IN CCHAR StackSize, IN BOOLEAN ChargeQuota ) { USHORT allocateSize; UCHAR fixedSize; PIRP irp; UCHAR mustSucceed; USHORT packetSize; #ifndef NO_SPECIAL_IRP // // Should we override normal lookaside caching so that we may catch // more bugs? // SPECIALIRP_IO_ALLOCATE_IRP_1(StackSize, ChargeQuota, &irp) ; if (irp) { return irp ; } #endif // // If special pool is not turned on lets just call the standard // irp allocator. // if (!(IovpVerifierFlags & DRIVER_VERIFIER_SPECIAL_POOLING )) { irp = IopAllocateIrpPrivate(StackSize, ChargeQuota); return irp; } irp = NULL; fixedSize = 0; mustSucceed = 0; packetSize = IoSizeOfIrp(StackSize); allocateSize = packetSize; // // There are no free packets on the lookaside list, or the packet is // too large to be allocated from one of the lists, so it must be // allocated from nonpaged pool. If quota is to be charged, charge it // against the current process. Otherwise, allocate the pool normally. // if (ChargeQuota) { try { irp = ExAllocatePoolWithTagPriority( NonPagedPool, allocateSize, ' prI', HighPoolPrioritySpecialPoolOverrun); } except(EXCEPTION_EXECUTE_HANDLER) { NOTHING; } } else {
PIRP
FASTCALL
IovpProtectedIrpAllocate(
    IN CCHAR StackSize,
    IN BOOLEAN ChargeQuota,
    IN PETHREAD QuotaThread OPTIONAL
    )
/*++

    Description:

        Allocates a "replacement" IRP out of the special pool so that
        overruns past the end of the packet are caught immediately.

    Arguments:

        StackSize - Number of stack locations the new IRP must carry.

        ChargeQuota - TRUE iff quota should be charged against QuotaThread.

        QuotaThread - Thread to charge quota against (optional).

    Return Value:

        Pointer to the allocated packet, or NULL on failure.

--*/
{
    SIZE_T requestBytes;
    PVOID rawBlock;

    //
    // IRPs normally come from lookaside lists and may only be ULONG
    // aligned, while the special pool guarantees quad alignment. Since
    // the packet size is a ULONG multiple, the allocation sits flush
    // against the end of the special-pool page, so any overrun faults.
    //
    requestBytes = IoSizeOfIrp(StackSize);
    ASSERT((requestBytes % (sizeof(ULONG))) == 0);

    //
    // ADRIAO BUGBUG 08/16/98 - Use a quota'd alloc function if one is available
    // later... (ChargeQuota/QuotaThread are not applied yet.)
    //
    rawBlock = ExAllocatePoolWithTagPriority(
                   NonPagedPool,
                   requestBytes,
                   POOL_TAG_PROTECTED_IRP,
                   HighPoolPrioritySpecialPoolOverrun
                   );

    return (PIRP)rawBlock;
}
VOID *
OvsAllocateAlignedMemory(size_t size, UINT16 align)
{
    OVS_VERIFY_IRQL_LE(DISPATCH_LEVEL);
    ASSERT((align == 8) || (align == 16));

    /* Invalid user input: only 8- and 16-byte alignment is supported. */
    if ((align != 8) && (align != 16)) {
        return NULL;
    }

    /*
     * XXX: NdisAllocateMemory*() functions don't talk anything about
     * alignment. Hence using ExAllocatePool*();
     */
    return (VOID *)ExAllocatePoolWithTagPriority(NonPagedPool, size,
                                                 OVS_MEMORY_TAG,
                                                 NormalPoolPriority);
}
/*
* TsmiReadPatchChains
*
* Purpose:
*
* Read specified chains value from registry.
*
* On success *ParamBuffer receives a caller-owned paged-pool buffer
* (TSUGUMI_TAG) holding the value data and *ChainsLength its size.
* On failure *ParamBuffer is NULL and *ChainsLength is 0.
*
*/
NTSTATUS TsmiReadPatchChains(
    _In_ HANDLE sKey,
    _In_ PUNICODE_STRING ParamName,
    _In_ PVOID *ParamBuffer,
    _In_ ULONG *ChainsLength
)
{
    KEY_VALUE_PARTIAL_INFORMATION keyinfo;
    NTSTATUS status;
    ULONG bytesIO = 0;
    ULONG dataLength;

    //
    // FIX: validate every pointer parameter, matching the positional
    // STATUS_INVALID_PARAMETER_n convention (previously only the 4th
    // parameter was checked, so NULL sKey/ParamName/ParamBuffer would
    // fault inside ZwQueryValueKey or on the *ParamBuffer write).
    //
    if (sKey == NULL)
        return STATUS_INVALID_PARAMETER_1;

    if (ParamName == NULL)
        return STATUS_INVALID_PARAMETER_2;

    if (ParamBuffer == NULL)
        return STATUS_INVALID_PARAMETER_3;

    if (ChainsLength == NULL)
        return STATUS_INVALID_PARAMETER_4;

    // Probe with a minimal buffer just to learn the required size.
    status = ZwQueryValueKey(sKey, ParamName, KeyValuePartialInformation,
        &keyinfo, sizeof(KEY_VALUE_PARTIAL_INFORMATION), &bytesIO);

    if (NT_SUCCESS(status))
        return STATUS_BUFFER_TOO_SMALL; // The key value is empty. It should not success with zero-length buffer if there are some data;

    if ((status != STATUS_BUFFER_TOO_SMALL) && (status != STATUS_BUFFER_OVERFLOW))
        return STATUS_INVALID_PARAMETER; // we got unexpected return

    // bytesIO contains key value data length
    dataLength = bytesIO;
    *ChainsLength = dataLength;

    *ParamBuffer = (PKEY_VALUE_PARTIAL_INFORMATION)ExAllocatePoolWithTagPriority(
        PagedPool, (SIZE_T)dataLength, TSUGUMI_TAG, NormalPoolPriority);
    if (*ParamBuffer == NULL) {
        *ChainsLength = 0;
        return STATUS_INSUFFICIENT_RESOURCES;
    }

#ifdef _DEBUGMSG
    DbgPrint("[TSMI] ChainsLength=%lx\n", *ChainsLength);
#endif

    RtlSecureZeroMemory(*ParamBuffer, dataLength);
    status = ZwQueryValueKey(sKey, ParamName, KeyValuePartialInformation,
        *ParamBuffer, dataLength, &bytesIO);

    if (NT_SUCCESS(status)) {
#ifdef _DEBUGMSG
        TsmiListPatchChains(*ParamBuffer);
#endif
    }
    else {
        //
        // FIX: previously the buffer leaked and the caller received a
        // non-NULL pointer to zeroed memory plus a stale length when the
        // second query failed.
        //
        ExFreePoolWithTag(*ParamBuffer, TSUGUMI_TAG);
        *ParamBuffer = NULL;
        *ChainsLength = 0;
    }

    return status;
}
BOOLEAN
NTAPI
InitializeMemoryManagement(VOID)
{
    PSAC_MEMORY_ENTRY FreeEntry;

    SAC_DBG(SAC_DBG_ENTRY_EXIT, "Entering\n");

    /* Allocate a nonpaged heap for us to use */
    GlobalMemoryList = ExAllocatePoolWithTagPriority(NonPagedPool,
                                                     SAC_MEMORY_LIST_SIZE,
                                                     INITIAL_BLOCK_TAG,
                                                     HighPoolPriority);
    if (!GlobalMemoryList)
    {
        /* No pool available to manage our heap */
        SAC_DBG(SAC_DBG_ENTRY_EXIT, "Exiting with FALSE. No pool.\n");
        return FALSE;
    }

    /* Initialize a lock for it */
    KeInitializeSpinLock(&MemoryLock);

    /* Initialize the head of the list */
    GlobalMemoryList->Signature = GLOBAL_MEMORY_SIGNATURE;
    GlobalMemoryList->LocalDescriptor = (PSAC_MEMORY_ENTRY)(GlobalMemoryList + 1);
    GlobalMemoryList->Size = SAC_MEMORY_LIST_SIZE - sizeof(SAC_MEMORY_LIST);
    GlobalMemoryList->Next = NULL;

    /* Initialize the first free entry */
    FreeEntry = GlobalMemoryList->LocalDescriptor;
    FreeEntry->Signature = LOCAL_MEMORY_SIGNATURE;
    FreeEntry->Tag = FREE_POOL_TAG;
    FreeEntry->Size = GlobalMemoryList->Size - sizeof(SAC_MEMORY_ENTRY);

    /* All done */
    SAC_DBG(SAC_DBG_ENTRY_EXIT, "Exiting with TRUE.\n");
    return TRUE;
}
/*
* TsmiCopyPatchChainsData
*
* Purpose:
*
* Copy/Refresh patch chains data to global variable.
*
* Serialized by g_PatchChainsLock. If the new buffer cannot be
* allocated the previous chains data is kept intact.
*
*/
VOID TsmiCopyPatchChainsData(
    _In_ PVOID *PatchChains,
    _In_ PVOID Chains,
    _In_ ULONG ChainsLength
)
{
    PVOID NewChains;

    if ((PatchChains == NULL) || (Chains == NULL) || (ChainsLength == 0))
        return;

    KeWaitForSingleObject(&g_PatchChainsLock, Executive, KernelMode, FALSE, NULL);

    //
    // FIX: allocate the replacement first and only then release the old
    // buffer. Previously the old chains were freed before the allocation,
    // so an allocation failure destroyed the existing (still valid) data.
    //
    NewChains = (PVOID)ExAllocatePoolWithTagPriority(PagedPool, ChainsLength,
        TSUGUMI_TAG, NormalPoolPriority);

    if (NewChains != NULL) {
        RtlSecureZeroMemory(NewChains, ChainsLength);
        RtlCopyMemory(NewChains, Chains, ChainsLength);

        if (*PatchChains != NULL) {
            ExFreePoolWithTag(*PatchChains, TSUGUMI_TAG);
        }
        *PatchChains = NewChains;
    }

    KeReleaseMutex(&g_PatchChainsLock, FALSE);
}
/*
 * MyAllocatePool
 *
 * Debug allocator shim: grabs zeroed nonpaged pool with a fixed 'HACK'
 * tag and returns it. The File/Line/Tag parameters are kept for the
 * (currently disabled) SAC heap allocator below.
 *
 * Returns the allocated block, or NULL if the pool allocation failed.
 */
PVOID
NTAPI
MyAllocatePool(IN SIZE_T PoolSize,
               IN ULONG Tag,
               IN PCHAR File,
               IN ULONG Line)
{
    PVOID p;

    p = ExAllocatePoolWithTag(NonPagedPool, PoolSize, 'HACK');

    //
    // FIX: ExAllocatePoolWithTag returns NULL on failure; previously the
    // result was passed straight to RtlZeroMemory, dereferencing NULL.
    //
    if (!p)
    {
        SAC_DBG(SAC_DBG_MM, "Returning block 0x%X.\n", p);
        return NULL;
    }

    RtlZeroMemory(p, PoolSize);
    SAC_DBG(SAC_DBG_MM, "Returning block 0x%X.\n", p);
    return p;

#if 0
    KIRQL OldIrql;
    PSAC_MEMORY_LIST GlobalDescriptor, NewDescriptor;
    PSAC_MEMORY_ENTRY LocalDescriptor, NextDescriptor;
    ULONG GlobalSize, ActualSize;
    PVOID Buffer;

    ASSERT(Tag != FREE_POOL_TAG);
    SAC_DBG(SAC_DBG_MM, "Entering.\n");

    /* Acquire the memory allocation lock and align the size request */
    KeAcquireSpinLock(&MemoryLock, &OldIrql);
    PoolSize = ALIGN_UP(PoolSize, ULONGLONG);

#if _USE_SAC_HEAP_ALLOCATOR_
    GlobalDescriptor = GlobalMemoryList;
    KeAcquireSpinLock(&MemoryLock, &OldIrql);
    while (GlobalDescriptor)
    {
        ASSERT(GlobalMemoryList->Signature == GLOBAL_MEMORY_SIGNATURE);

        LocalDescriptor = GlobalDescriptor->LocalDescriptor;

        GlobalSize = GlobalDescriptor->Size;
        while (GlobalSize)
        {
            ASSERT(LocalDescriptor->Signature == LOCAL_MEMORY_SIGNATURE);

            if ((LocalDescriptor->Tag == FREE_POOL_TAG) &&
                (LocalDescriptor->Size >= PoolSize))
            {
                break;
            }

            GlobalSize -= (LocalDescriptor->Size + sizeof(SAC_MEMORY_ENTRY));

            LocalDescriptor =
                (PSAC_MEMORY_ENTRY)((ULONG_PTR)LocalDescriptor +
                                    LocalDescriptor->Size +
                                    sizeof(SAC_MEMORY_ENTRY));
        }

        GlobalDescriptor = GlobalDescriptor->Next;
    }

    if (!GlobalDescriptor)
    {
        KeReleaseSpinLock(&MemoryLock, OldIrql);

        ActualSize = min(PAGE_SIZE,
                         PoolSize + sizeof(SAC_MEMORY_ENTRY) + sizeof(SAC_MEMORY_LIST));

        SAC_DBG(SAC_DBG_MM, "Allocating new space.\n");

        NewDescriptor = ExAllocatePoolWithTagPriority(NonPagedPool,
                                                      ActualSize,
                                                      ALLOC_BLOCK_TAG,
                                                      HighPoolPriority);
        if (!NewDescriptor)
        {
            SAC_DBG(SAC_DBG_MM, "No more memory, returning NULL.\n");
            return NULL;
        }

        KeAcquireSpinLock(&MemoryLock, &OldIrql);

        NewDescriptor->Signature = GLOBAL_MEMORY_SIGNATURE;
        NewDescriptor->LocalDescriptor = (PSAC_MEMORY_ENTRY)(NewDescriptor + 1);
        NewDescriptor->Size = ActualSize - 16;
        NewDescriptor->Next = GlobalMemoryList;

        GlobalMemoryList = NewDescriptor;

        LocalDescriptor = NewDescriptor->LocalDescriptor;
        LocalDescriptor->Signature = LOCAL_MEMORY_SIGNATURE;
        LocalDescriptor->Tag = FREE_POOL_TAG;
        LocalDescriptor->Size = GlobalMemoryList->Size - sizeof(SAC_MEMORY_ENTRY);
    }

    SAC_DBG(SAC_DBG_MM, "Found a good sized block.\n");
    ASSERT(LocalDescriptor->Tag == FREE_POOL_TAG);
    ASSERT(LocalDescriptor->Signature == LOCAL_MEMORY_SIGNATURE);

    if (LocalDescriptor->Size > (PoolSize + sizeof(SAC_MEMORY_ENTRY)))
    {
        NextDescriptor =
            (PSAC_MEMORY_ENTRY)((ULONG_PTR)LocalDescriptor +
                                PoolSize + sizeof(SAC_MEMORY_ENTRY));
        if (NextDescriptor->Tag == FREE_POOL_TAG)
        {
            NextDescriptor->Tag = FREE_POOL_TAG;
            NextDescriptor->Signature = LOCAL_MEMORY_SIGNATURE;
            NextDescriptor->Size =
                (LocalDescriptor->Size - PoolSize - sizeof(SAC_MEMORY_ENTRY));

            LocalDescriptor->Size = PoolSize;
        }
    }
#else
    /* Shut the compiler up */
    NewDescriptor = GlobalDescriptor = NULL;
    GlobalSize = (ULONG)NewDescriptor;
    ActualSize = GlobalSize;
    NextDescriptor = (PVOID)ActualSize;
    NewDescriptor = (PVOID)NextDescriptor;

    /* Use the NT pool allocator */
    LocalDescriptor = ExAllocatePoolWithTag(NonPagedPool,
                                            PoolSize + sizeof(*LocalDescriptor),
                                            Tag);
    LocalDescriptor->Size = PoolSize;
#endif

    /* Set the tag, and release the lock */
    LocalDescriptor->Tag = Tag;
    KeReleaseSpinLock(&MemoryLock, OldIrql);

    /* Update our performance counters */
    InterlockedIncrement(&TotalAllocations);
    InterlockedExchangeAdd(&TotalBytesAllocated, LocalDescriptor->Size);

    /* Return the buffer and zero it */
    SAC_DBG(SAC_DBG_MM, "Returning block 0x%X.\n", LocalDescriptor);
    Buffer = LocalDescriptor + 1;
    RtlZeroMemory(Buffer, PoolSize);
    return Buffer;
#endif
}
                    // NOTE(review): fragment — this chunk resumes
                    // IovAllocateIrp mid-call; the enclosing try/except
                    // and "if (ChargeQuota)" opened earlier are not
                    // visible here. Left byte-identical, comments only.
                    HighPoolPrioritySpecialPoolOverrun);
        } except(EXCEPTION_EXECUTE_HANDLER) {
            NOTHING;
        }
    } else {
        //
        // Attempt to allocate the pool from non-paged pool. If this
        // fails, and the caller's previous mode was kernel then allocate
        // the pool as must succeed.
        //
        irp = ExAllocatePoolWithTagPriority(
                  NonPagedPool,
                  allocateSize,
                  ' prI',
                  HighPoolPrioritySpecialPoolOverrun);
        if (!irp) {
            // Record that the must-succeed path was taken so the free
            // side can tell these packets apart.
            mustSucceed = IRP_ALLOCATED_MUST_SUCCEED;
            if (KeGetPreviousMode() == KernelMode ) {
                irp = ExAllocatePoolWithTagPriority(
                          NonPagedPoolMustSucceed,
                          allocateSize,
                          ' prI',
                          HighPoolPrioritySpecialPoolOverrun);
            }
        }
    }
    if (!irp) {