// Routine to check that an allocation header looks valid. Asserts on failure. void DbgValidateHeader(DbgAllocHeader *h) { _ASSERTE((h->m_Magic1 == CDA_MAGIC_1) && (*CDA_MAGIC2(h) == CDA_MAGIC_2) && ((unsigned)h->m_Next != CDA_INV_PATTERN) && ((unsigned)h->m_Prev != CDA_INV_PATTERN)); if (g_AllocGuard) for (unsigned i = 0; i < CDA_GUARD_BYTES; i++) _ASSERTE(CDA_DATA(h, h->m_Length + i) == CDA_GUARD_PATTERN); }
// Free a packet allocated with DbgAlloc. void __stdcall DbgFree(void *b, void **ppvCallstack, BOOL isArray) { SCAN_IGNORE_FAULT; // tell the static contract analysis tool to ignore FAULTS due to calls of 'new' in code called by this function STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_DEBUG_ONLY; if (!g_DbgEnabled) { if (b) // check for null pointer Win98 doesn't like being // called to free null pointers. ClrFreeInProcessHeap(0, b); return; } // Technically it's possible to get here without having gone through // DbgAlloc (since it's legal to deallocate a NULL pointer), so we // better check for initializtion to be on the safe side. if (!g_HeapInitialized) DbgAllocInit(); CDA_LOCK(); // Check all active packets still look OK. if (g_ConstantRecheck) DbgValidateActivePackets(NULL, NULL); // Count this call to DbgFree. CDA_STATS_INC(Frees); if (b == NULL) { CDA_STATS_INC(NullFrees); CDA_UNLOCK(); return; } // Locate the packet header in front of the data packet. DbgAllocHeader *h = CDA_DATA_TO_HEADER(b); // Verify not calling delete [] on new, and vice versa //_ASSERTE (h->m_IsArray == isArray); // Check that the header looks OK. DbgValidateHeader(h); // Count the total number of bytes we've freed so far. CDA_STATS_ADD(FreeBytes, h->m_Length); // Unlink the packet from the live packet queue. if (h->m_Prev) h->m_Prev->m_Next = h->m_Next; else g_AllocListFirst = h->m_Next; if (h->m_Next) h->m_Next->m_Prev = h->m_Prev; else g_AllocListLast = h->m_Prev; // Zap our link pointers so we'll spot corruption sooner. h->m_Next = (DbgAllocHeader *)(UINT_PTR)CDA_INV_PATTERN; h->m_Prev = (DbgAllocHeader *)(UINT_PTR)CDA_INV_PATTERN; // Zap the tag fields in the header so we'll spot double deallocations // straight away. h->m_Magic1 = CDA_INV_PATTERN; *CDA_MAGIC2(h) = CDA_INV_PATTERN; // Poison the user's data area so that continued access to it after the // deallocation will likely cause an assertion that much sooner. if (g_PoisonPackets) memset(b, CDA_DEALLOC_PATTERN, h->m_Length); // Record the callstack of the deallocator (handy for debugging double // deallocation problems). for (unsigned i = 0; i < g_CallStackDepth; i++) CDA_DEALLOC_STACK(h, i) = ppvCallstack[i]; // put the pack on the free list for a while. Delete the one that it replaces. if (g_PoisonPackets) { DbgAllocHeader* tmp = g_AllocFreeQueue[g_AllocFreeQueueCur]; g_AllocFreeQueue[g_AllocFreeQueueCur] = h; h = tmp; g_AllocFreeQueueCur++; if (g_AllocFreeQueueCur >= g_FreeQueueSize) g_AllocFreeQueueCur = 0; } CDA_UNLOCK(); if (h) { if (g_PagePerAlloc) { // In page per alloc mode we decommit the pages allocated, but leave // them reserved so that we never reuse the same virtual addresses. ClrVirtualFree(h, h->m_Length + CDA_SIZEOF_HEADER() + CDA_OPT_GUARD_BYTES, MEM_DECOMMIT); } else ClrHeapFree(g_HeapHandle, 0, h); } }
// Allocate a block of memory at least n bytes big. void * __stdcall DbgAlloc(size_t n, void **ppvCallstack, BOOL isArray) { STATIC_CONTRACT_NOTHROW; // Initialize if necessary (DbgAllocInit takes care of the synchronization). if (!g_HeapInitialized) DbgAllocInit(); if (!g_DbgEnabled) return ClrAllocInProcessHeap(0, n); CDA_LOCK(); // Count calls to this routine and the number that specify 0 bytes of // allocation. This needs to be done under the lock since the counters // themselves aren't synchronized. CDA_STATS_INC(Allocs); if (n == 0) CDA_STATS_INC(ZeroAllocs); CDA_UNLOCK(); // Allocate enough memory for the caller, our debugging header and possibly // some guard bytes. unsigned length = CDA_SIZEOF_HEADER() + (unsigned)n + CDA_OPT_GUARD_BYTES; DbgAllocHeader *h; if (g_PagePerAlloc) { // In page per alloc mode we allocate a number of whole pages. The // actual packet is placed at the end of the second to last page and the // last page is reserved but never commited (so will cause an access // violation if touched). This will catch heap crawl real quick. unsigned pages = ((length + (g_PageSize - 1)) / g_PageSize) + 1; h = (DbgAllocHeader *)ClrVirtualAlloc(NULL, pages * g_PageSize, MEM_RESERVE, PAGE_NOACCESS); if (h) { ClrVirtualAlloc(h, (pages - 1) * g_PageSize, MEM_COMMIT, PAGE_READWRITE); h = (DbgAllocHeader *)((BYTE *)h + (g_PageSize - (length % g_PageSize))); } } else h = (DbgAllocHeader *)ClrHeapAlloc(g_HeapHandle, 0, length); CDA_LOCK(); if (h == NULL) { // Whoops, allocation failure. Record it. CDA_STATS_INC(AllocFailures); LOG((LF_DBGALLOC, LL_ALWAYS, "DbgAlloc: alloc fail for %u bytes\n", n)); } else { // Check all active packets still look OK. if (g_ConstantRecheck) DbgValidateActivePackets(h, &CDA_DATA(h, n + CDA_OPT_GUARD_BYTES)); // Count the total number of bytes we've allocated so far. CDA_STATS_ADD(AllocBytes, n); // Record the largest amount of concurrent allocations we ever see // during the life of the process. if((g_AllocStats.m_AllocBytes - g_AllocStats.m_FreeBytes) > g_AllocStats.m_MaxAlloc) g_AllocStats.m_MaxAlloc = g_AllocStats.m_AllocBytes - g_AllocStats.m_FreeBytes; // Fill in the packet debugging header. for (unsigned i = 0; i < g_CallStackDepth; i++) { CDA_ALLOC_STACK(h, i) = ppvCallstack[i]; CDA_DEALLOC_STACK(h, i) = NULL; } h->m_hmod = NULL; h->m_SN = g_NextSN++; h->m_Length = (unsigned)n; h->m_IsArray = isArray; h->m_Prev = g_AllocListLast; h->m_Next = NULL; h->m_Magic1 = CDA_MAGIC_1; *CDA_MAGIC2(h) = CDA_MAGIC_2; // If the user wants to breakpoint on the allocation of a specific // packet, do it now. if (g_BreakOnAlloc && (h->m_SN == g_BreakOnAllocNumber)) _ASSERTE(!"Hit memory allocation # for breakpoint"); // Link the packet into the queue of live packets. if (g_AllocListLast != NULL) { g_AllocListLast->m_Next = h; g_AllocListLast = h; } if (g_AllocListFirst == NULL) { _ASSERTE(g_AllocListLast == NULL); g_AllocListFirst = h; g_AllocListLast = h; } if (g_PoisonPackets) memset(CDA_HEADER_TO_DATA(h), CDA_ALLOC_PATTERN, n); // Write a guard pattern after the user data to trap overwrites. if (g_AllocGuard) memset(&CDA_DATA(h, n), CDA_GUARD_PATTERN, CDA_GUARD_BYTES); // See if our allocator makes the list of most frequent allocators. if (g_UsageByAllocator) { // Look for an existing entry in the table for our EIP, or for the // first empty slot (the table is kept in sorted order, so the first // empty slot marks the end of the table). unsigned i; for (i = 0; i < g_TopAllocatorsSlots; i++) { if (g_TopAllocators[i].m_EIP == ppvCallstack[0]) { // We already have an entry for this allocator. Incrementing // the count may allow us to move the allocator up the // table. g_TopAllocators[i].m_Count++; g_TopAllocators[i].m_TotalBytes += n; if ((i > 0) && (g_TopAllocators[i].m_Count > g_TopAllocators[i - 1].m_Count)) { DbgAllocTop tmp = g_TopAllocators[i - 1]; g_TopAllocators[i - 1] = g_TopAllocators[i]; g_TopAllocators[i] = tmp; } break; } if (g_TopAllocators[i].m_EIP == NULL) { // We've found an empty slot, we weren't in the table. This // is the right place to put the entry though, since we've // only done a single allocation. g_TopAllocators[i].m_EIP = ppvCallstack[0]; g_TopAllocators[i].m_Count = 1; g_TopAllocators[i].m_TotalBytes = n; break; } } if (i == g_TopAllocatorsSlots) { // Ran out of space in the table, need to expand it. unsigned slots = g_TopAllocatorsSlots ? g_TopAllocatorsSlots * 2 : CDA_TOP_ALLOCATORS; DbgAllocTop *newtab = (DbgAllocTop*)ClrAllocInProcessHeap(0, slots*sizeof(DbgAllocTop)); if (newtab) { // Copy old contents over. if (g_TopAllocatorsSlots) { memcpy(newtab, g_TopAllocators, sizeof(DbgAllocTop) * g_TopAllocatorsSlots); delete [] g_TopAllocators; } // Install new table. g_TopAllocators = newtab; g_TopAllocatorsSlots = slots; // Add new entry to tail. g_TopAllocators[i].m_EIP = ppvCallstack[0]; g_TopAllocators[i].m_Count = 1; g_TopAllocators[i].m_TotalBytes = n; // And initialize the rest of the entries to empty. memset(&g_TopAllocators[i + 1], 0, sizeof(DbgAllocTop) * (slots - (i + 1))); } } } // Count how many allocations of each size range we get. Allocations // above a certain size are all dumped into one bucket. if (g_LogDist) { if (n > CDA_MAX_DIST_SIZE) g_LargeAllocs++; else { for (unsigned i = CDA_DIST_BUCKET_SIZE - 1; i <= CDA_MAX_DIST_SIZE; i += CDA_DIST_BUCKET_SIZE) if (n <= i) { g_AllocBuckets[i/CDA_DIST_BUCKET_SIZE]++; break; } } } } CDA_UNLOCK(); return h ? CDA_HEADER_TO_DATA(h) : NULL; }