/*
 * Emit "number" in the given radix on file descriptor 2 (stderr).
 * Uses only stack storage and a raw write(), so it is safe to call
 * even while the heap allocator itself is in a broken state.
 * Digits are generated least-significant first into the tail of a
 * local buffer; digits above 9 print as lower-case letters.
 */
static void
printNumber(ef_number number, ef_number base)
{
	char	digits[NUMBER_BUFFER_SIZE];
	char *	cursor = &digits[NUMBER_BUFFER_SIZE];
	int	length;

	do {
		ef_number	remainder;

		/* Guard against overrunning the front of the buffer. */
		if ( --cursor == digits )
			EF_Abort("Internal error printing number.");

		remainder = number % base;

		if ( remainder < 10 )
			*cursor = '0' + remainder;
		else
			*cursor = 'a' + remainder - 10;
	} while ( (number /= base) > 0 );

	length = &digits[NUMBER_BUFFER_SIZE] - cursor;
	if ( length > 0 )
		write(2, cursor, length);
}
extern C_LINKAGE void DpsEfenceCheckLeaks(void) { register Slot * slot = allocationList; register size_t count = slotCount; if ( allocationList == 0 ) EF_Abort("DpsEfenceCheckLeaks() called before first DpsMalloc()."); lock(); if ( !noAllocationListProtection ) Page_AllowAccess(allocationList, allocationListSize); for ( ; count > 0; count-- ) { if ( slot->mode == ALLOCATED ) { fprintf(stderr, "Non-freed memory at 0x%x size:%d at %s:%d\n", slot->userAddress, slot->userSize, slot->filename, slot->fileline); } slot++; } if ( !noAllocationListProtection ) Page_DenyAccess(allocationList, allocationListSize); release(); return; }
/*
 * ANSI C free() replacement: validates allocator state, then
 * delegates the real work to free_locked() under the allocator lock.
 * free(NULL) is a no-op, as the standard requires.
 */
extern C_LINKAGE void
free(void * address)
{
	if ( address == 0 )
		return;

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	lock();
	free_locked(address);
	release();
}
extern C_LINKAGE void * realloc(void * oldBuffer, size_t newSize) { void * newBuffer = 0; if ( allocationList == 0 ) initialize(); /* This sets EF_ALIGNMENT */ lock(); newBuffer = memalign_locked(EF_ALIGNMENT, newSize); if ( oldBuffer ) { size_t size; Slot * slot; Page_AllowAccess(allocationList, allocationListSize); noAllocationListProtection = 1; slot = slotForUserAddress(oldBuffer); if ( slot == 0 ) EF_Abort( "realloc(%a, %d): address not from malloc()." ,oldBuffer ,newSize); if ( newSize < (size = slot->userSize) ) size = newSize; if ( size > 0 ) memcpy(newBuffer, oldBuffer, size); free_locked(oldBuffer); noAllocationListProtection = 0; Page_DenyAccess(allocationList, allocationListSize); if ( size < newSize ) memset(&(((char *)newBuffer)[size]), 0, newSize - size); /* Internal memory was re-protected in free() */ } release(); return newBuffer; }
/*
 * DpsRealloc() backend with caller source location (filename:fileline)
 * recorded for leak reports.  Allocates a new guarded buffer, copies
 * the surviving bytes, frees the old buffer, and zero-fills any grown
 * tail.
 *
 * NOTE(review): _DpsMalloc() and DpsFree() are invoked while lock()
 * is already held here; this only works if the lock is recursive or
 * those routines do not themselves take it — TODO confirm against
 * their definitions.  Compare realloc(), which calls the *_locked
 * variants instead.
 */
extern C_LINKAGE void * _DpsRealloc(void * oldBuffer, size_t newSize, const char *filename, size_t fileline)
{
	void * newBuffer = 0;

	if ( allocationList == 0 )
		initialize();	/* This sets EF_ALIGNMENT */

	/* fprintf(stderr, "DpsRealloc: %p at %s:%d\n", oldBuffer, filename, fileline); */
	lock();

	newBuffer = _DpsMalloc(newSize, filename, fileline);

	if ( oldBuffer ) {
		size_t size;
		Slot * slot;

		/* Open the (normally protected) slot table for the lookup;
		   the flag tells nested calls not to re-protect it early. */
		Page_AllowAccess(allocationList, allocationListSize);
		noAllocationListProtection = 1;

		slot = slotForUserAddress(oldBuffer);
		if ( slot == 0 )
			EF_Abort("DpsRealloc(%a, %d): address not from DpsMalloc() at %s:%d.", oldBuffer, newSize, filename, fileline);

		/* Copy the smaller of the old and new sizes. */
		if ( newSize < (size = slot->userSize) )
			size = newSize;

		if ( size > 0 )
			dps_memcpy(newBuffer, oldBuffer, size); /* was: dps_memmove */

		DpsFree(oldBuffer);

		noAllocationListProtection = 0;
		Page_DenyAccess(allocationList, allocationListSize);

		/* Zero-fill the grown portion of the buffer. */
		if ( size < newSize )
			memset(&(((char *)newBuffer)[size]), 0, newSize - size);

		/* Internal memory was re-protected in free() */
	}
	release();

	return newBuffer;
}
void * Page_Create(size_t size) { caddr_t allocation; /* * In this version, "startAddr" is a _hint_, not a demand. * When the memory I map here is contiguous with other * mappings, the allocator can coalesce the memory from two * or more mappings into one large contiguous chunk, and thus * might be able to find a fit that would not otherwise have * been possible. I could _force_ it to be contiguous by using * the MMAP_FIXED flag, but I don't want to stomp on memory mappings * generated by other software, etc. */ allocation = (caddr_t) mmap( startAddr ,(int)size ,PROT_READ|PROT_WRITE ,MAP_PRIVATE|MAP_ANONYMOUS ,-1 ,0); #ifndef __hpux /* * Set the "address hint" for the next mmap() so that it will abut * the mapping we just created. * * HP/UX 9.01 has a kernel bug that makes mmap() fail sometimes * when given a non-zero address hint, so we'll leave the hint set * to zero on that system. HP recently told me this is now fixed. * Someone please tell me when it is probable to assume that most * of those systems that were running 9.01 have been upgraded. */ startAddr = allocation + size; #endif if ( allocation == (caddr_t)-1 ) EF_Abort("mmap() failed at %s:%d: %s [startAddr:0x%x, size:%d]", __FILE__, __LINE__, stringErrorReport(), startAddr, size); return (void *)allocation; }
extern C_LINKAGE void * ef_realloc(void * oldBuffer, size_t newSize) { void * newBuffer = ef_malloc(newSize); lock(); if ( oldBuffer ) { size_t size; Slot * slot; Page_AllowAccess(allocationList, allocationListSize); noAllocationListProtection = 1; slot = slotForUserAddress(oldBuffer); if ( slot == 0 ) EF_Abort( "realloc(%a, %d): address not from malloc()." ,oldBuffer ,newSize); if ( newSize < (size = slot->userSize) ) size = newSize; if ( size > 0 ) memcpy(newBuffer, oldBuffer, size); ef_free(oldBuffer); noAllocationListProtection = 0; Page_DenyAccess(allocationList, allocationListSize); if ( size < newSize ) memset(&(((char *)newBuffer)[size]), 0, newSize - size); /* Internal memory was re-protected in free() */ } unlock(); return newBuffer; }
/*
 * DpsFree() backend with caller source location for diagnostics.
 * Marks the slot FREE (or PROTECTED when EF_PROTECT_FREE), denies or
 * unmaps the pages, and coalesces with adjacent slots of the same
 * mode.  An unknown address is reported (not fatal) and ignored.
 *
 * Fix: the "address not from DpsMalloc()" early-return path left the
 * allocation list unprotected — it called release() without the
 * matching Page_DenyAccess() after having opened the slot table.
 */
extern C_LINKAGE void _DpsFree(void * address, const char *filename, size_t fileline)
{
	Slot *	slot;
	Slot *	previousSlot = 0;
	Slot *	nextSlot = 0;

	/* free(NULL) is a no-op. */
	if ( address == 0 )
		return;

	/* fprintf(stderr, "DpsFree: %p at %s:%d\n", address, filename, fileline); */

	if ( allocationList == 0 )
		EF_Abort("DpsFree() called before first DpsMalloc() at %s:%d.", filename, fileline);

	lock();

	/* Open the (normally protected) slot table for the lookup. */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	slot = slotForUserAddress(address);

	if ( !slot ) {
		/* Report and ignore, rather than abort as free() does. */
		EF_Print("DpsFree(%a): address not from DpsMalloc() at %s:%d.\n", address, filename, fileline);
		/* Re-protect the slot table before bailing out; the
		   original early return skipped this step. */
		if ( !noAllocationListProtection )
			Page_DenyAccess(allocationList, allocationListSize);
		release();
		return;
	}

	/* EF_Print("DpsFree(%a): slot=%a\n", address, slot); */

	if ( slot->mode != ALLOCATED ) {
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */;
		else {
			/* Identify the bad mode before aborting. */
			if (slot->mode == FREE) EF_Print("DpsFree(%a) FREE\n", address);
			if (slot->mode == PROTECTED) EF_Print("DpsFree(%a) PROTECTED\n", address);
			if (slot->mode == INTERNAL_USE) EF_Print("DpsFree(%a) INTERNAL_USE\n", address);
			if (slot->mode == NOT_IN_USE) EF_Print("DpsFree(%a) NOT_IN_USE\n", address);
			EF_Abort("DpsFree(%a): freeing free memory at %s:%d.", address, filename, fileline);
		}
	}

	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;
	else
		slot->mode = FREE;

	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 *
	 * Some operating systems allow munmap() with single-page resolution,
	 * and allow you to un-map portions of a region, rather than the
	 * entire region that was mapped with mmap(). On those operating
	 * systems, we can release protected free pages with Page_Delete(),
	 * in the hope that the swap space attached to those pages will be
	 * released as well.
	 */
	if ( EF_PROTECT_FREE )
		Page_Delete(slot->internalAddress, slot->internalSize);
	else
		Page_DenyAccess(slot->internalAddress, slot->internalSize);

	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);

	if ( previousSlot && (previousSlot->mode == slot->mode) ) {
		/* Coalesce previous slot with this one. */
		previousSlot->internalSize += slot->internalSize;
		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		slot = previousSlot;
		unUsedSlots++;
	}

	if ( nextSlot && (nextSlot->mode == slot->mode) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}

	/* Re-protect the slot table before releasing the lock. */
	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);

	release();
	/* fprintf(stderr, "DpsFree Done\n");*/
}
/* * This is the memory allocator. When asked to allocate a buffer, allocate * it in such a way that the end of the buffer is followed by an inaccessable * memory page. If software overruns that buffer, it will touch the bad page * and get an immediate segmentation fault. It's then easy to zero in on the * offending code with a debugger. * * There are a few complications. If the user asks for an odd-sized buffer, * we would have to have that buffer start on an odd address if the byte after * the end of the buffer was to be on the inaccessable page. Unfortunately, * there is lots of software that asks for odd-sized buffers and then * requires that the returned address be word-aligned, or the size of the * buffer be a multiple of the word size. An example are the string-processing * functions on Sun systems, which do word references to the string memory * and may refer to memory up to three bytes beyond the end of the string. * For this reason, I take the alignment requests to memalign() and valloc() * seriously, and * * Electric Fence wastes lots of memory. I do a best-fit allocator here * so that it won't waste even more. It's slow, but thrashing because your * working set is too big for a system's RAM is even slower. */ static void * _DpsMemalign(size_t alignment, size_t userSize, const char *filename, size_t fileline) { register Slot *slot, *slot2; register size_t count; Slot * fullSlot = 0; Slot * emptySlots[2]; size_t internalSize; size_t slack; char * address; if ( allocationList == 0 ) initialize(); lock(); if ( userSize == 0 && !EF_ALLOW_MALLOC_0 && strcmp(filename, "efence.c")) EF_Abort("Allocating 0 bytes, probably a bug at %s:%d.", filename, fileline); /* * If EF_PROTECT_BELOW is set, all addresses returned by malloc() * and company will be page-aligned. 
*/ if ( !EF_PROTECT_BELOW && alignment > 1 ) { if ( (slack = userSize % alignment) != 0 ) userSize += alignment - slack; } /* * The internal size of the buffer is rounded up to the next page-size * boudary, and then we add another page's worth of memory for the * dead page. */ internalSize = userSize + bytesPerPage; if ( (slack = internalSize % bytesPerPage) != 0 ) internalSize += bytesPerPage - slack; /* * These will hold the addresses of two empty Slot structures, that * can be used to hold information for any memory I create, and any * memory that I mark free. */ emptySlots[0] = 0; emptySlots[1] = 0; /* * The internal memory used by the allocator is currently * inaccessable, so that errant programs won't scrawl on the * allocator's arena. I'll un-protect it here so that I can make * a new allocation. I'll re-protect it before I return. */ if ( !noAllocationListProtection ) Page_AllowAccess(allocationList, allocationListSize); /* * If I'm running out of empty slots, create some more before * I don't have enough slots left to make an allocation. */ if ( !internalUse && unUsedSlots < 7 ) { allocateMoreSlots(); } /* * Iterate through all of the slot structures. Attempt to find a slot * containing free memory of the exact right size. Accept a slot with * more memory than we want, if the exact right size is not available. * Find two slot structures that are not in use. We will need one if * we split a buffer into free and allocated parts, and the second if * we have to create new memory and mark it as free. 
* */ slot = allocationList; slot2 = &slot[slotCount - 1]; while (slot <= slot2) { if ( slot->mode == FREE && slot->internalSize >= internalSize ) { if ( !fullSlot ||slot->internalSize < fullSlot->internalSize) { fullSlot = slot; if ( slot->internalSize == internalSize && emptySlots[0] ) break; /* All done, */ } } else if ( slot->mode == NOT_IN_USE ) { if ( !emptySlots[0] ) emptySlots[0] = slot; else if ( !emptySlots[1] ) emptySlots[1] = slot; else if ( fullSlot && fullSlot->internalSize == internalSize ) break; /* All done. */ } if ( slot2->mode == FREE && slot2->internalSize >= internalSize ) { if ( !fullSlot ||slot2->internalSize < fullSlot->internalSize) { fullSlot = slot2; if ( slot2->internalSize == internalSize && emptySlots[0] ) break; /* All done, */ } } else if ( slot2->mode == NOT_IN_USE ) { if ( !emptySlots[0] ) emptySlots[0] = slot2; else if ( !emptySlots[1] ) emptySlots[1] = slot2; else if ( fullSlot && fullSlot->internalSize == internalSize ) break; /* All done. */ } slot++; slot2--; } /* for ( slot = allocationList, count = slotCount ; count > 0; count-- ) { if ( slot->mode == FREE && slot->internalSize >= internalSize ) { if ( !fullSlot ||slot->internalSize < fullSlot->internalSize){ fullSlot = slot; if ( slot->internalSize == internalSize && emptySlots[0] ) break; *//* All done, *//* } } else if ( slot->mode == NOT_IN_USE ) { if ( !emptySlots[0] ) emptySlots[0] = slot; else if ( !emptySlots[1] ) emptySlots[1] = slot; else if ( fullSlot && fullSlot->internalSize == internalSize ) break; *//* All done. *//* } slot++; } */ if ( !emptySlots[0] ) EF_InternalError("No empty slot 0."); if ( !fullSlot ) { /* * I get here if I haven't been able to find a free buffer * with all of the memory I need. I'll have to create more * memory. I'll mark it all as free, and then split it into * free and allocated portions later. 
*/ size_t chunkSize = MEMORY_CREATION_SIZE; if ( !emptySlots[1] ) EF_InternalError("No empty slot 1."); if ( chunkSize < internalSize ) chunkSize = internalSize; if ( (slack = chunkSize % bytesPerPage) != 0 ) chunkSize += bytesPerPage - slack; /* Use up one of the empty slots to make the full slot. */ fullSlot = emptySlots[0]; emptySlots[0] = emptySlots[1]; fullSlot->internalAddress = Page_Create(chunkSize); fullSlot->internalSize = chunkSize; fullSlot->mode = FREE; unUsedSlots--; /* Fill the slot if it was specified to do so. */ if ( EF_FILL != -1 ) memset( (char *)fullSlot->internalAddress ,EF_FILL ,chunkSize); } /* * If I'm allocating memory for the allocator's own data structures, * mark it INTERNAL_USE so that no errant software will be able to * free it. */ if ( internalUse ) fullSlot->mode = INTERNAL_USE; else fullSlot->mode = ALLOCATED; /* * If the buffer I've found is larger than I need, split it into * an allocated buffer with the exact amount of memory I need, and * a free buffer containing the surplus memory. */ if ( fullSlot->internalSize > internalSize ) { emptySlots[0]->internalSize = fullSlot->internalSize - internalSize; emptySlots[0]->internalAddress = ((char *)fullSlot->internalAddress) + internalSize; emptySlots[0]->mode = FREE; fullSlot->internalSize = internalSize; unUsedSlots--; } if ( !EF_PROTECT_BELOW ) { /* * Arrange the buffer so that it is followed by an inaccessable * memory page. A buffer overrun that touches that page will * cause a segmentation fault. */ address = (char *)fullSlot->internalAddress; /* Set up the "live" page. */ if ( internalSize - bytesPerPage > 0 ) Page_AllowAccess( fullSlot->internalAddress ,internalSize - bytesPerPage); address += internalSize - bytesPerPage; /* Set up the "dead" page. */ if ( EF_PROTECT_FREE ) Page_Delete(address, bytesPerPage); else Page_DenyAccess(address, bytesPerPage); /* Figure out what address to give the user. 
*/ address -= userSize; } else { /* EF_PROTECT_BELOW != 0 */ /* * Arrange the buffer so that it is preceded by an inaccessable * memory page. A buffer underrun that touches that page will * cause a segmentation fault. */ address = (char *)fullSlot->internalAddress; /* Set up the "dead" page. */ if ( EF_PROTECT_FREE ) Page_Delete(address, bytesPerPage); else Page_DenyAccess(address, bytesPerPage); address += bytesPerPage; /* Set up the "live" page. */ if ( internalSize - bytesPerPage > 0 ) Page_AllowAccess(address, internalSize - bytesPerPage); } fullSlot->userAddress = address; fullSlot->userSize = userSize; fullSlot->fileline = fileline; dps_strncpy(fullSlot->filename, filename, DPS_FILENAMELEN); /* if (slotCount > 1) DpsSort(allocationList, slotCount, sizeof(Slot), (qsort_cmp)cmp_Slot);*/ /* * Make the pool's internal memory inaccessable, so that the program * being debugged can't stomp on it. */ if ( !internalUse ) Page_DenyAccess(allocationList, allocationListSize); release(); /* if (address == 0x292d3000) { int r = 1 / 0; printf("Error r:%d\n"); }*/ /* fprintf(stderr, " -- allocated: %p @ %s:%d\n", address, filename, fileline); */ return address; }
static void free_locked(void * address) { Slot * slot; Slot * previousSlot = 0; Slot * nextSlot = 0; if ( address == 0 ) return; if ( !noAllocationListProtection ) Page_AllowAccess(allocationList, allocationListSize); slot = slotForUserAddress(address); if ( !slot ) EF_Abort("free(%a): address not from malloc().", address); if ( slot->mode != ALLOCATED ) { if ( internalUse && slot->mode == INTERNAL_USE ) /* Do nothing. */; else { EF_Abort( "free(%a): freeing free memory." ,address); } } if ( EF_PROTECT_FREE ) slot->mode = PROTECTED; else slot->mode = FREE; /* * Free memory is _always_ set to deny access. When EF_PROTECT_FREE * is true, free memory is never reallocated, so it remains access * denied for the life of the process. When EF_PROTECT_FREE is false, * the memory may be re-allocated, at which time access to it will be * allowed again. * * Some operating systems allow munmap() with single-page resolution, * and allow you to un-map portions of a region, rather than the * entire region that was mapped with mmap(). On those operating * systems, we can release protected free pages with Page_Delete(), * in the hope that the swap space attached to those pages will be * released as well. */ Page_Delete(slot->internalAddress, slot->internalSize); previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress); nextSlot = slotForInternalAddress( ((char *)slot->internalAddress) + slot->internalSize); if ( previousSlot && previousSlot->mode == slot->mode ) { /* Coalesce previous slot with this one. */ previousSlot->internalSize += slot->internalSize; slot->internalAddress = slot->userAddress = 0; slot->internalSize = slot->userSize = 0; slot->mode = NOT_IN_USE; slot = previousSlot; unUsedSlots++; } if ( nextSlot && nextSlot->mode == slot->mode ) { /* Coalesce next slot with this one. 
*/ slot->internalSize += nextSlot->internalSize; nextSlot->internalAddress = nextSlot->userAddress = 0; nextSlot->internalSize = nextSlot->userSize = 0; nextSlot->mode = NOT_IN_USE; unUsedSlots++; } slot->userAddress = slot->internalAddress; slot->userSize = slot->internalSize; if ( !noAllocationListProtection ) Page_DenyAccess(allocationList, allocationListSize); }
/*
 * free() implementation for the ef_* entry points.  Marks the slot
 * FREE (or PROTECTED when EF_PROTECT_FREE), optionally wipes the
 * buffer with 0xbd, coalesces with adjacent FREE/PROTECTED slots,
 * and denies access to the pages.
 *
 * NOTE(review): this variant takes the lock before the NULL check and
 * pairs lock() with unlock(), while the siblings use release() —
 * presumably equivalent; verify against their definitions.
 */
extern C_LINKAGE void ef_free(void * address)
{
	Slot * slot;
	Slot * previousSlot = 0;
	Slot * nextSlot = 0;

	//printf(" ::free %p \n",address);
	lock();

	/* free(NULL) is a no-op. */
	if ( address == 0 ) {
		unlock();
		return;
	}

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	/* Open the (normally protected) slot table for the lookup. */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	slot = slotForUserAddress(address);

	if ( !slot )
		EF_Abort("free(%a): address not from malloc().", address);

	if ( slot->mode != ALLOCATED ) {
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */;
		else {
			EF_Abort(
			 "free(%a): freeing free memory."
			,address);
		}
	}

	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;
	else
		slot->mode = FREE;

	/* Optionally scribble over the freed bytes to expose use-after-free. */
	if ( EF_FREE_WIPES )
		memset(slot->userAddress, 0xbd, slot->userSize);

	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);

	if ( previousSlot
	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
		/* Coalesce previous slot with this one; a PROTECTED result
		   is forced when EF_PROTECT_FREE is on. */
		previousSlot->internalSize += slot->internalSize;
		if ( EF_PROTECT_FREE )
			previousSlot->mode = PROTECTED;

		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		slot = previousSlot;
		unUsedSlots++;
	}

	if ( nextSlot && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}

	/* The freed slot now spans its whole internal region. */
	slot->userAddress = slot->internalAddress;
	slot->userSize = slot->internalSize;

	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 */
	Page_DenyAccess(slot->internalAddress, slot->internalSize);

	/* Re-protect the slot table before releasing the lock. */
	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);

	unlock();
}
/*
 * internalError() reports "shouldn't happen" conditions detected
 * inside the allocator itself.  It never returns: EF_Abort()
 * terminates the process.
 */
static void
internalError(void)
{
	EF_Abort("Internal error in allocator.");
}