/* * A basic header of information wrapper allocator. Simply stores * information as a header, returns the memory + 1 past it, can be * retrieved again with - 1. Where type is stat_mem_block_t*. */ void *stat_mem_allocate(size_t size, size_t line, const char *file, const char *expr) { stat_mem_block_t *info = (stat_mem_block_t*)malloc(size + IDENT_MEM_TOP); void *data = (void *)((char*)info + IDENT_MEM_TOP); if(GMQCC_UNLIKELY(!info)) return NULL; info->line = line; info->size = size; info->file = file; info->expr = expr; info->prev = NULL; info->next = stat_mem_block_root; /* Write identifier */ memcpy(info + 1, IDENT_MEM, IDENT_SIZE); /* likely since it only happens once */ if (GMQCC_LIKELY(stat_mem_block_root != NULL)) { VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, IDENT_MEM_TOP); stat_mem_block_root->prev = info; VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, IDENT_MEM_TOP); } stat_mem_block_root = info; stat_mem_allocated += size; stat_mem_high += size; stat_mem_allocated_total ++; if (stat_mem_high > stat_mem_peak) stat_mem_peak = stat_mem_high; VALGRIND_MALLOCLIKE_BLOCK(data, size, IDENT_MEM_TOP, 0); return data; }
/*
 * Driver for a custom allocator wrapped with Valgrind client requests.
 * Each statement below deliberately triggers (or deliberately avoids)
 * a specific memcheck report; the expected Valgrind output depends on
 * the exact statement order.
 */
int main(void)
{
   int *array, *array3;
   int x;

   array = custom_alloc(sizeof(int) * 10);
   array[8]  = 8;
   array[9]  = 8;
   array[10] = 10;           // invalid write (ok w/o MALLOCLIKE -- in superblock)

   custom_free(array);       // ok

   custom_free((void*)0x1);  // invalid free

   array3 = malloc(sizeof(int) * 10);
   custom_free(array3);      // mismatched free (ok without MALLOCLIKE)

   make_leak();
   x = array[0];             // use after free (ok without MALLOCLIKE/MAKE_MEM_NOACCESS)
                             // (nb: initialised because is_zeroed==1 above)
                             // unfortunately not identified as being in a free'd
                             // block because the freeing of the block and shadow
                             // chunk isn't postponed.

   // Bug 137073: passing 0 to MALLOCLIKE_BLOCK was causing an assertion
   // failure.  Test for this (and likewise for FREELIKE_BLOCK).
   VALGRIND_MALLOCLIKE_BLOCK(0,0,0,0);
   VALGRIND_FREELIKE_BLOCK(0,0);

   return x;                 // leak from make_leak()
}
/* * A basic header of information wrapper allocator. Simply stores * information as a header, returns the memory + 1 past it, can be * retrieved again with - 1. Where type is stat_mem_block_t*. */ void *stat_mem_allocate(size_t size, size_t line, const char *file) { stat_mem_block_t *info = (stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size); void *data = (void*)(info + 1); if(GMQCC_UNLIKELY(!info)) return NULL; info->line = line; info->size = size; info->file = file; info->prev = NULL; info->next = stat_mem_block_root; /* likely since it only happens once */ if (GMQCC_LIKELY(stat_mem_block_root != NULL)) { VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t)); stat_mem_block_root->prev = info; VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t)); } stat_mem_block_root = info; stat_mem_allocated += size; stat_mem_high += size; stat_mem_allocated_total ++; if (stat_mem_high > stat_mem_peak) stat_mem_peak = stat_mem_high; VALGRIND_MALLOCLIKE_BLOCK(data, size, sizeof(stat_mem_block_t), 0); return data; }
/* Take one entry from the size-class pool identified by `index`.
 * Under Valgrind the entry is registered as a heap-like block so
 * memcheck tracks it like a malloc'd allocation. */
void* pool_alloc(size_t index)
{
  void* block = pool_get(index);
#ifdef USE_VALGRIND
  VALGRIND_MALLOCLIKE_BLOCK(block, pool_size(index), 0, 0);
#endif
  return block;
}
/*
 * Allocate `length` bytes of secure (non-pageable) memory, tagged with
 * `tag` for accounting.  Existing secure blocks are searched first; a
 * new block is created if none has room.  With EGG_SECURE_USE_FALLBACK
 * in `flags`, falls back to ordinary heap memory when secure
 * allocation fails.  Returns NULL (errno = ENOMEM) on failure.
 */
void*
egg_secure_alloc_full (const char *tag, size_t length, int flags)
{
	Block *block;
	void *memory = NULL;

	if (tag == NULL)
		tag = "?";

	/* Reject absurd sizes up front (also guards later arithmetic). */
	if (length > 0xFFFFFFFF / 2) {
		if (egg_secure_warnings)
			fprintf (stderr, "tried to allocate an insane amount of memory: %lu\n",
			         (unsigned long)length);
		return NULL;
	}

	/* Can't allocate zero bytes */
	if (length == 0)
		return NULL;

	DO_LOCK ();

	/* Try to satisfy the request from an existing secure block. */
	for (block = all_blocks; block; block = block->next) {
		memory = sec_alloc (block, tag, length);
		if (memory)
			break;
	}

	/* None of the current blocks have space, allocate new */
	if (!memory) {
		block = sec_block_create (length, tag);
		if (block)
			memory = sec_alloc (block, tag, length);
	}

#ifdef WITH_VALGRIND
	/* Register as a malloc-like block with a word-sized redzone;
	 * is_zeroed==1 -- this allocator's memory is handed out zeroed
	 * (presumably by sec_alloc; see the memset on the fallback path). */
	if (memory != NULL)
		VALGRIND_MALLOCLIKE_BLOCK (memory, length, sizeof (void*), 1);
#endif

	DO_UNLOCK ();

	if (!memory && (flags & EGG_SECURE_USE_FALLBACK)) {
		memory = egg_memory_fallback (NULL, length);
		if (memory) /* Our returned memory is always zeroed */
			memset (memory, 0, length);
	}

	if (!memory)
		errno = ENOMEM;

	return memory;
}
/* Record a fresh allocation with Valgrind and classify it as coming
 * from the tracked heap segment or from an mmap'ed region; mmap'ed
 * pointers are additionally remembered for later lookup. */
static void ucm_malloc_allocated(void *ptr, size_t size, const char *debug_name)
{
    VALGRIND_MALLOCLIKE_BLOCK(ptr, size, 0, 0);

    if (!ucm_malloc_is_address_in_heap(ptr)) {
        ucm_trace("%s(size=%zu)=%p, mmap'ed", debug_name, size, ptr);
        ucm_malloc_mmaped_ptr_add(ptr);
        return;
    }

    ucm_trace("%s(size=%zu)=%p, in heap [%p..%p]", debug_name, size, ptr,
              ucm_malloc_hook_state.heap_start, ucm_malloc_hook_state.heap_end);
}
/* Carve n_bytes out of the qxl memory space.  In DEBUG_QXL_MEM builds
 * the block is also registered with Valgrind (and optionally logged). */
void *
qxl_alloc (struct qxl_mem *mem, unsigned long n_bytes)
{
    void *result = mspace_malloc (mem->space, n_bytes);

#ifdef DEBUG_QXL_MEM
    VALGRIND_MALLOCLIKE_BLOCK(result, n_bytes, 0, 0);
#ifdef DEBUG_QXL_MEM_VERBOSE
    fprintf(stderr, "alloc %p: %ld\n", result, n_bytes);
#endif
#endif

    return result;
}
/*
 * Driver for the custom allocator exercising RESIZEINPLACE_BLOCK in
 * addition to MALLOCLIKE/FREELIKE.  Each statement deliberately
 * triggers (or deliberately avoids) a specific memcheck report; the
 * expected Valgrind output depends on the exact statement order.
 */
int main(void)
{
   int *array, *array3;
   int x;

   array = custom_alloc(sizeof(int) * 10);
   array[8]  = 8;
   array[9]  = 8;
   array[10] = 10;  // invalid write (ok w/o MALLOCLIKE -- in superblock)

   // Shrink the block in place: elements at index 5 and up become
   // inaccessible.
   VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 10, sizeof(int) * 5, RZ);
   array[4] = 7;
   array[5] = 9;  // invalid write

   // Make the entire array defined again such that it can be verified whether
   // the red zone is marked properly when resizing in place.
   VALGRIND_MAKE_MEM_DEFINED(array, sizeof(int) * 10);

   // Grow back to 7 elements; the newly exposed bytes are undefined.
   VALGRIND_RESIZEINPLACE_BLOCK(array, sizeof(int) * 5, sizeof(int) * 7, RZ);
   if (array[5]) array[4]++;  // uninitialized read of array[5]
   array[5] = 11;
   array[6] = 7;
   array[7] = 8;  // invalid write

   // invalid realloc (pointer is not the start of a client block)
   VALGRIND_RESIZEINPLACE_BLOCK(array+1, sizeof(int) * 7, sizeof(int) * 8, RZ);

   custom_free(array);       // ok

   custom_free((void*)0x1);  // invalid free

   array3 = malloc(sizeof(int) * 10);
   custom_free(array3);      // mismatched free (ok without MALLOCLIKE)

   make_leak();
   x = array[0];             // use after free (ok without MALLOCLIKE/MAKE_MEM_NOACCESS)
                             // (nb: initialised because is_zeroed==1 above)
                             // unfortunately not identified as being in a free'd
                             // block because the freeing of the block and shadow
                             // chunk isn't postponed.

   // Bug 137073: passing 0 to MALLOCLIKE_BLOCK was causing an assertion
   // failure.  Test for this (and likewise for FREELIKE_BLOCK).
   VALGRIND_MALLOCLIKE_BLOCK(0,0,0,0);
   VALGRIND_FREELIKE_BLOCK(0,0);

   return x;                 // leak from make_leak()
}
/* Allocate memory with anonymous mmap, rounded up to a multiple of the
 * page size.  On mapping failure the request is retried through
 * get_mem_recovery().  The mapping is registered with Valgrind as a
 * malloc-like block. */
void *get_mem(unsigned long int size)
{
    size_t rounded = get_multiple_of_pagesize(size);
    void *mapping = mmap((void *)0, rounded, PROT_READ | PROT_WRITE,
                         MAP_ANON | MAP_PRIVATE, -1, 0);

    if (mapping == (void *)-1 || mapping == (void *)0)
        return get_mem_recovery(size);

    VALGRIND_MALLOCLIKE_BLOCK(mapping, rounded, 0, 0);
    return mapping;
}
/* Allocate a block of at least `size` bytes.  Sizes small enough to
 * map to one of the POOL_COUNT size classes come from the pools;
 * anything larger goes straight to virtual_alloc() and is registered
 * with Valgrind here (pool_alloc registers its own blocks). */
void* pool_alloc_size(size_t size)
{
  size_t index = pool_index(size);
  void* p;

  if(index < POOL_COUNT)
    return pool_alloc(index);

  /* Removed stale commented-out code (`p = pool_get(index);` / `else`)
   * that no longer reflected the control flow. */
  p = virtual_alloc(size);
#ifdef USE_VALGRIND
  VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, 0);
#endif
  return p;
}
/** * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error. */ void* anv_gem_mmap(struct anv_device *device, uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags) { struct drm_i915_gem_mmap gem_mmap = { .handle = gem_handle, .offset = offset, .size = size, .flags = flags, }; int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap); if (ret != 0) return MAP_FAILED; VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1)); return (void *)(uintptr_t) gem_mmap.addr_ptr; }
/* Pop one entry from the calling thread's pool for this size class.
 * Valgrind error reporting is suspended while the pool internals are
 * touched, so memcheck only sees the block once it is handed out as a
 * malloc-like allocation. */
void* pool_alloc(size_t index)
{
#ifdef USE_VALGRIND
  VALGRIND_DISABLE_ERROR_REPORTING;
#endif
  pool_local_t* local = pool_local;
  void* block = pool_get(local, index);
  TRACK_ALLOC(block, POOL_MIN << index);
#ifdef USE_VALGRIND
  VALGRIND_ENABLE_ERROR_REPORTING;
  VALGRIND_MALLOCLIKE_BLOCK(block, pool_size(index), 0, 0);
#endif
  return block;
}
/*
 * Grow the region allocator: mmap at least `s` bytes (plus slack so
 * something useful lands on unused_pages), align the result up to an
 * RPAGESIZE boundary, initialise the leading struct page, and put the
 * run on the unused_pages list.  Aborts on mmap failure.
 */
struct page *region_get_mem(size_t s)
{
  size_t request_bytes;
  void *mem;
  struct page *newp;

  /* Don't get less than K * RPAGESIZE extra memory (K * RPAGESIZE
     is the minimum useful size for something on unused_pages) */
  if (s + K * RPAGESIZE < MINIMUM_MEM_REQUEST)
    request_bytes = MINIMUM_MEM_REQUEST;
  else
    request_bytes = s;

#if 0
  request_bytes = ALIGN(request_bytes, 65536);
#endif

  /* Over-allocate by one page so the start can be aligned up to an
     RPAGESIZE boundary below without losing usable space. */
  mem = (struct page *)MMAP(0, request_bytes+RPAGESIZE,
                            PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if (!mem) { out_of_memory(); abort(); }

  VALGRIND_MALLOCLIKE_BLOCK(mem, request_bytes+RPAGESIZE, 0, 0);
  // VALGRIND_MAKE_NOACCESS(mem, request_bytes+RPAGESIZE);
  newp = PALIGN(mem, RPAGESIZE);
  /* Only the page header is written here; open it up for memcheck. */
  VALGRIND_MAKE_WRITABLE(newp, sizeof(struct page));
  memset(newp, 0, sizeof(struct page));

  if (mem == newp) /* Maybe we were lucky! */
    request_bytes += RPAGESIZE;

  addbyaddress(newp);

  /* Add the new memory to unused_pages */
#ifndef NMEMDEBUG
  set_region_range(newp, (char *)newp + s, FREEPAGE);
#endif
  total_page_count += request_bytes >> RPAGELOG;
  newp->pagecount = request_bytes >> RPAGELOG;
  assert(newp->pagecount > 0);
  newp->free = 1;
  addfront(&unused_pages, newp);

  return newp;
}
// has a redzone static void* custom_alloc(int size) { #define RZ 8 static void* hp = 0; // current heap pointer static void* hp_lim = 0; // maximum usable byte in current block int size2 = size + RZ*2; void* p; if (hp + size2 > hp_lim) { hp = get_superblock(); hp_lim = hp + SUPERBLOCK_SIZE - 1; } p = hp + RZ; hp += size2; VALGRIND_MALLOCLIKE_BLOCK( p, size, RZ, /*is_zeroed*/1 ); return (void*)p; }
/* Allocate `length` zeroed bytes from the ION working-memory
 * partition, recording the caller's file/line for the PSM allocator.
 * Posts an error message and returns NULL when the partition is
 * exhausted. */
void *allocFromIonMemory(char *fileName, int lineNbr, size_t length)
{
	PsmPartition	wm = _ionwm(NULL);
	PsmAddress	addr;
	void		*block;

	addr = Psm_zalloc(fileName, lineNbr, wm, length);
	if (addr == 0)
	{
		putErrmsg("Can't allocate ION working memory.", itoa(length));
		return NULL;
	}

	block = psp(wm, addr);
	memset(block, 0, length);
#ifdef HAVE_VALGRIND_VALGRIND_H
	VALGRIND_MALLOCLIKE_BLOCK(block, length, 0, 1);
#endif
	return block;
}
/*
 * @- virtual memory
 * allocations affect only the logical VM resources.
 *
 * Map `len` bytes (file-backed via `path`, or anonymous depending on
 * `mode` -- semantics of MT_mmap are defined elsewhere).  On failure,
 * GDKmemfail is given a chance to release memory and the mapping is
 * retried once.  Returns (void *)-1 on failure.
 */
void *
GDKmmap(const char *path, int mode, size_t len)
{
	void *ret = MT_mmap(path, mode, len);

	if (ret == (void *) -1L) {
		/* first attempt failed: try to free memory, then retry */
		GDKmemfail("GDKmmap", len);
		ret = MT_mmap(path, mode, len);
		if (ret != (void *) -1L) {
			THRprintf(GDKstdout, "#GDKmmap: recovery ok. Continuing..\n");
		}
	}
	ALLOCDEBUG fprintf(stderr, "#GDKmmap " SZFMT " " PTRFMT "\n", len, PTRFMTCAST ret);
	if (ret != (void *) -1L) {
		/* since mmap directly have content we say it's zero-ed
		 * memory */
		VALGRIND_MALLOCLIKE_BLOCK(ret, len, 0, 1);
		meminc(len, "GDKmmap");
	}
	return (void *) ret;
}
/* Allocate at least `size` bytes: size-classed requests go through the
 * pools; larger ones are page-allocated directly.  Error reporting is
 * suspended while touching allocator internals so Valgrind only sees
 * the finished malloc-like block. */
void* pool_alloc_size(size_t size)
{
  size_t index = pool_index(size);

  if(index < POOL_COUNT)
    return pool_alloc(index);

#ifdef USE_VALGRIND
  VALGRIND_DISABLE_ERROR_REPORTING;
#endif
  size_t adjusted = pool_adjust_size(size);
  void* block = pool_alloc_pages(adjusted);
  TRACK_ALLOC(block, adjusted);
#ifdef USE_VALGRIND
  VALGRIND_ENABLE_ERROR_REPORTING;
  VALGRIND_MALLOCLIKE_BLOCK(block, adjusted, 0, 0);
#endif
  return block;
}
/*
 * Exercises memcheck's origin tracking: each numbered section produces
 * an uninitialised (or deliberately initialised) value from a distinct
 * source and uses it in a conditional so the report can cite the
 * origin.  `x` is a global accumulator defined elsewhere in this file.
 * The fprintf banners and statement order must match the expected
 * output exactly.
 */
int main(void)
{
   assert(1 == sizeof(char));
   assert(2 == sizeof(short));
   assert(4 == sizeof(int));
   assert(8 == sizeof(long long));

   //------------------------------------------------------------------------
   // Sources of undefined values
   //------------------------------------------------------------------------

   // Stack, 32-bit
   {
      volatile int undef_stack_int;
      fprintf(stderr, "\nUndef 1 of 8 (stack, 32 bit)\n");
      x += (undef_stack_int == 0x12345678 ? 10 : 21);
   }

   // Stack, 32-bit, recently modified.  Nb: we have to do the register
   // mucking about to make sure that the modification isn't fenced by a
   // store/load pair and thus not seen (see origin-not-quite.c).
   {
      volatile int undef_stack_int;
      register int modified_undef_stack_int;
      fprintf(stderr, "\nUndef 2 of 8 (stack, 32 bit)\n");
      modified_undef_stack_int = undef_stack_int;
      modified_undef_stack_int++;
      x += (modified_undef_stack_int == 0x1234 ? 11 : 22);
   }

   // Stack, 64-bit.  XXX: gets reported with two identical origins.
   {
      volatile long long undef_stack_longlong;
      fprintf(stderr, "\nUndef 3 of 8 (stack, 64 bit)\n");
      x += (undef_stack_longlong == 0x1234567812345678LL ? 11 : 22);
   }

   // Malloc block, uninitialised, 32-bit
   {
      int* ptr_to_undef_malloc_int = malloc(sizeof(int));
      int undef_malloc_int = *ptr_to_undef_malloc_int;
      fprintf(stderr, "\nUndef 4 of 8 (mallocd, 32-bit)\n");
      x += (undef_malloc_int == 0x12345678 ? 12 : 23);
   }

   // Realloc block, uninitialised
   {
      int* ptr_to_undef_malloc_int2 = malloc(sizeof(int));
      // Allocate a big chunk to ensure that a new block is allocated.
      int* ptr_to_undef_realloc_int = realloc(ptr_to_undef_malloc_int2, 4096);
      // Have to move past the first 4 bytes, which were copied from the
      // malloc'd block.
      int undef_realloc_int = *(ptr_to_undef_realloc_int+1);
      fprintf(stderr, "\nUndef 5 of 8 (realloc)\n");
      x += (undef_realloc_int == 0x12345678 ? 13 : 24);
   }

   // Custom-allocated block, non-zeroed
   {
      int undef_custom_alloc_int;
      VALGRIND_MALLOCLIKE_BLOCK(&undef_custom_alloc_int, sizeof(int),
                                /*rzB*/0, /*is_zeroed*/0);
      fprintf(stderr, "\nUndef 6 of 8 (MALLOCLIKE_BLOCK)\n");
      x += (undef_custom_alloc_int == 0x12345678 ? 14 : 25);
   }

   // Heap segment (brk), uninitialised
   {
      int* ptr_to_new_brk_limit = sbrk(4096);
      int undef_brk_int = *ptr_to_new_brk_limit;
      fprintf(stderr, "\nUndef 7 of 8 (brk)\n");
      x += (undef_brk_int == 0x12345678 ? 15 : 26);
   }

   // User block, marked as undefined
   {
      int undef_user_int = 0;
      VALGRIND_MAKE_MEM_UNDEFINED(&undef_user_int, sizeof(int));
      fprintf(stderr, "\nUndef 8 of 8 (MAKE_MEM_UNDEFINED)\n");
      x += (undef_user_int == 0x12345678 ? 16 : 27);
   }

   //------------------------------------------------------------------------
   // Sources of defined values
   //------------------------------------------------------------------------

   // Heap block (calloc), initialised
   {
      int* ptr_to_def_calloc_int = calloc(1, sizeof(int));
      int def_calloc_int = *ptr_to_def_calloc_int;
      fprintf(stderr, "\nDef 1 of 3\n");
      x += (def_calloc_int == 0x12345678 ? 17 : 28);
   }

   // Custom-allocated block, non-zeroed
   {
      int def_custom_alloc_int = 0;
      fprintf(stderr, "\nDef 2 of 3\n");
      VALGRIND_MALLOCLIKE_BLOCK(&def_custom_alloc_int, sizeof(int),
                                /*rzB*/0, /*is_zeroed*/1);
      x += (def_custom_alloc_int == 0x12345678 ? 18 : 29);
   }

   // mmap block, initialised
   {
      int* ptr_to_def_mmap_int =
         mmap(0, 4096, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      int def_mmap_int = *ptr_to_def_mmap_int;
      fprintf(stderr, "\nDef 3 of 3\n");
      x += (def_mmap_int == 0x12345678 ? 19 : 30);
   }

   return x;
}
/*
 * Resize a secure-memory allocation with realloc() semantics:
 * NULL memory behaves like alloc, zero length behaves like free.
 * Pointers that don't belong to any secure block are handed to the
 * fallback allocator when GKR_SECURE_USE_FALLBACK is set; otherwise
 * they are treated as a programming error.
 */
void*
egg_secure_realloc_full (void *memory, size_t length, int flags)
{
	Block *block = NULL;
	size_t previous = 0;
	int donew = 0;
	void *alloc = NULL;

	/* Reject absurd sizes up front. */
	if (length > 0xFFFFFFFF / 2) {
		if (egg_secure_warnings)
			fprintf (stderr, "tried to allocate an insane amount of memory: %lu\n",
			         (unsigned long)length);
		return NULL;
	}

	/* NOTE(review): egg_secure_alloc_full is called here with two
	 * arguments; a three-argument (tag, length, flags) variant exists
	 * in other versions of this code -- confirm against this file's
	 * declaration. */
	if (memory == NULL)
		return egg_secure_alloc_full (length, flags);

	/* realloc(p, 0) == free(p) */
	if (!length) {
		egg_secure_free_full (memory, flags);
		return NULL;
	}

	DO_LOCK ();

	/* Find out where it belongs to */
	for (block = all_blocks; block; block = block->next) {
		if (sec_is_valid_word (block, memory)) {
			previous = sec_allocated (block, memory);

#ifdef WITH_VALGRIND
			/* Let valgrind think we are unallocating so that it'll validate */
			VALGRIND_FREELIKE_BLOCK (memory, sizeof (word_t));
#endif

			alloc = sec_realloc (block, memory, length);

#ifdef WITH_VALGRIND
			/* Now tell valgrind about either the new block or old one */
			VALGRIND_MALLOCLIKE_BLOCK (alloc ? alloc : memory,
			                           alloc ? length : previous,
			                           sizeof (word_t), 1);
#endif
			break;
		}
	}

	/* If it didn't work we may need to allocate a new block */
	if (block && !alloc)
		donew = 1;

	/* Last allocation gone: release the whole secure block. */
	if (block && block->used == 0)
		sec_block_destroy (block);

	DO_UNLOCK ();

	if (!block) {
		if ((flags & GKR_SECURE_USE_FALLBACK)) {
			/*
			 * In this case we can't zero the returned memory,
			 * because we don't know what the block size was.
			 */
			return egg_memory_fallback (memory, length);
		} else {
			if (egg_secure_warnings)
				fprintf (stderr, "memory does not belong to mate-keyring: 0x%08lx\n",
				         (unsigned long)memory);
			ASSERT (0 && "memory does does not belong to mate-keyring");
			return NULL;
		}
	}

	/* In-place realloc failed: allocate fresh, copy the old contents,
	 * then free the old allocation. */
	if (donew) {
		alloc = egg_secure_alloc_full (length, flags);
		if (alloc) {
			memcpy (alloc, memory, previous);
			egg_secure_free_full (memory, flags);
		}
	}

	if (!alloc)
		errno = ENOMEM;

	return alloc;
}
/*
 * Exercises memcheck's origin tracking (variant with the brk source
 * disabled): each numbered section produces an uninitialised (or
 * deliberately initialised) value from a distinct source and uses it
 * in a conditional so the report can cite the origin.  `x` is a global
 * accumulator defined elsewhere in this file.  The fprintf banners and
 * statement order must match the expected output exactly.
 */
int main(void)
{
   assert(1 == sizeof(char));
   assert(2 == sizeof(short));
   assert(4 == sizeof(int));
   assert(8 == sizeof(long long));

   //------------------------------------------------------------------------
   // Sources of undefined values
   //------------------------------------------------------------------------

   // Stack, 32-bit
   {
      volatile int undef_stack_int;
      fprintf(stderr, "\nUndef 1 of 8 (stack, 32 bit)\n");
      x += (undef_stack_int == 0x12345678 ? 10 : 21);
   }

   // Stack, 32-bit, recently modified.  Nb: we have to do the register
   // mucking about to make sure that the modification isn't fenced by a
   // store/load pair and thus not seen (see origin-not-quite.c).
   {
      volatile int undef_stack_int;
      register int modified_undef_stack_int;
      fprintf(stderr, "\nUndef 2 of 8 (stack, 32 bit)\n");
      modified_undef_stack_int = undef_stack_int;
      modified_undef_stack_int++;
      x += (modified_undef_stack_int == 0x1234 ? 11 : 22);
   }

   // Stack, 64-bit.  XXX: gets reported with two identical origins.
   {
      volatile long long undef_stack_longlong;
      fprintf(stderr, "\nUndef 3 of 8 (stack, 64 bit)\n");
      x += (undef_stack_longlong == 0x1234567812345678LL ? 11 : 22);
   }

   // Malloc block, uninitialised, 32-bit
   {
      int* ptr_to_undef_malloc_int = malloc(sizeof(int));
      int undef_malloc_int = *ptr_to_undef_malloc_int;
      fprintf(stderr, "\nUndef 4 of 8 (mallocd, 32-bit)\n");
      x += (undef_malloc_int == 0x12345678 ? 12 : 23);
   }

   // Realloc block, uninitialised
   {
      int* ptr_to_undef_malloc_int2 = malloc(sizeof(int));
      // Allocate a big chunk to ensure that a new block is allocated.
      int* ptr_to_undef_realloc_int = realloc(ptr_to_undef_malloc_int2, 4096);
      // Have to move past the first 4 bytes, which were copied from the
      // malloc'd block.
      int undef_realloc_int = *(ptr_to_undef_realloc_int+1);
      fprintf(stderr, "\nUndef 5 of 8 (realloc)\n");
      x += (undef_realloc_int == 0x12345678 ? 13 : 24);
   }

   // Custom-allocated block, non-zeroed
   {
      int undef_custom_alloc_int;
      VALGRIND_MALLOCLIKE_BLOCK(&undef_custom_alloc_int, sizeof(int),
                                /*rzB*/0, /*is_zeroed*/0);
      fprintf(stderr, "\nUndef 6 of 8 (MALLOCLIKE_BLOCK)\n");
      x += (undef_custom_alloc_int == 0x12345678 ? 14 : 25);
   }

   // Heap segment (brk), uninitialised
   // CURRENTLY DISABLED.  Why?
   // - On Darwin, sbrk() is implemented via vm_allocate() which always zeroes
   //   its allocated memory.  For a while we used use a separate .exp file
   //   for Darwin, but we add an extra printf on Darwin only so that it
   //   cannot be successfully matched on non-Darwin platforms.
   // - On Ubuntu 9.04 configured with --enable-only32bit, the brk symbol
   //   shows up as "???"
   // - Our current treatment of brk is suspect; whole new pages allocated
   //   with brk should arguably be marked defined -- see the big comment
   //   above track_new_mem_brk() in memcheck/mc_main.c.
//#if defined(VGO_darwin)
   fprintf(stderr, "\nUndef 7 of 8 (brk)\n");
//   fprintf(stderr, "\n(no complaint; sbrk initialises memory on Darwin)\n");
   fprintf(stderr, "\n(currently disabled)\n");
//#else
//   {
//      int* ptr_to_new_brk_limit = sbrk(4096);
//      int undef_brk_int = *ptr_to_new_brk_limit;
//      fprintf(stderr, "\nUndef 7 of 8 (brk)\n");
//      x += (undef_brk_int == 0x12345678 ? 15 : 26);
//   }
//#endif

   // User block, marked as undefined
   {
      int undef_user_int = 0;
      VALGRIND_MAKE_MEM_UNDEFINED(&undef_user_int, sizeof(int));
      fprintf(stderr, "\nUndef 8 of 8 (MAKE_MEM_UNDEFINED)\n");
      x += (undef_user_int == 0x12345678 ? 16 : 27);
   }

   //------------------------------------------------------------------------
   // Sources of defined values
   //------------------------------------------------------------------------

   // Heap block (calloc), initialised
   {
      int* ptr_to_def_calloc_int = calloc(1, sizeof(int));
      int def_calloc_int = *ptr_to_def_calloc_int;
      fprintf(stderr, "\nDef 1 of 3\n");
      x += (def_calloc_int == 0x12345678 ? 17 : 28);
   }

   // Custom-allocated block, non-zeroed
   {
      int def_custom_alloc_int = 0;
      fprintf(stderr, "\nDef 2 of 3\n");
      VALGRIND_MALLOCLIKE_BLOCK(&def_custom_alloc_int, sizeof(int),
                                /*rzB*/0, /*is_zeroed*/1);
      x += (def_custom_alloc_int == 0x12345678 ? 18 : 29);
   }

   // mmap block, initialised
   {
      int* ptr_to_def_mmap_int =
         mmap(0, 4096, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      int def_mmap_int = *ptr_to_def_mmap_int;
      fprintf(stderr, "\nDef 3 of 3\n");
      x += (def_mmap_int == 0x12345678 ? 19 : 30);
   }

   return x;
}
/*
 * realloc() counterpart of stat_mem_allocate: allocates a new
 * header+payload block, copies the old payload, unlinks the old header
 * from the global block list, links the new one in, updates the
 * statistics counters, and frees the old block.  Under Valgrind the
 * header redzones are kept NOACCESS except for the short windows in
 * which they are read or written.
 */
void *stat_mem_reallocate(void *ptr, size_t size, size_t line, const char *file) {
    stat_mem_block_t *oldinfo = NULL;
    stat_mem_block_t *newinfo;

    /* realloc(NULL, n) behaves like malloc(n) */
    if (GMQCC_UNLIKELY(!ptr))
        return stat_mem_allocate(size, line, file);

    /* stay consistent with glibc */
    if (GMQCC_UNLIKELY(!size)) {
        stat_mem_deallocate(ptr);
        return NULL;
    }

    /* Header sits immediately before the payload pointer. */
    oldinfo = ((stat_mem_block_t*)ptr - 1);
    newinfo = ((stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size));

    if (GMQCC_UNLIKELY(!newinfo)) {
        stat_mem_deallocate(ptr);
        return NULL;
    }

    /* Register the new payload before touching it below. */
    VALGRIND_MALLOCLIKE_BLOCK(newinfo + 1, size, sizeof(stat_mem_block_t), 0);

    /* we need access to the old info redzone */
    VALGRIND_MAKE_MEM_DEFINED(oldinfo, sizeof(stat_mem_block_t));

    /* NOTE(review): copies oldinfo->size bytes into a `size`-byte
     * payload -- assumes callers only grow, or that sec copy of a
     * larger old block cannot occur here; confirm against callers. */
    memcpy(newinfo+1, oldinfo+1, oldinfo->size);

    /* Unlink the old header from the doubly-linked block list. */
    if (oldinfo->prev) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(oldinfo->prev, sizeof(stat_mem_block_t));
        oldinfo->prev->next = oldinfo->next;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(oldinfo->prev, sizeof(stat_mem_block_t));
    }
    if (oldinfo->next) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(oldinfo->next, sizeof(stat_mem_block_t));
        oldinfo->next->prev = oldinfo->prev;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(oldinfo->next, sizeof(stat_mem_block_t));
    }

    /* move ahead */
    if (oldinfo == stat_mem_block_root)
        stat_mem_block_root = oldinfo->next;

    /* we need access to the redzone for the newinfo block */
    VALGRIND_MAKE_MEM_DEFINED(newinfo, sizeof(stat_mem_block_t));
    newinfo->line = line;
    newinfo->size = size;
    newinfo->file = file;
    newinfo->prev = NULL;
    newinfo->next = stat_mem_block_root;

    /*
     * likely since the only time there is no root is when it's
     * being initialized first.
     */
    if (GMQCC_LIKELY(stat_mem_block_root != NULL)) {
        /* we need access to the root */
        VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
        stat_mem_block_root->prev = newinfo;
        /* kill access */
        VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
    }

    stat_mem_block_root = newinfo;
    stat_mem_allocated -= oldinfo->size;
    stat_mem_high      -= oldinfo->size;
    stat_mem_allocated += newinfo->size;
    stat_mem_high      += newinfo->size;

    /*
     * we're finished with the redzones, lets kill the access
     * to them.
     */
    VALGRIND_MAKE_MEM_NOACCESS(newinfo, sizeof(stat_mem_block_t));
    VALGRIND_MAKE_MEM_NOACCESS(oldinfo, sizeof(stat_mem_block_t));

    if (stat_mem_high > stat_mem_peak)
        stat_mem_peak = stat_mem_high;

    free(oldinfo);
    VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
    return newinfo + 1;
}