/*
 * Free 'ptr' (and, implicitly, everything allocated after it) from pool 'p'.
 *
 * Walks the chunk list from the newest chunk backwards.  If 'ptr' lies in
 * the chunk under inspection, the chunk's allocation cursor is rewound to
 * it.  Chunks newer than the one containing 'ptr' are emptied completely
 * and recycled via p->spare_chunk (any previously held spare is released
 * first, so at most the last emptied chunk survives as the spare).
 */
void dm_pool_free(struct dm_pool *p, void *ptr)
{
	struct chunk *c = p->chunk;

	while (c) {
		/* Does ptr point into this chunk's payload? */
		if (((char *) c < (char *) ptr) &&
		    ((char *) c->end > (char *) ptr)) {
			/* Rewind the allocation cursor to ptr. */
			c->begin = ptr;
#ifdef VALGRIND_POOL
			/* Poison the now-free tail of the chunk. */
			VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
			break;
		}

		/* ptr belongs to an older chunk: empty this chunk entirely
		 * and keep it as the spare (dropping any previous spare). */
		if (p->spare_chunk)
			_free_chunk(p->spare_chunk);

		/* Payload starts immediately after the chunk header. */
		c->begin = (char *) (c + 1);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
		p->spare_chunk = c;
		c = c->prev;
	}

	if (!c)
		log_error(INTERNAL_ERROR "pool_free asked to free pointer "
			  "not in pool");
	else
		p->chunk = c;
}
static void stat_dump_mem_leaks(void) { stat_mem_block_t *info; /* we need access to the root for this */ VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t)); for (info = stat_mem_block_root; info; info = info->next) { /* we need access to the block */ VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t)); con_out("lost: %u (bytes) at %s:%u from expression `%s`\n", info->size, info->file, info->line, info->expr ); stat_dump_mem_contents(info, OPTS_OPTION_U16(OPTION_MEMDUMPCOLS)); /* * we're finished with the access, the redzone should be marked * inaccesible so that invalid read/writes that could 'step-into' * those redzones will show up as invalid read/writes in valgrind. */ VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t)); } VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t)); }
/**
 * Make a fresh chunk current, either by allocating a new one or by
 * recycling one from the free list.  The new chunk becomes the source
 * for subsequent bump-pointer allocations via d_nextFree/d_endChunk.
 */
void ContextMemoryManager::newChunk() {

  // Increment index to chunk list
  ++d_indexChunkList;
  Assert(d_chunkList.size() == d_indexChunkList,
         "Index should be at the end of the list");

  // Create new chunk if no free chunk available
  if(d_freeChunks.empty()) {
    d_chunkList.push_back((char*)malloc(chunkSizeBytes));
    if(d_chunkList.back() == NULL) {
      throw std::bad_alloc();
    }
#ifdef CVC4_VALGRIND
    // New chunks start fully poisoned; individual allocations are made
    // accessible as they are handed out.
    VALGRIND_MAKE_MEM_NOACCESS(d_chunkList.back(), chunkSizeBytes);
#endif /* CVC4_VALGRIND */
  }
  // If there is a free chunk, use that
  else {
    // Recycled chunks were already re-poisoned when released in pop().
    d_chunkList.push_back(d_freeChunks.back());
    d_freeChunks.pop_back();
  }
  // Set up the current chunk pointers
  d_nextFree = d_chunkList.back();
  d_endChunk = d_nextFree + chunkSizeBytes;
}
/*
 * Return the allocated size recorded for 'memory', a pointer previously
 * handed out from 'block'.  The cell metadata is located via the guard
 * word stored in the word immediately preceding the user memory.
 */
static size_t
sec_allocated (Block *block, void *memory)
{
	Cell *cell;
	word_t *word;

	ASSERT (block);
	ASSERT (memory);

	/* The word just before the user pointer holds the Cell pointer. */
	word = memory;
	--word;

#ifdef WITH_VALGRIND
	/* Guard words are normally poisoned; open this one up briefly. */
	VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t));
#endif

	/* Lookup the meta for this memory block (using guard pointer) */
	ASSERT (sec_is_valid_word (block, word));
	ASSERT (pool_valid (*word));
	cell = *word;

	sec_check_guards (cell);
	/* An allocated cell must not be linked on any free list. */
	ASSERT (cell->next == NULL);
	ASSERT (cell->prev == NULL);
	ASSERT (cell->allocated > 0);

#ifdef WITH_VALGRIND
	/* Re-poison the guard word before returning. */
	VALGRIND_MAKE_MEM_NOACCESS (word, sizeof (word_t));
#endif

	return cell->allocated;
}
/* * A basic header of information wrapper allocator. Simply stores * information as a header, returns the memory + 1 past it, can be * retrieved again with - 1. Where type is stat_mem_block_t*. */ void *stat_mem_allocate(size_t size, size_t line, const char *file, const char *expr) { stat_mem_block_t *info = (stat_mem_block_t*)malloc(size + IDENT_MEM_TOP); void *data = (void *)((char*)info + IDENT_MEM_TOP); if(GMQCC_UNLIKELY(!info)) return NULL; info->line = line; info->size = size; info->file = file; info->expr = expr; info->prev = NULL; info->next = stat_mem_block_root; /* Write identifier */ memcpy(info + 1, IDENT_MEM, IDENT_SIZE); /* likely since it only happens once */ if (GMQCC_LIKELY(stat_mem_block_root != NULL)) { VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, IDENT_MEM_TOP); stat_mem_block_root->prev = info; VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, IDENT_MEM_TOP); } stat_mem_block_root = info; stat_mem_allocated += size; stat_mem_high += size; stat_mem_allocated_total ++; if (stat_mem_high > stat_mem_peak) stat_mem_peak = stat_mem_high; VALGRIND_MALLOCLIKE_BLOCK(data, size, IDENT_MEM_TOP, 0); return data; }
/*
 * Release the pixman region owned by 'region' and poison the struct so
 * that any use after finalization is flagged by valgrind.  The caller
 * must guarantee no outstanding references.
 *
 * Fix: "®ion" in the original was mojibake (HTML entity &reg; fused
 * with "ion") for "&region"; the address-of expressions are restored so
 * the code compiles again.
 */
void
_cairo_region_fini (cairo_region_t *region)
{
    assert (! CAIRO_REFERENCE_COUNT_HAS_REFERENCE (&region->ref_count));

    pixman_region32_fini (&region->rgn);
    VG (VALGRIND_MAKE_MEM_NOACCESS (region, sizeof (cairo_region_t)));
}
/*
 * Write guard values into the first and last words of the cell's word
 * range.  Each guard is a pointer back to the cell itself, verified
 * later by sec_check_guards().  Under valgrind the guard words live
 * poisoned, so they are unpoisoned just around the writes.
 */
static inline void
sec_write_guards (Cell *cell)
{
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_UNDEFINED (cell->words + cell->n_words - 1, sizeof (word_t));
#endif

	((void**)cell->words)[0] = (void*)cell;
	((void**)cell->words)[cell->n_words - 1] = (void*)cell;

#ifdef WITH_VALGRIND
	/* Re-poison so stray reads/writes of the guards are reported. */
	VALGRIND_MAKE_MEM_NOACCESS (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_NOACCESS (cell->words + cell->n_words - 1, sizeof (word_t));
#endif
}
/**
 * Pop the most recent push() scope: notify valgrind that the scope's
 * allocations are dead, restore the saved bump pointers, recycle the
 * chunks created since the push, and trim the free-chunk cache.
 */
void ContextMemoryManager::pop() {
#ifdef CVC4_VALGRIND
  // Tell valgrind every allocation made in this scope is now freed.
  for (auto allocation : d_allocations.back()) {
    VALGRIND_MEMPOOL_FREE(this, allocation);
  }
  d_allocations.pop_back();
#endif /* CVC4_VALGRIND */

  Assert(d_nextFreeStack.size() > 0 && d_endChunkStack.size() > 0);

  // Restore state from stack
  d_nextFree = d_nextFreeStack.back();
  d_nextFreeStack.pop_back();
  d_endChunk = d_endChunkStack.back();
  d_endChunkStack.pop_back();

  // Free all the new chunks since the last push
  while(d_indexChunkList > d_indexChunkListStack.back()) {
    d_freeChunks.push_back(d_chunkList.back());
#ifdef CVC4_VALGRIND
    // Re-poison recycled chunks; newChunk() hands them out again later.
    VALGRIND_MAKE_MEM_NOACCESS(d_chunkList.back(), chunkSizeBytes);
#endif /* CVC4_VALGRIND */
    d_chunkList.pop_back();
    --d_indexChunkList;
  }
  d_indexChunkListStack.pop_back();

  // Delete excess free chunks
  while(d_freeChunks.size() > maxFreeChunks) {
    free(d_freeChunks.front());
    d_freeChunks.pop_front();
  }
}
/*
 * Grow 'freepool' with a brand new pool (doubling the previous pool's
 * size, or starting from a page-rounded size for the first growth) and
 * return the first node carved out of it.
 *
 * Returns NULL on allocation failure.
 */
void *
_cairo_freepool_alloc_from_new_pool (cairo_freepool_t *freepool)
{
    cairo_freelist_pool_t *pool;
    int poolsize;

    if (freepool->pools != &freepool->embedded_pool)
	poolsize = 2 * freepool->pools->size;
    else
	poolsize = (128 * freepool->nodesize + 8191) & -8192;
    pool = malloc (sizeof (cairo_freelist_pool_t) + poolsize);
    if (unlikely (pool == NULL))
	return pool;

    pool->next = freepool->pools;
    freepool->pools = pool;

    pool->size = poolsize;
    pool->rem = poolsize - freepool->nodesize;
    pool->data = (uint8_t *) (pool + 1) + freepool->nodesize;

    /* The remaining free area runs from pool->data to the end of the
     * allocation, i.e. pool->rem bytes.  The previous code poisoned
     * 'poolsize' bytes from pool->data, which extends nodesize bytes
     * past the end of the malloc'ed block and can poison neighbouring
     * heap memory. */
    VG (VALGRIND_MAKE_MEM_NOACCESS (pool->data, pool->rem));
    VG (VALGRIND_MAKE_MEM_UNDEFINED (pool->data, freepool->nodesize));

    return pool + 1;
}
/*
 * Return the cell immediately following 'cell' within 'block', or NULL
 * when 'cell' is the last one.  The neighbour is reached through the
 * guard word stored just past this cell's word range.
 */
static Cell*
sec_neighbor_after (Block *block, Cell *cell)
{
	word_t *word;

	ASSERT (cell);
	ASSERT (block);

	word = cell->words + cell->n_words;
	if (!sec_is_valid_word (block, word))
		return NULL;

#ifdef WITH_VALGRIND
	/* Guard words are poisoned; open this one up for the read. */
	VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t));
#endif

	cell = *word;
	sec_check_guards (cell);

#ifdef WITH_VALGRIND
	/* Re-poison the guard word after use. */
	VALGRIND_MAKE_MEM_NOACCESS (word, sizeof (word_t));
#endif

	return cell;
}
/*
 * Verify the guard words written by sec_write_guards(): the first and
 * last words of the cell's range must both point back at the cell.
 * Under valgrind the guards are unpoisoned only for the checks and then
 * poisoned again.
 */
static inline void
sec_check_guards (Cell *cell)
{
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_DEFINED (cell->words + cell->n_words - 1, sizeof (word_t));
#endif

	ASSERT(((void**)cell->words)[0] == (void*)cell);
	ASSERT(((void**)cell->words)[cell->n_words - 1] == (void*)cell);

#ifdef WITH_VALGRIND
	/* Re-poison so stray accesses of the guards are reported. */
	VALGRIND_MAKE_MEM_NOACCESS (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_NOACCESS (cell->words + cell->n_words - 1, sizeof (word_t));
#endif
}
/* * A basic header of information wrapper allocator. Simply stores * information as a header, returns the memory + 1 past it, can be * retrieved again with - 1. Where type is stat_mem_block_t*. */ void *stat_mem_allocate(size_t size, size_t line, const char *file) { stat_mem_block_t *info = (stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size); void *data = (void*)(info + 1); if(GMQCC_UNLIKELY(!info)) return NULL; info->line = line; info->size = size; info->file = file; info->prev = NULL; info->next = stat_mem_block_root; /* likely since it only happens once */ if (GMQCC_LIKELY(stat_mem_block_root != NULL)) { VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t)); stat_mem_block_root->prev = info; VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t)); } stat_mem_block_root = info; stat_mem_allocated += size; stat_mem_high += size; stat_mem_allocated_total ++; if (stat_mem_high > stat_mem_peak) stat_mem_peak = stat_mem_high; VALGRIND_MALLOCLIKE_BLOCK(data, size, sizeof(stat_mem_block_t), 0); return data; }
/*
 * Discard the object currently being built in pool 'p' without
 * committing it.
 */
void dm_pool_abandon_object(struct dm_pool *p)
{
#ifdef VALGRIND_POOL
	/* The partial object lives at the chunk's begin pointer.  Poison
	 * the object data, not the chunk header: the previous code passed
	 * p->chunk, which made the chunk metadata (begin/end/prev, see
	 * dm_pool_free) inaccessible and would trip every later pool
	 * operation under valgrind. */
	VALGRIND_MAKE_MEM_NOACCESS(p->chunk->begin, p->object_len);
#endif
	p->object_len = 0;
	p->object_alignment = DEFAULT_ALIGNMENT;
}
/*
 * Release the edge array if it outgrew the embedded storage, then poison
 * the whole polygon so any use after finalization shows up in valgrind.
 */
void
_cairo_polygon_fini (cairo_polygon_t *polygon)
{
    /* Only free when the edges were heap-allocated. */
    if (polygon->edges != polygon->edges_embedded)
	xmemory_free (polygon->edges);

    VG (VALGRIND_MAKE_MEM_NOACCESS (polygon, sizeof (cairo_polygon_t)));
}
/*
 * Thin wrapper around VALGRIND_MAKE_MEM_NOACCESS that, when request
 * logging is compiled in, prints a backtrace for every poisoning
 * request before issuing it.
 */
void valgrindMakeMemNoaccess(uintptr_t address, uintptr_t size)
{
#if defined(VALGRIND_REQUEST_LOGS)
	VALGRIND_PRINTF_BACKTRACE("Marking an area as noaccess at 0x%lx of size %lu\n", address, size);
#endif /* defined(VALGRIND_REQUEST_LOGS) */
	VALGRIND_MAKE_MEM_NOACCESS(address, size);
}
/*
 * Release the vertex array if it outgrew the embedded storage, then
 * poison the whole pen so any use after finalization shows up in
 * valgrind.
 */
void
_cairo_pen_fini (cairo_pen_t *pen)
{
    /* Only free when the vertices were heap-allocated. */
    if (pen->vertices != pen->vertices_embedded)
	free (pen->vertices);

    VG (VALGRIND_MAKE_MEM_NOACCESS (pen, sizeof (cairo_pen_t)));
}
// Reset the spaces for the next cycle.  Under valgrind, the inactive
// 'next' space is poisoned and the 'current' space is marked usable.
// NOTE(review): 'next', 'eden' and 'current' are members declared outside
// this view; presumably the semispaces/nursery of a copying collector —
// confirm against the enclosing class.
void reset() {
  next->reset();
  eden.reset();

#ifdef HAVE_VALGRIND_H
  VALGRIND_MAKE_MEM_NOACCESS(next->start().as_int(), next->size());
  VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif
}
/*
 * Return 'voidnode' to 'freelist' by pushing it onto the singly linked
 * list of free nodes.  NULL is tolerated and ignored.  Once on the list
 * the node is poisoned so stray accesses are reported by valgrind.
 */
void
_cairo_freelist_free (cairo_freelist_t *freelist, void *voidnode)
{
    cairo_freelist_node_t *node = voidnode;

    if (node == NULL)
	return;

    /* Push onto the head of the free list. */
    node->next = freelist->first_free_node;
    freelist->first_free_node = node;

    /* Poison the recycled node until it is handed out again. */
    VG (VALGRIND_MAKE_MEM_NOACCESS (node, freelist->nodesize));
}
/*
 * Release the dash array owned by 'style', reset the dash fields so the
 * struct stays internally consistent, then poison the whole struct so
 * any use after finalization shows up in valgrind.
 */
void
_cairo_stroke_style_fini (cairo_stroke_style_t *style)
{
    free (style->dash);
    style->dash = NULL;
    style->num_dashes = 0;

    VG (VALGRIND_MAKE_MEM_NOACCESS (style, sizeof (cairo_stroke_style_t)));
}
/*
 * bucket_vg_mark_noaccess -- (internal) marks memory block as no access for vg
 *
 * No-op unless the process is running under valgrind.
 */
static void
bucket_vg_mark_noaccess(struct palloc_heap *heap, struct block_container *bc,
	struct memory_block m)
{
	if (!On_valgrind)
		return;

	/* Total span of the block: unit count times unit size. */
	size_t span = m.size_idx * bc->unit_size;
	void *data = heap_get_block_data(heap, m);

	VALGRIND_MAKE_MEM_NOACCESS(data, span);
}
/*
 * Grow 'slist' by 'growth' elements, reallocating the parallel key and
 * value arrays.  Each value slot is padded by 'gutter' bytes and the
 * value buffer is poisoned under valgrind.
 *
 * NOTE(review): both realloc() results are unchecked — on failure the
 * old buffer leaks and NULL is stored while capacity is already bumped;
 * confirm callers can tolerate/never hit OOM here, or add handling.
 * NOTE(review): the NOACCESS covers the *entire* value buffer, including
 * elements that were live before the grow — presumably callers unpoison
 * entries as they use them; verify.  The key buffer is not poisoned at
 * all, which looks asymmetric.
 */
void slist_alloc(struct simple_list* slist, size_t growth)
{
    const size_t delta = slist->element_size + gutter;

    slist->capacity += growth;
    slist->key = realloc(slist->key, slist->key_size * slist->capacity);
    slist->value = realloc(slist->value, delta * slist->capacity);

#ifndef NVALGRIND
    VALGRIND_MAKE_MEM_NOACCESS(slist->value, delta * slist->capacity);
#endif
}
/*
 * Release every heap-allocated pool chained after the embedded one, then
 * poison the whole freepool so any use after finalization shows up in
 * valgrind.
 */
void
_cairo_freepool_fini (cairo_freepool_t *freepool)
{
    cairo_freelist_pool_t *pool = freepool->pools;

    while (pool != &freepool->embedded_pool) {
	cairo_freelist_pool_t *next = pool->next;
	free (pool);
	pool = next;
    }

    /* sizeof (freepool) was only the size of the *pointer*, leaving most
     * of the structure accessible; poison the whole object instead. */
    VG (VALGRIND_MAKE_MEM_NOACCESS (freepool, sizeof (cairo_freepool_t)));
}
/*
 * Counterpart to stat_mem_allocate(): unlink the allocation's tracking
 * header from the global list, update the statistics and free it.
 * NULL is tolerated and ignored.
 */
void stat_mem_deallocate(void *ptr) {
    stat_mem_block_t *info = NULL;

    if (GMQCC_UNLIKELY(!ptr))
        return;

    /* The header sits immediately before the user pointer. */
    info = ((stat_mem_block_t*)ptr - 1);

    /*
     * we need access to the redzone that represents the info block
     * so lets do that.
     */
    VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t));

    stat_mem_deallocated       += info->size;
    stat_mem_high              -= info->size;
    stat_mem_deallocated_total ++;

    if (info->prev) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(info->prev, sizeof(stat_mem_block_t));
        info->prev->next = info->next;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(info->prev, sizeof(stat_mem_block_t));
    }
    if (info->next) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(info->next, sizeof(stat_mem_block_t));
        info->next->prev = info->prev;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(info->next, sizeof(stat_mem_block_t));
    }

    /* move ahead */
    if (info == stat_mem_block_root)
        stat_mem_block_root = info->next;

    free(info);
    /* NOTE(review): these two client requests are issued on memory that
     * has already been free()d; they only adjust valgrind's address
     * metadata, but issuing them before free() would be the conventional
     * order — confirm intent. */
    VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t));
    VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
}
/*
 * Pop the most recent level from pool 'p': destroy its valgrind mempool,
 * poison everything handed out since the level was pushed, rewind the
 * bump pointer, and release the level record itself.
 */
void pop(pool *p)
{
	level_list *l = p->levels;
	p->levels = l->next;

	VALGRIND_DESTROY_MEMPOOL(l->where);
	/* Poison the region allocated since this level began. */
	VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
	p->where = l->where;

	/* Release the level record the same way it was obtained. */
	if(USE_MMAP)
		munmap(l, sizeof(level_list));
	else
		free(l);
}
// Reset the semispaces after a collection cycle: the inactive 'next'
// space is made inaccessible and the active 'current' space usable.
// The valgrind annotations are backed by real page protection so stray
// writes fault even when not running under valgrind.
void BakerGC::reset() {
  check_growth_finish();

  next->reset();
  eden->reset();

#ifdef HAVE_VALGRIND_H
  // Casts to (void) discard the client requests' result codes.
  (void)VALGRIND_MAKE_MEM_NOACCESS(next->start().as_int(), next->size());
  (void)VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif
  mprotect(next->start(), next->size(), PROT_NONE);
  mprotect(current->start(), current->size(), PROT_READ | PROT_WRITE);
}
/*
 * Zero bytes [from, to) of 'memory'.  The range is assumed to be
 * poisoned under valgrind: it is made writable for the memset and then
 * re-poisoned so later touches are still reported.
 */
static inline void
sec_clear_noaccess (void *memory, size_t from, size_t to)
{
	char *ptr = memory;
	ASSERT (from <= to);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (ptr + from, to - from);
#endif

	memset (ptr + from, 0, to - from);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (ptr + from, to - from);
#endif
}
/*
 * Allocate and initialize a new pool with a zeroed 4096-byte buffer.
 * The buffer is registered as a valgrind mempool and fully poisoned
 * until individual pieces are handed out.  Allocation failure is fatal
 * (asserted).
 */
struct pool *
allocate_pool()
{
    struct pool *pool = malloc(sizeof(struct pool));

    assert(pool);
    pool->allocated = 4096;
    pool->used = 0;

    pool->buf = malloc(pool->allocated);
    assert(pool->buf);
    memset(pool->buf, 0, pool->allocated);

    VALGRIND_CREATE_MEMPOOL(pool, 0, 0);
    VALGRIND_MAKE_MEM_NOACCESS(pool->buf, pool->allocated);

    return pool;
}
/*
 * Initialize 'freepool' for nodes of 'nodesize' bytes.  The pool list
 * starts with the embedded pool so small users never touch malloc; the
 * embedded storage is poisoned until nodes are handed out.
 */
void
_cairo_freepool_init (cairo_freepool_t *freepool, unsigned nodesize)
{
    freepool->first_free_node = NULL;
    freepool->pools = &freepool->embedded_pool;
    freepool->nodesize = nodesize;

    freepool->embedded_pool.next = NULL;
    freepool->embedded_pool.size = sizeof (freepool->embedded_data);
    freepool->embedded_pool.rem = sizeof (freepool->embedded_data);
    freepool->embedded_pool.data = freepool->embedded_data;

    VG (VALGRIND_MAKE_MEM_NOACCESS (freepool->embedded_data, sizeof (freepool->embedded_data)));
}
/*
 * Map a fresh anonymous superblock and return it.  The mapping is
 * immediately poisoned: although addressible we don't want the program
 * using it unless it's handed out by custom_alloc() with redzones, so
 * better not to have it accessible at all.
 */
void* get_superblock(void)
{
   void* mem = mmap(0, SUPERBLOCK_SIZE,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

   /* mmap failure (MAP_FAILED == (void*)-1) is fatal here. */
   assert(mem != ((void*)(-1)));

   VALGRIND_MAKE_MEM_NOACCESS(mem, SUPERBLOCK_SIZE);

   return mem;
}
// Construct the manager with a single initial chunk.  Under valgrind the
// manager is registered as a mempool and the chunk is fully poisoned;
// sub-allocations are unpoisoned as they are handed out.
ContextMemoryManager::ContextMemoryManager() : d_indexChunkList(0) {
  // Create initial chunk
  d_chunkList.push_back((char*)malloc(chunkSizeBytes));
  d_nextFree = d_chunkList.back();
  if(d_nextFree == NULL) {
    throw std::bad_alloc();
  }
  d_endChunk = d_nextFree + chunkSizeBytes;

#ifdef CVC4_VALGRIND
  VALGRIND_CREATE_MEMPOOL(this, 0, false);
  VALGRIND_MAKE_MEM_NOACCESS(d_nextFree, chunkSizeBytes);
  // One allocation-tracking vector per push() scope; this is the base scope.
  d_allocations.push_back(std::vector<char*>());
#endif /* CVC4_VALGRIND */
}