/* Release all memory acquired by this allocator. */
static void
gs_heap_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_malloc_memory_t *const mmem = (gs_malloc_memory_t *) mem;
    gx_monitor_t *mon = mmem->monitor;

    /*
     * We don't perform locking during this process since the 'monitor'
     * is contained in this allocator, and will get freed along the way.
     * It is only called at exit, and there better not be any threads
     * accessing this allocator.
     */
    mmem->monitor = NULL;       /* delete reference to this monitor */
    gx_monitor_free(mon);       /* free the monitor */
    if (free_mask & FREE_ALL_DATA) {
        gs_malloc_block_t *bp = mmem->allocated;
        gs_malloc_block_t *np;

        for (; bp != 0; bp = np) {
            np = bp->next;
            if_debug3('a', "[a]gs_heap_free_all(%s) 0x%lx(%u)\n",
                      client_name_string(bp->cname), (ulong) (bp + 1),
                      bp->size);
            gs_alloc_fill(bp + 1, gs_alloc_fill_free, bp->size);
            free(bp);
        }
    }
    if (free_mask & FREE_ALL_ALLOCATOR)
        free(mem);
}
/* Free an entry.  The caller is responsible for removing the entry */
/* from any other structures (like a hash table). */
void
gx_bits_cache_free(gx_bits_cache * bc, gx_cached_bits_head * cbh,
                   gx_bits_cache_chunk * bck)
{
    uint size = cbh->size;

    bc->csize--;
    bc->bsize -= size;
    bck->allocated -= size;
    gs_alloc_fill(cbh, gs_alloc_fill_deleted, size);
    cbh->size = size;           /* gs_alloc_fill may have overwritten */
    cb_head_set_free(cbh);
}
/* Attempt to allocate an entry of a given size. */
/* If successful, set *pcbh and return 0. */
/* If there isn't enough room, set *pcbh to an entry requiring freeing, */
/* or to 0 if we are at the end of the chunk, and return -1. */
int
gx_bits_cache_alloc(gx_bits_cache * bc, uint32_t lsize,
                    gx_cached_bits_head ** pcbh)
{
#define ssize ((uint)lsize)
    uint32_t lsize1 = lsize + sizeof(gx_cached_bits_head);

#define ssize1 ((uint)lsize1)
    uint cnext = bc->cnext;
    gx_bits_cache_chunk *bck = bc->chunks;
    uint left = bck->size - cnext;
    gx_cached_bits_head *cbh;
    gx_cached_bits_head *cbh_next;
    uint fsize = 0;

    if (lsize1 > left && lsize != left) {
        /* Not enough room to allocate in this chunk. */
        *pcbh = 0;
        return -1;
    }
    /* Look for and/or free enough space. */
    cbh = cbh_next = (gx_cached_bits_head *) (bck->data + cnext);
    while (fsize < ssize1 && fsize != ssize) {
        if (!cb_head_is_free(cbh_next)) {
            /* Ask the caller to free the entry. */
            if (fsize)
                cbh->size = fsize;
            *pcbh = cbh_next;
            return -1;
        }
        fsize += cbh_next->size;
        if_debug2('K', "[K]merging free bits 0x%lx(%u)\n",
                  (ulong) cbh_next, cbh_next->size);
        cbh_next = (gx_cached_bits_head *) ((byte *) cbh + fsize);
    }
    if (fsize > ssize) {        /* fsize >= ssize1 */
        cbh_next = (gx_cached_bits_head *) ((byte *) cbh + ssize);
        cbh_next->size = fsize - ssize;
        cb_head_set_free(cbh_next);
        if_debug2('K', "[K]shortening bits 0x%lx by %u (initial)\n",
                  (ulong) cbh, fsize - ssize);
    }
    gs_alloc_fill(cbh, gs_alloc_fill_block, ssize);
    cbh->size = ssize;
    bc->bsize += ssize;
    bc->csize++;
    bc->cnext += ssize;
    bck->allocated += ssize;
    *pcbh = cbh;
    return 0;
#undef ssize
#undef ssize1
}
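/*
 * The allocation path above behaves like a small bump allocator with
 * in-place coalescing: starting at cnext it absorbs consecutive free
 * entries until it has either exactly ssize bytes or at least ssize1
 * (request plus a header for the remainder), then splits whatever is
 * left off as a new free head.  The stand-alone sketch below shows the
 * same merge-then-split idea using hypothetical simplified types (not
 * the Ghostscript structures); it is illustrative only and excluded
 * from the build.
 */
#if 0
#include <stdio.h>

/* Hypothetical, simplified stand-in for gx_cached_bits_head. */
typedef struct head_s {
    unsigned size;              /* bytes covered by this entry, header included */
    int is_free;
} head_t;

/* One cache chunk; the union keeps the byte array suitably aligned. */
static union {
    unsigned char bytes[256];
    head_t align;
} chunk;

/* Walk consecutive free heads starting at 'offset' until at least 'need'
 * bytes have been gathered.  Return the merged size, or 0 if a live entry
 * is reached first (the real code hands that entry back for eviction). */
static unsigned
merge_free(unsigned offset, unsigned need)
{
    unsigned fsize = 0;

    while (fsize < need) {
        head_t *h = (head_t *)(chunk.bytes + offset + fsize);

        if (!h->is_free)
            return 0;
        fsize += h->size;
    }
    return fsize;
}

int
main(void)
{
    /* Seed the chunk with three free 64-byte entries. */
    for (unsigned off = 0; off < 192; off += 64) {
        head_t *h = (head_t *)(chunk.bytes + off);

        h->size = 64;
        h->is_free = 1;
    }
    unsigned need = 96 + sizeof(head_t);        /* data + header, like lsize1 */
    unsigned got = merge_free(0, need);

    if (got >= need) {
        /* Split: the tail of the merged run becomes a new free head. */
        head_t *entry = (head_t *)chunk.bytes;
        head_t *tail = (head_t *)(chunk.bytes + need);

        tail->size = got - need;
        tail->is_free = 1;
        entry->size = need;
        entry->is_free = 0;
        printf("allocated %u bytes, %u left free\n", entry->size, tail->size);
    }
    return 0;
}
#endif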
static void *
gs_heap_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                      client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *ptr = (gs_malloc_block_t *) obj - 1;
    gs_memory_type_ptr_t pstype = ptr->type;
    uint old_size = gs_object_size(mem, obj) + sizeof(gs_malloc_block_t);
    uint new_size =
        gs_struct_type_size(pstype) * new_num_elements +
        sizeof(gs_malloc_block_t);
    gs_malloc_block_t *new_ptr;

    if (new_size == old_size)
        return obj;
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);        /* Exclusive access */
    new_ptr = (gs_malloc_block_t *) gs_realloc(ptr, old_size, new_size);
    if (new_ptr == 0) {
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);    /* Don't hold the monitor on failure */
        return 0;
    }
    if (new_ptr->prev)
        new_ptr->prev->next = new_ptr;
    else
        mmem->allocated = new_ptr;
    if (new_ptr->next)
        new_ptr->next->prev = new_ptr;
    new_ptr->size = new_size - sizeof(gs_malloc_block_t);
    mmem->used -= old_size;
    mmem->used += new_size;
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);        /* Done with exclusive access */
    if (new_size > old_size)
        gs_alloc_fill((byte *) new_ptr + old_size,
                      gs_alloc_fill_alloc, new_size - old_size);
    return new_ptr + 1;
}
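/*
 * Because gs_realloc may move the block, the neighbouring headers (and
 * possibly the list head) still point at the old address afterwards, so
 * the prev/next pointers must be re-stitched before anything else walks
 * the list.  The minimal sketch below isolates that relinking step using
 * a hypothetical simplified header (not gs_malloc_block_t); illustrative
 * only, not part of the build.
 */
#if 0
#include <stdlib.h>

/* Hypothetical simplified block header, analogous to gs_malloc_block_t. */
typedef struct blk_s {
    struct blk_s *next, *prev;
    size_t size;
} blk_t;

/* Resize a block and re-link the doubly linked list around the
 * (possibly moved) header.  '*list' is the list head. */
static blk_t *
resize_block(blk_t **list, blk_t *old, size_t new_size)
{
    blk_t *nb = realloc(old, sizeof(blk_t) + new_size);

    if (nb == NULL)
        return NULL;            /* old block is still valid and still linked */
    if (nb->prev)
        nb->prev->next = nb;    /* neighbour still pointed at 'old' */
    else
        *list = nb;             /* we were the first block */
    if (nb->next)
        nb->next->prev = nb;
    nb->size = new_size;
    return nb;
}
#endif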
static void
gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *bp;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));

    if_debug3('a', "[a-]gs_free(%s) 0x%lx(%u)\n",
              client_name_string(cname), (ulong) ptr,
              (ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
    if (ptr == 0)
        return;
    pstype = ((gs_malloc_block_t *) ptr)[-1].type;
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);        /* Exclusive access */
    bp = mmem->allocated;       /* If 'finalize' releases memory, this function
                                   could be called recursively and change
                                   mmem->allocated. */
    if (ptr == bp + 1) {
        mmem->allocated = bp->next;
        mmem->used -= bp->size + sizeof(gs_malloc_block_t);
        if (mmem->allocated)
            mmem->allocated->prev = 0;
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);    /* Done with exclusive access */
        gs_alloc_fill(bp, gs_alloc_fill_free,
                      bp->size + sizeof(gs_malloc_block_t));
        free(bp);
    } else {
        gs_malloc_block_t *np;

        /*
         * bp == 0 at this point is an error, but we'd rather have an
         * error message than an invalid access.
         */
        if (bp) {
            for (; (np = bp->next) != 0; bp = np) {
                if (ptr == np + 1) {
                    bp->next = np->next;
                    if (np->next)
                        np->next->prev = bp;
                    mmem->used -= np->size + sizeof(gs_malloc_block_t);
                    if (mmem->monitor)
                        gx_monitor_leave(mmem->monitor);        /* Done with exclusive access */
                    gs_alloc_fill(np, gs_alloc_fill_free,
                                  np->size + sizeof(gs_malloc_block_t));
                    free(np);
                    return;
                }
            }
        }
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);    /* Done with exclusive access */
        lprintf2("%s: free 0x%lx not found!\n",
                 client_name_string(cname), (ulong) ptr);
        free((char *)((gs_malloc_block_t *) ptr - 1));
    }
}
/* Allocate various kinds of blocks. */
static byte *
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    const char *msg;
    static const char *const ok_msg = "OK";

#  define set_msg(str) (msg = (str))
#else
#  define set_msg(str) DO_NOTHING
#endif

    /* Exclusive access so our decisions and changes are 'atomic' */
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);
    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* Definitely too large to allocate; also avoids overflow. */
        set_msg("exceeded limit");
    } else {
        uint added = size + sizeof(gs_malloc_block_t);

        if (mmem->limit - added < mmem->used)
            set_msg("exceeded limit");
        else if ((ptr = (byte *) malloc(added)) == 0)
            set_msg("failed");
        else {
            gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

            /*
             * We would like to check that malloc aligns blocks at least as
             * strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
             * However, Microsoft VC 6 does not satisfy this requirement.
             * See gsmemory.h for more explanation.
             */
            set_msg(ok_msg);
            if (mmem->allocated)
                mmem->allocated->prev = bp;
            bp->next = mmem->allocated;
            bp->prev = 0;
            bp->size = size;
            bp->type = &st_bytes;
            bp->cname = cname;
            mmem->allocated = bp;
            ptr = (byte *) (bp + 1);
            mmem->used += size + sizeof(gs_malloc_block_t);
            if (mmem->used > mmem->max_used)
                mmem->max_used = mmem->used;
        }
    }
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);        /* Done with exclusive access */
    /* We don't want to 'fill' under the mutex, to keep the window smaller. */
    if (ptr)
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
        dlprintf4("[a+]gs_malloc(%s)(%u) = 0x%lx: %s\n",
                  client_name_string(cname), size, (ulong) ptr, msg);
#endif
    return ptr;
#undef set_msg
}
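/*
 * These procedures are normally reached through the generic gs_memory_t
 * interface rather than called by name.  The sketch below is a hedged
 * usage example: it assumes the gs_malloc_memory_init, gs_alloc_bytes,
 * gs_free_object, and gs_memory_free_all entry points and the
 * FREE_ALL_EVERYTHING mask declared in gsmalloc.h/gsmemory.h, and is
 * not part of this file.
 */
#if 0
#include "gsmalloc.h"   /* gs_malloc_memory_init, gs_malloc_memory_t (assumed) */
#include "gsmemory.h"   /* gs_alloc_bytes, gs_free_object, gs_memory_free_all (assumed) */

static void
demo(void)
{
    gs_malloc_memory_t *mmem = gs_malloc_memory_init();
    gs_memory_t *mem = (gs_memory_t *)mmem;

    /* Dispatches to gs_heap_alloc_bytes: a gs_malloc_block_t header is
     * prepended and the block is pushed onto mmem->allocated. */
    byte *buf = gs_alloc_bytes(mem, 1024, "demo buffer");

    if (buf)
        gs_free_object(mem, buf, "demo buffer");        /* gs_heap_free_object */

    /* At shutdown, release all remaining data and the allocator itself
     * (gs_heap_free_all). */
    gs_memory_free_all(mem, FREE_ALL_EVERYTHING, "demo shutdown");
}
#endif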