/*
 * Resize a block previously obtained from gp_alloc() to SIZE bytes,
 * preserving the bookkeeping frame that precedes the user area.
 * USAGE is an optional description recorded for allocation tracing.
 * Passing OLD == NULL behaves like gp_alloc(size, usage).
 *
 * Returns a pointer to the (possibly moved) user area; on allocation
 * failure control passes to int_error() and does not come back here.
 */
generic *
gp_realloc(generic *old, size_t size, const char *usage)
{
    struct frame_struct *p;
    struct frame_struct *moved;
    size_t total;

    if (!old)
	return gp_alloc(size, usage);

    validate(old);

    /* if block gets moved, old block is marked free. If not, we'll
     * remark it later (mark() below re-stamps the frame) */
    mark_free(old);

    p = (struct frame_struct *) old - 1;
    total = size + RESERVED_SIZE + 1;

    /* Never overwrite p before the NULL check: if int_error() were to
     * return, the original code would dereference a NULL frame and the
     * old block would be unrecoverable. */
    moved = realloc(p, total);
    if (!moved)
	int_error(NO_CARET, "Out of memory");
    p = moved;

    /* p->requested_size still holds the OLD user size here; mark()
     * updates it to the new one, so trace and accounting come first. */
    TRACE_ALLOC(("gp_realloc %d for %s (was %d)\n",
		 (int) size,
		 usage ? usage : "<unknown>",
		 p->requested_size));
    bytes_allocated += size - p->requested_size;

    mark(p, size, usage);
    return (generic *) (p + 1);
}
/*
 * Allocate SIZE cells of array storage from the SECD heap's array region.
 *
 * Strategy: first-fit scan of the doubly linked list of array metadata
 * cells rooted at secd->arrlist.  If a free gap at least SIZE cells wide
 * is found it is reused (splitting off a new free gap when there is room
 * for one, i.e. more than one spare cell); otherwise a fresh array is
 * carved off the top of the region by moving secd->arrayptr down.
 *
 * Returns a pointer to the first data cell (one past the metadata cell),
 * or &secd_out_of_memory when the array region would collide with the
 * fixed-cell region.
 *
 * NOTE(review): assumes the arrays grow downward from arrlist toward
 * fixedptr, with each array preceded by one metadata cons — inferred from
 * the pointer arithmetic here; confirm against init_meta()/arr_meta().
 */
cell_t *alloc_array(secd_t *secd, size_t size) {
    /* look through the list of arrays */
    cell_t *cur = secd->arrlist;
    while (not_nil(mcons_next(cur))) {
        if (is_array_free(secd, cur)) {
            size_t cursize = arrmeta_size(secd, cur);
            if (cursize >= size) {
                /* allocate this gap */
                if (cursize > size + 1) {
                    /* make a free gap after: the leftover cells get their
                     * own metadata cell spliced between cur and its prev */
                    cell_t *newmeta = cur + size + 1;
                    cell_t *prevmeta = mcons_prev(cur);
                    init_meta(secd, newmeta, prevmeta, cur);
                    cur->as.mcons.prev = newmeta;
                    prevmeta->as.mcons.next = newmeta;
                    mark_free(newmeta, true);
                }
                mark_free(cur, false);   /* claim the (possibly shrunk) gap */
                return meta_mem(cur);
            }
        }
        cur = mcons_next(cur);
    }
    /* no chunks of sufficient size found, move secd->arrayptr;
     * refuse if that would overlap the fixed-cell area */
    if (secd->arrayptr - secd->fixedptr <= (int)size)
        return &secd_out_of_memory;
    /* create new metadata cons at arrayptr - size - 1 and link it in as
     * the new bottom of the array list */
    cell_t *oldmeta = secd->arrayptr;
    cell_t *meta = oldmeta - size - 1;
    init_meta(secd, meta, oldmeta, SECD_NIL);
    oldmeta->as.mcons.next = meta;
    secd->arrayptr = meta;
    memdebugf("NEW ARR[%ld], size %zd\n", cell_index(secd, meta), size);
    mark_free(meta, false);
    return meta_mem(meta);
}
/*
 * Return an array (by its data pointer MEM) to the SECD heap.
 *
 * The array's metadata cell is marked free and then coalesced with a
 * free previous and/or next neighbour by unlinking the absorbed metadata
 * cells from the doubly linked list.  If the array sits at the bottom of
 * the region (meta == secd->arrayptr), the region boundary is simply
 * moved up instead, swallowing at most one adjacent free gap.
 *
 * Preconditions (checked by assertv): MEM lies inside the array region
 * and its reference count is zero.
 */
void free_array(secd_t *secd, cell_t *mem) {
    assertv(mem <= secd->arrlist, "free_array: tried to free arrlist");
    assertv(secd->arrayptr < mem, "free_array: not an array");

    cell_t *meta = arr_meta(mem);
    cell_t *prev = mcons_prev(meta);

    assertv(meta->nref == 0, "free_array: someone seems to still use the array");
    mark_free(meta, true);

    if (meta != secd->arrayptr) {
        if (is_array_free(secd, prev)) {
            /* merge with the previous array: unlink prev, its gap is
             * absorbed into meta's */
            cell_t *pprev = prev->as.mcons.prev;
            pprev->as.mcons.next = meta;
            meta->as.mcons.prev = pprev;
        }
        cell_t *next = mcons_next(meta);
        if (is_array_free(secd, next)) {
            /* merge with the next array: unlink meta, next now spans
             * both gaps */
            cell_t *newprev = meta->as.mcons.prev;
            next->as.mcons.prev = newprev;
            newprev->as.mcons.next = next;
        }
        mark_free(meta, true);
    } else {
        /* freeing the bottom-most array: move arrayptr into the array area */
        prev->as.mcons.next = SECD_NIL;
        secd->arrayptr = prev;
        if (is_array_free(secd, prev)) {
            /* at most one array after 'arr' may be free, so one more hop
             * suffices to reach an allocated boundary */
            cell_t *pprev = prev->as.mcons.prev;
            pprev->as.mcons.next = SECD_NIL;
            secd->arrayptr = pprev;
        }
    }
    memdebugf("FREE ARR[%ld]", cell_index(secd, meta));
}
/*
 * Release a block previously obtained from gp_alloc(), keeping the
 * global allocation statistics in sync and trapping double frees.
 */
void
checked_free(generic *p)
{
    validate(p);
    mark_free(p);		/* trap attempts to free twice */

    /* The bookkeeping frame sits immediately before the user area. */
    struct frame_struct *hdr = (struct frame_struct *) p - 1;

    TRACE_ALLOC(("free %d for %s\n",
		 hdr->requested_size,
		 (hdr->use ? hdr->use : "(NULL)")));

    bytes_allocated -= hdr->requested_size;
    free(hdr);
}
/*
 * Insert a freed block into the allocator's address-ordered free list,
 * coalescing it with physically adjacent free neighbours.
 *
 * The list is kept sorted by block address (head = lowest, tail =
 * highest), which makes neighbour coalescing a matter of checking the
 * chosen prev/next.  All list manipulation happens under the shared
 * allocator's spinlock.
 */
static void insert_block(void *ptr) {
    void *next = NULL;
    void *prev = NULL;

    mark_free(ptr);
#ifdef CLOBBER_FREED_MEMORY
    /* poison the payload so use-after-free shows up deterministically */
    memset(ptr, 0x7F, get_size(ptr));
#endif

    SpinLockAcquire(&ShemDynAllocShmem->mutex);
    if (!ShemDynAllocShmem->head) {
        /* empty list: block becomes both head and tail */
        ShemDynAllocShmem->head = ptr;
        ShemDynAllocShmem->tail = ptr;
    } else {
        if ((intptr_t) ptr < (intptr_t) ShemDynAllocShmem->head) {
            /* new lowest address: prepend */
            next = ShemDynAllocShmem->head;
            ShemDynAllocShmem->head = ptr;
        } else if ((intptr_t) ptr > (intptr_t) ShemDynAllocShmem->tail) {
            /* new highest address: append */
            prev = ShemDynAllocShmem->tail;
            ShemDynAllocShmem->tail = ptr;
        } else {
            /* interior: walk until ptr falls between prev and its next.
             * NOTE(review): relies on ptr being strictly between head and
             * tail here, so the scan must find a slot before prev runs
             * off the end — the disabled asserts below record that
             * expectation; confirm it holds for all callers. */
            prev = ShemDynAllocShmem->head;
            while (prev) {
                next = get_next(prev);
                if (((intptr_t) ptr > (intptr_t) prev)
                    && ((intptr_t) ptr < (intptr_t) next))
                    break;
                prev = next;
            }
            // Assert(prev != NULL);
            // Assert(next != NULL);
        }
    }

    /* splice in, then try to merge with each physical neighbour */
    set_prev(ptr, prev);
    set_next(ptr, next);
    if (prev) {
        set_next(prev, ptr);
        if (coalesce_blocks(prev, ptr)) {
            /* ptr was absorbed into prev: keep tail accurate and continue
             * merging from the surviving block */
            if (ShemDynAllocShmem->tail == ptr)
                ShemDynAllocShmem->tail = prev;
            ptr = prev;
        }
    }
    if (next) {
        set_prev(next, ptr);
        coalesce_blocks(ptr, next);
    }
    SpinLockRelease(&ShemDynAllocShmem->mutex);
}