apr_status_t heap_destroy(void *data) { heap_t *hp = (heap_t *)data; // break the memnode ring before freeing *hp->active->ref = NULL; apr_allocator_free(hp->allocator, hp->active); return APR_SUCCESS; }
static apr_status_t alloc_cleanup(void *data) { apr_bucket_alloc_t *list = data; apr_allocator_free(list->allocator, list->blocks); #if APR_POOL_DEBUG if (list->pool && list->allocator != apr_pool_allocator_get(list->pool)) { apr_allocator_destroy(list->allocator); } #endif return APR_SUCCESS; }
/*
 * Explicitly destroy a bucket allocator ahead of pool destruction.
 * Unregisters the pool cleanup (so teardown does not run twice) and
 * then performs the same teardown the cleanup would have done.
 *
 * Previously this function duplicated alloc_cleanup's body (free the
 * block list, plus the APR_POOL_DEBUG allocator destroy); delegating
 * to alloc_cleanup keeps the two teardown paths in sync.
 */
APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(apr_bucket_alloc_t *list)
{
	if (list->pool) {
		apr_pool_cleanup_kill(list->pool, list, alloc_cleanup);
	}

	/* Single source of truth for the teardown logic. */
	(void)alloc_cleanup(list);
}
/*
 * Release memory obtained from a bucket allocator.  The node header
 * sits immediately before the caller-visible pointer; it records the
 * owning allocator and the node's size class.
 */
APU_DECLARE_NONSTD(void) apr_bucket_free(void *mem)
{
	char *base = (char *)mem - SIZEOF_NODE_HEADER_T;
	node_header_t *hdr = (node_header_t *)base;
	apr_bucket_alloc_t *owner = hdr->alloc;

	if (hdr->size != SMALL_NODE_SIZE) {
		/* Large allocation: give the backing memnode back to the
		 * allocator directly. */
		apr_allocator_free(owner->allocator, hdr->memnode);
		return;
	}

	/* Small allocation: push it onto the owner's freelist for reuse. */
	check_not_already_free(hdr);
	hdr->next = owner->freelist;
	owner->freelist = hdr;
}
/*
 * Ensure the buffer can accept at least 'avail' more bytes.
 *
 * If the current memnode lacks the space, allocate a fresh node sized
 * for the live data plus the requested headroom, copy the live bytes
 * across, and release the old node.  The buffer's read offset resets
 * to 0 because the data now starts at the beginning of the new node.
 */
void buffer_resize(buffer_t *self, int avail)
{
	int size, new_size;
	apr_memnode_t *new_node;

	if (buffer_available(self) >= avail)
		return;

	size = buffer_len(self);
	new_size = size + avail;

	/* NOTE(review): apr_allocator_alloc can return NULL on OOM; the
	 * surrounding code has no error path here — confirm the project's
	 * allocator aborts on failure, otherwise this dereferences NULL. */
	new_node = apr_allocator_alloc(self->allocator, new_size);
	memcpy((char *)new_node + APR_MEMNODE_T_SIZE, buffer_ptr(self), size);

	/* BUG FIX: advance first_avail past the bytes just copied.  The
	 * buffer's length is derived from the node's first_avail once
	 * offset is reset to 0; without this the copied data is considered
	 * free space and gets clobbered by the next append. */
	new_node->first_avail += size;

	apr_allocator_free(self->allocator, self->node);
	self->node = new_node;
	self->offset = 0;
}
/*
 * Incrementally garbage-collect one memnode of the heap.
 *
 * One call processes a single node of the heap's memnode ring: live
 * terms reachable from 'roots' are evacuated into 'copy_node' (either
 * the previous gc target, when it has room, or a freshly allocated
 * node), then the scanned node is removed from the ring and freed.
 *
 * hp         - heap to collect; gc_last tracks where the previous pass
 *              stopped, gc_spot is non-NULL only while gc is running
 * roots      - array of root-term vectors to trace from
 * root_sizes - element count of each roots[i] vector
 * nroots     - number of root vectors
 *
 * Returns APR_SUCCESS.
 *
 * NOTE(review): statement order matters throughout — copy_node is made
 * the active node before tracing so that seek_live's copies land in it,
 * and restored afterwards.  Do not reorder.
 */
apr_status_t heap_gc(heap_t *hp, term_t *roots[], int root_sizes[], int nroots)
{
	apr_memnode_t *saved_active;
	apr_memnode_t *gc_node, *copy_node;
	int node_size;
	int i, j;

	// pick the node to scan: start at the active node on the first
	// run, otherwise continue round the ring from the last gc target
	if (hp->gc_last == NULL)	// gc never run
		gc_node = hp->active;
	else
		gc_node = hp->gc_last->next;

	node_size = node_alloc_size(gc_node);

	// if gc_last node has enough space then use it for
	// live term copies, otherwise, create a new node
	// NB: gc_last may point to gc_node
	if (hp->gc_last != NULL &&
		hp->gc_last != gc_node &&
		node_free_space(hp->gc_last) >= node_size)
		copy_node = hp->gc_last;
	else
		copy_node = apr_allocator_alloc(hp->allocator, node_size);

	// temporarily make copy_node active; restore later
	saved_active = hp->active;
	hp->active = copy_node;
	hp->hend = heap_htop(hp);

	// save gc_node reference for seek_live;
	// non-NULL gc_spot means gc in progress
	hp->gc_spot = gc_node;

	// trace every root; seek_live evacuates live terms from gc_node
	// into the (now active) copy_node
	for (i = 0; i < nroots; i++)
		for (j = 0; j < root_sizes[i]; j++)
			seek_live(&roots[i][j], saved_active, hp);

	// copy_node was sized to hold everything in gc_node, so the
	// active node must not have changed during tracing
	assert(hp->active == copy_node);	// no overflow

	hp->gc_spot = NULL;

	// restore active node
	if (saved_active != gc_node)
		hp->active = saved_active;

	// insert copy_node into the ring:
	// if gc_node is the last node left
	// if copy_node is non-empty and was just created;
	// free copy_node if it was just created
	// and not put on the list
	if (gc_node->next == gc_node ||
		(node_alloc_size(copy_node) > 0 && copy_node != hp->gc_last))
	{
		list_insert(copy_node, gc_node);
		hp->gc_last = copy_node;
	}
	else if (copy_node != hp->gc_last)
	{
		// freshly created but empty and not the sole survivor:
		// discard it (re-point active first if it still refers here)
		if (hp->active == copy_node)
			hp->active = gc_node->next;
		apr_allocator_free(hp->allocator, copy_node);
	}

	hp->alloc_size -= node_alloc_size(gc_node);

	// reclaim memory: detach the scanned node from the ring and
	// NULL-terminate it so the allocator frees only this node
	list_remove(gc_node);
	gc_node->next = NULL;
	apr_allocator_free(hp->allocator, gc_node);

	// after gc is run, anticipated need is zero
	hp->hend = heap_htop(hp);

	return APR_SUCCESS;
}