/** * This will defrag free blocks that are contiguous in the memory */ void rhock_defrag() { rhock_memory_addr addr = 0; rhock_memory_addr last_free = RHOCK_NULL; while (addr < RHOCK_MEMORY) { struct rhock_memory_chunk *c = CHUNK_AT(addr); if (CHUNK_FREE(c)) { // If a chunk is free, try to defrag it with the last free if it directly // follow it if (last_free == RHOCK_NULL) { last_free = addr; } else { struct rhock_memory_chunk *merge = CHUNK_AT(last_free); merge->size += CHUNK_HEADER+c->size; } } else { last_free = RHOCK_NULL; // This will happen when reallocating a block which is directly followed by // free space if (c->next == addr+CHUNK_HEADER+c->size) { struct rhock_memory_chunk *merge = CHUNK_AT(addr+CHUNK_HEADER+c->size); c->size += CHUNK_HEADER+merge->size; c->next = merge->next; } } addr += CHUNK_HEADER + c->size; } }
/*
 * Release everything a GnmCellCopy owns (its expression and its value),
 * clear the freed fields, then hand the structure back to the
 * cell_copy_pool chunk allocator.
 */
static void
gnm_cell_copy_free (GnmCellCopy *cc)
{
	if (cc->texpr != NULL) {
		gnm_expr_top_unref (cc->texpr);
		cc->texpr = NULL;
	}

	value_release (cc->val);
	cc->val = NULL;

	CHUNK_FREE (cell_copy_pool, cc);
}
/*
 * Return a node to the tree's chunk pool and decrement the tree's
 * node count to match.
 */
static inline void
free_avl_node (avl_tree_t *tree, avl_node_t *node)
{
	CHUNK_FREE(tree, node);
	tree->n -= 1;
}
/**
 * Go through the freelists and free puddles with no used chunks.
 *
 * For each freelist size class this (1) detaches the puddle list and
 * rebuilds a per-puddle freelist while counting free chunks, (2) frees
 * puddles that are entirely unused (or everything, when shutting down
 * the puddle pool itself), (3) sorts the surviving puddles by free-chunk
 * count, and (4) rebuilds the pool's global freelist from the survivors.
 *
 * @param pool Pool to sweep; must not be NULL.
 * @return
 * Number of freed puddles.
 */
static size_t mempool_free_puddles (mempool_struct *pool)
{
    size_t chunksize_real, nrof_arrays, i, j, freed;
    mempool_chunk_struct *last_free, *chunk;
    mempool_puddle_struct *puddle, *next_puddle;

    HARD_ASSERT(pool != NULL);

    /* Pools that bypass puddle tracking have nothing to sweep. */
    if (pool->flags & MEMPOOL_BYPASS_POOLS) {
        return 0;
    }

    freed = 0;

    for (i = 0; i < MEMPOOL_NROF_FREELISTS; i++) {
        /* Chunk size and chunks-per-puddle for this size class
         * (each class i holds chunks of chunksize << i bytes). */
        chunksize_real = sizeof(mempool_chunk_struct) + (pool->chunksize << i);
        nrof_arrays = pool->expand_size >> i;

        /* Free empty puddles and setup puddle-local freelists.
         * The list is detached up front (pool->puddlelist[i] = NULL) and
         * surviving puddles are pushed back one by one below. */
        for (puddle = pool->puddlelist[i], pool->puddlelist[i] = NULL;
                puddle != NULL; puddle = next_puddle) {
            next_puddle = puddle->next;

            /* Count free chunks in puddle, and set up a local freelist */
            puddle->first_free = puddle->last_free = NULL;
            puddle->nrof_free = 0;

            for (j = 0; j < nrof_arrays; j++) {
                /* Chunks live at fixed offsets inside the puddle's
                 * contiguous allocation. */
                chunk = (mempool_chunk_struct *) (((char *) puddle->first_chunk) + chunksize_real * j);

                /* Find free chunks. */
                if (CHUNK_FREE(MEM_USERDATA(chunk))) {
                    if (puddle->nrof_free == 0) {
                        /* First free chunk found: it is both head and
                         * tail of the local freelist. */
                        puddle->first_free = chunk;
                        puddle->last_free = chunk;
                        chunk->next = NULL;
                    } else {
                        /* Push onto the head of the local freelist. */
                        chunk->next = puddle->first_free;
                        puddle->first_free = chunk;
                    }

                    puddle->nrof_free++;
                }
            }

            /* Can we actually free this puddle? Either every chunk is
             * free, or we are deiniting and this is the puddle pool
             * itself (which is torn down unconditionally). */
            if (puddle->nrof_free == nrof_arrays || (deiniting && pool == pool_puddle)) {
                /* Yup. Forget about it. */
                efree(puddle->first_chunk);

                /* The puddle descriptor itself comes from pool_puddle;
                 * don't return it there while that pool is being torn
                 * down. */
                if (!deiniting || pool != pool_puddle) {
                    mempool_return(pool_puddle, puddle);
                }

                /* NOTE(review): on the deiniting path nrof_free may be
                 * < nrof_arrays, yet both counters are still decremented
                 * by nrof_arrays — presumably acceptable at shutdown;
                 * confirm. */
                pool->nrof_free[i] -= nrof_arrays;
                pool->nrof_allocated[i] -= nrof_arrays;
                freed++;
            } else {
                /* Nope, keep this puddle: put it back into the tracking list */
                puddle->next = pool->puddlelist[i];
                pool->puddlelist[i] = puddle;
            }
        }

        /* Sort the puddles by amount of free chunks. It will let us set up the
         * freelist so that the chunks from the fullest puddles are used first.
         * This should (hopefully) help us free some of the lesser-used puddles
         * earlier. */
        pool->puddlelist[i] = sort_linked_list(pool->puddlelist[i], 0, sort_puddle_by_nrof_free, NULL, NULL, NULL);

        /* Finally: restore the global freelist by chaining each puddle's
         * local freelist together, terminated by end_marker. */
        pool->freelist[i] = &end_marker;
        last_free = &end_marker;

        for (puddle = pool->puddlelist[i]; puddle != NULL; puddle = puddle->next) {
            if (puddle->nrof_free > 0) {
                if (pool->freelist[i] == &end_marker) {
                    /* First non-empty puddle becomes the list head. */
                    pool->freelist[i] = puddle->first_free;
                } else {
                    /* Splice this puddle's chunks after the previous
                     * puddle's tail. */
                    last_free->next = puddle->first_free;
                }

                puddle->last_free->next = &end_marker;
                last_free = puddle->last_free;
            }
        }
    }

    return freed;
}