/**
 * Return one object to a mempool slab.
 *
 * The freed object is pushed onto the slab's intrusive free list
 * (its first pointer-sized bytes are reused as the link). Depending
 * on the resulting free count the slab migrates between pool states:
 * first free object -> slab becomes "partially populated" and enters
 * the free_slabs rbtree; all objects free -> slab leaves the rbtree
 * and is either kept as the pool's single spare slab or returned to
 * the slab cache.
 *
 * @param pool  pool the object was allocated from
 * @param slab  slab that contains @a ptr
 * @param ptr   object to free; must belong to @a slab
 */
void
mslab_free(struct mempool *pool, struct mslab *slab, void *ptr)
{
	/* put object to garbage list */
	*(void **)ptr = slab->free_list;
	slab->free_list = ptr;
	slab->nfree++;

	if (slab->nfree == 1) {
		/**
		 * Add this slab to the rbtree which contains partially
		 * populated slabs.
		 */
		mslab_tree_insert(&pool->free_slabs, slab);
	} else if (slab->nfree == pool->objcount) {
		/** Free the slab. */
		mslab_tree_remove(&pool->free_slabs, slab);
		/*
		 * Keep exactly one fully-free slab cached as "spare";
		 * the extra slab is returned to the cache. The address
		 * comparison keeps the lower-addressed slab as spare —
		 * presumably to favor reuse of lower memory; confirm
		 * against the pool design notes.
		 */
		if (pool->spare > slab) {
			/* New slab is lower: evict the old spare. */
			slab_list_del(&pool->slabs, &pool->spare->slab,
				      next_in_list);
			slab_put(pool->cache, &pool->spare->slab);
			pool->spare = slab;
		} else if (pool->spare) {
			/* A spare already exists: release this slab. */
			slab_list_del(&pool->slabs, &slab->slab,
				      next_in_list);
			slab_put(pool->cache, &slab->slab);
		} else {
			/* No spare yet: keep this slab around. */
			pool->spare = slab;
		}
	}
}
/**
 * Slow path of obuf reserve: make sure at least @a size contiguous
 * bytes are available at the buffer's write position, growing or
 * switching iovec slots as needed.
 *
 * If the current iovec already holds data, advance to the next slot
 * (the contiguous reservation cannot span two iovecs). An empty slot
 * may be grown in place by swapping its slab for a larger one — safe
 * only because iov_len == 0, i.e. there is no data to copy over.
 *
 * @param buf   output buffer
 * @param size  number of contiguous bytes to reserve
 * @retval pointer to the reserved area on success
 * @retval NULL  if all iovec slots are used or slab allocation fails
 */
void *
obuf_reserve_slow(struct obuf *buf, size_t size)
{
	struct iovec *iov = &buf->iov[buf->pos];
	size_t capacity = buf->capacity[buf->pos];
	if (iov->iov_len > 0) {
		/* Move to the next buffer. */
		if (buf->pos + 1 >= SMALL_OBUF_IOV_MAX)
			return NULL;
		buf->pos++;
		iov = &buf->iov[buf->pos];
		capacity = buf->capacity[buf->pos];
	}
	/* The realloc path below relies on the slot being empty. */
	assert(iov->iov_len == 0);
	/* Make sure the next buffer can store size. */
	if (size > capacity) {
		if (capacity > 0) {
			/* Simply realloc. */
			while (capacity < size)
				capacity = capacity * 2;
			struct slab *slab = slab_get(buf->slabc, capacity);
			if (slab == NULL)
				return NULL;
			/*
			 * Swap the old slab for the bigger one; no data
			 * copy is needed since the slot is empty.
			 */
			struct slab *old =
				slab_from_data(buf->iov[buf->pos].iov_base);
			slab_put(buf->slabc, old);
			buf->iov[buf->pos].iov_base = slab_data(slab);
			buf->capacity[buf->pos] = slab_capacity(slab);
		} else if (obuf_alloc_pos(buf, size) == NULL) {
			/* Fresh slot: delegate first allocation. */
			return NULL;
		}
	}
	assert(buf->iov[buf->pos].iov_len + size <= buf->capacity[buf->pos]);
	return (char *) buf->iov[buf->pos].iov_base +
	       buf->iov[buf->pos].iov_len;
}
/**
 * Randomized stress test for the slab cache: repeatedly allocate
 * random-sized slabs into random slots of runs[], freeing whatever
 * previously occupied the slot, and verify cache invariants after
 * each step.
 */
int
main(void)
{
	/*
	 * Seed the generator that is actually used below. The original
	 * called srand(), which seeds rand() — random() draws from the
	 * state seeded by srandom(), so the seed had no effect.
	 */
	srandom(time(NULL));

	struct slab_arena arena;
	struct slab_cache cache;
	slab_arena_create(&arena, 0, UINT_MAX, 4000000, MAP_PRIVATE);
	slab_cache_create(&cache, &arena, 0);

	int i = 0;
	while (i < ITERATIONS) {
		int run = random() % NRUNS;
		int size = random() % MAX_ALLOC;
		/* Recycle the slab currently held in this slot, if any. */
		if (runs[run])
			slab_put(&cache, runs[run]);
		runs[run] = slab_get(&cache, size);
		fail_unless(runs[run]);
		slab_cache_check(&cache);
		i++;
	}

	slab_cache_destroy(&cache);
	/* Unmap the arena as well — the original leaked it. */
	slab_arena_destroy(&arena);
	return 0;
}
/**
 * Destroy a mempool: hand every slab it owns back to the
 * underlying slab cache.
 */
void
mempool_destroy(struct mempool *pool)
{
	struct slab *item, *next;
	/* Safe iteration: each slab is released as we walk the list. */
	rlist_foreach_entry_safe(item, &pool->slabs.slabs, next_in_list, next)
		slab_put(pool->cache, item);
}
void region_free(struct region *region) { struct slab *slab, *tmp; rlist_foreach_entry_safe(slab, ®ion->slabs.slabs, next_in_list, tmp) slab_put(region->cache, slab); slab_list_create(®ion->slabs); }
/**
 * Destroy an output buffer, returning every iovec's backing slab
 * to the slab cache.
 */
void
obuf_destroy(struct obuf *buf)
{
	/* Release each allocated iovec buffer back to its slab cache. */
	for (int pos = 0; pos < buf->n_iov; pos++) {
		struct slab *backing = slab_from_data(buf->iov[pos].iov_base);
		slab_put(buf->slabc, backing);
	}
#ifndef NDEBUG
	/*
	 * Debug builds re-initialize the buffer so accidental reuse
	 * after destroy is well-defined and easier to catch.
	 */
	obuf_create(buf, buf->slabc, buf->start_capacity);
#endif
}
/** * Release all memory down to new_size; new_size has to be previously * obtained by calling region_used(). */ void region_truncate(struct region *region, size_t used) { ssize_t cut_size = region_used(region) - used; assert(cut_size >= 0); while (! rlist_empty(®ion->slabs.slabs)) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); if (slab->used > cut_size) { /* This is the last slab to trim. */ slab->used -= cut_size; cut_size = 0; break; } cut_size -= slab->used; /* Remove the entire slab. */ slab_list_del(®ion->slabs, &slab->slab, next_in_list); slab_put(region->cache, &slab->slab); } assert(cut_size == 0); region->slabs.stats.used = used; }