int main() { struct quota quota; struct slab_arena arena; quota_init("a, 0); slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, SLAB_MIN_SIZE); slab_arena_create(&arena, "a, 1, 1, MAP_PRIVATE); slab_arena_print(&arena); void *ptr = slab_map(&arena); slab_arena_print(&arena); void *ptr1 = slab_map(&arena); printf("going beyond the limit: %s\n", ptr1 ? "(ptr)" : "(nil)"); slab_arena_print(&arena); slab_unmap(&arena, ptr); slab_unmap(&arena, ptr1); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, 2000000); slab_arena_create(&arena, "a, 3000000, 1, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); }
void * slab_malloc(const char *file, unsigned int line, size_t size) { alloc_header_t *res; size_t real; assert(size < 1 << 24); real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1); if (real < SMALL_CUTOFF) { res = slab_alloc(slabset_create(real)); slab_alloc_count++; slab_alloc_size += size; } else { res = slab_map(slab_round_up(real)); big_alloc_count++; big_alloc_size += size; slab_log_alloc(res, size); } #if SLAB_DEBUG & SLAB_DEBUG_HEADER res->file_id = get_file_id(file); res->size = size; res->line = line; res->magic = ALLOC_MAGIC; #else *res = size; (void)file; (void)line; #endif return res + 1; }
#include <pthread.h> struct slab_arena arena; struct quota quota; int THREADS = 8; int ITERATIONS = 1009 /* 100003 */; int OSCILLATION = 137; int FILL = SLAB_MIN_SIZE/sizeof(pthread_t); void * run(void *p __attribute__((unused))) { unsigned int seed = (unsigned int) pthread_self(); int iterations = rand_r(&seed) % ITERATIONS; pthread_t **slabs = slab_map(&arena); for (int i = 0; i < iterations; i++) { int oscillation = rand_r(&seed) % OSCILLATION; for (int osc = 0; osc < oscillation; osc++) { slabs[osc] = (pthread_t *) slab_map(&arena); for (int fill = 0; fill < FILL; fill += 100) { slabs[osc][fill] = pthread_self(); } } sched_yield(); for (int osc = 0; osc < oscillation; osc++) { for (int fill = 0; fill < FILL; fill+= 100) { fail_unless(slabs[osc][fill] == pthread_self()); } slab_unmap(&arena, slabs[osc]);
/*
 * Return one small allocation to its slab.  When the slab goes from
 * full to partially used it is relinked as its parent's first child;
 * when it becomes entirely free it is unlinked from the parent and
 * appended to the global free-slab list, with surplus free pages
 * unmapped.
 *
 * Fix: the MAX_SLAB_FREE eviction loop unmapped `slab` (the slab just
 * freed) on every iteration instead of `tslab` (the tail entry it had
 * just popped).  The two coincide only on the first iteration; any
 * further iteration double-unmapped the same page, leaked the popped
 * page, and read `slab->base` from a page that was already unmapped.
 */
static void
slab_unalloc(void *ptr, size_t size)
{
	struct slab *slab, *new_next;

	assert(size < SMALL_CUTOFF);
	/* The struct slab lives at the very end of the page holding ptr. */
	slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
	/* Push the item onto the slab's free list. */
	*(void**)ptr = slab->free;
	slab->free = ptr;
	slab->parent->nallocs--;

	if (slab->used-- == slab->parent->items_per_slab
	    && slab->parent->child != slab) {
		/* Unlink from current position, relink as parent's first child. */
		new_next = slab->parent->child;
		assert(new_next != NULL);
		if (slab->prev)
			slab->prev->next = slab->next;
		if (slab->next)
			slab->next->prev = slab->prev;
		if ((slab->prev = new_next->prev))
			slab->prev->next = slab;
		slab->next = new_next;
		new_next->prev = slab;
		slab->parent->child = slab;
		assert(!slab->next || slab == slab->next->prev);
		assert(!slab->prev || slab == slab->prev->next);
	} else if (!slab->used) {
		slab_log_free(slab, size);
		/* Unlink slab from its parent. */
		slab->parent->nslabs--;
		if (slab->prev)
			slab->prev->next = slab->next;
		if (slab->next)
			slab->next->prev = slab->prev;
		new_next = slab->next ? slab->next : slab->prev;
		if (slab == slab->parent->child)
			slab->parent->child = new_next;
		if (new_next) {
			assert(!new_next->next || new_next == new_next->next->prev);
			assert(!new_next->prev || new_next == new_next->prev->next);
		}
#if SLAB_RESERVE
		/* Make sure we have enough free slab pages. */
		while (free_slab_count < SLAB_RESERVE) {
			struct slab *tslab;
			void *item;

			item = slab_map(slab_pagesize());
			tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
			tslab->base = item;
			tslab->prev = free_slab_tail;
			free_slab_tail = tslab;
			if (!free_slab_head)
				free_slab_head = tslab;
			else {
				slab_unprotect(tslab->prev);
				tslab->prev->next = tslab;
				slab_protect(tslab->prev);
			}
			free_slab_count++;
			slab_count++;
		}
#endif
		/* Link to list of free slabs. */
		slab->parent = NULL;
		slab->next = NULL;
		slab->prev = free_slab_tail;
		if (slab->prev) {
			slab_unprotect(slab->prev);
			slab->prev->next = slab;
			slab_protect(slab->prev);
		} else
			free_slab_head = slab;
		slab_protect(slab);
		free_slab_tail = slab;
		free_slab_count++;
#if MAX_SLAB_FREE >= 0
		/* Unlink and unmap old slabs, so accesses to stale-enough
		 * pointers will fault.
		 * NOTE(review): this pops from the tail, i.e. the most
		 * recently freed page first — confirm that LIFO order is
		 * intended given the "old slabs" comment. */
		while (free_slab_count > MAX_SLAB_FREE) {
			struct slab *tslab;

			tslab = free_slab_tail;
			slab_unprotect(tslab);
			free_slab_tail = tslab->prev;
			if (tslab->prev) {
				slab_unprotect(tslab->prev);
				tslab->prev->next = NULL;
				slab_protect(tslab->prev);
			} else
				free_slab_head = NULL;
			free_slab_count--;
			slab_count--;
			/* Unmap the page actually popped from the tail. */
			slab_log_unmap(tslab);
			munmap(tslab->base, slab_pagesize());
		}
#endif
	}
	(void)size;
}
/*
 * Pop one item from the slab set `sset`, allocating and linking a fresh
 * slab page when no child slab has a free item.  The returned item is
 * zeroed to sset->size bytes.
 */
static void *
slab_alloc(struct slabset *sset)
{
	struct slab *slab;
	void **item;

	if (!sset->child || !sset->child->free) {
		unsigned int ii, step;
		/* Allocate new slab. */
		if (free_slab_head) {
			/* Reuse a page from the global free-slab list. */
			slab = free_slab_head;
			slab_unprotect(slab);
			if (!(free_slab_head = slab->next))
				free_slab_tail = NULL;
		} else {
			/* Map a fresh page; the struct slab bookkeeping
			 * lives at the tail of the page itself. */
			item = slab_map(slab_pagesize());
			slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
			slab->base = item;
			slab_count++;
		}
		slab_log_alloc(slab, sset->size);
		/* Populate free list: each free item stores a pointer to
		 * the next one; items are spaced SLAB_ALIGN-rounded apart. */
		step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
		for (ii = 1, item = slab->free = slab->base;
		     ii < sset->items_per_slab;
		     ++ii, item = (*item = (char*)item + step));
		*item = NULL;
		/* Link to parent slabset. */
		slab->parent = sset;
		slab->prev = sset->child;
		if (slab->prev) {
			slab->next = slab->prev->next;
			slab->prev->next = slab;
			if (slab->next)
				slab->next->prev = slab;
		} else
			slab->next = NULL;
		assert(!slab->next || slab == slab->next->prev);
		assert(!slab->prev || slab == slab->prev->next);
		sset->child = slab;
		sset->nslabs++;
	}
	/* Pop the head of the child slab's free list. */
	slab = sset->child;
	item = slab->free;
	/* The item must lie within the page, below the struct slab. */
	assert(((unsigned long)item & (slab_pagesize() - 1))
	       <= (slab_pagesize() - sizeof(*slab) - sset->size));
	slab->free = *item;
	if (++slab->used == sset->items_per_slab) {
		if (sset->child != slab) {
			/* Unlink slab and reinsert before sset->child. */
			if (slab->prev)
				slab->prev->next = slab->next;
			if (slab->next)
				slab->next->prev = slab->prev;
			if ((slab->prev = sset->child->prev))
				slab->prev->next = slab;
			if ((slab->next = sset->child))
				slab->next->prev = slab;
			assert(!slab->next || slab == slab->next->prev);
			assert(!slab->prev || slab == slab->prev->next);
		} else if (slab->next) {
			/* Advance sset->child to next pointer. */
			sset->child = slab->next;
		}
	}
	sset->nallocs++;
	/* Items are handed out zero-filled. */
	memset(item, 0, sset->size);
	return item;
}