/*
 * palloc_reservation_create -- creates a volatile reservation of a
 *	memory block.
 *
 * The first step in the allocation of a new block is reserving it in
 * the transient heap - which is represented by the bucket abstraction.
 *
 * To provide optimal scaling for multi-threaded applications and reduce
 * fragmentation, the appropriate bucket is chosen depending on the
 * current thread context and on the allocation class that the requested
 * size falls into.
 *
 * Once the bucket is selected, just enough memory is reserved for the
 * requested size. The underlying block allocation algorithm
 * (best-fit, next-fit, ...) varies depending on the bucket container.
 */
static int
palloc_reservation_create(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action_internal *out)
{
	int err = 0;

	struct memory_block *new_block = &out->m;

	ASSERT(class_id < UINT8_MAX);
	struct alloc_class *c = class_id == 0 ?
		heap_get_best_class(heap, size) :
		alloc_class_by_id(heap_alloc_classes(heap),
			(uint8_t)class_id);

	if (c == NULL) {
		ERR("no allocation class for size %zu bytes", size);
		errno = EINVAL;
		return -1;
	}

	/*
	 * The caller provided size in bytes, but buckets operate in
	 * 'size indexes' which are multiples of the block size in the
	 * bucket.
	 *
	 * For example, to allocate 500 bytes from a bucket that
	 * provides 256 byte blocks two memory 'units' are required.
	 */
	ssize_t size_idx = alloc_class_calc_size_idx(c, size);
	if (size_idx < 0) {
		ERR("allocation class not suitable for size %zu bytes",
			size);
		errno = EINVAL;
		return -1;
	}
	ASSERT(size_idx <= UINT32_MAX);
	new_block->size_idx = (uint32_t)size_idx;

	struct bucket *b = heap_bucket_acquire(heap, c);

	err = heap_get_bestfit_block(heap, b, new_block);
	if (err != 0)
		goto out;

	if (alloc_prep_block(heap, new_block, constructor, arg,
		extra_field, object_flags, &out->offset) != 0) {
		/*
		 * Constructor returned a non-zero value, which means
		 * the memory block reservation has to be rolled back.
		 */
		if (new_block->type == MEMORY_BLOCK_HUGE) {
			bucket_insert_block(b, new_block);
		}
		err = ECANCELED;
		goto out;
	}

	/*
	 * Each reservation that has not yet been fulfilled needs to be
	 * tracked in the runtime state.
	 * The memory block cannot be put back into the global state unless
	 * there are no active reservations.
	 */
	if ((out->resvp = bucket_current_resvp(b)) != NULL)
		util_fetch_and_add64(out->resvp, 1);

	out->lock = new_block->m_ops->get_lock(new_block);
	out->new_state = MEMBLOCK_ALLOCATED;

out:
	heap_bucket_release(heap, b);

	if (err == 0)
		return 0;

	errno = err;
	return -1;
}
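/*
 * Illustrative sketch only (not part of the allocator): the 'size index'
 * rounding described in the comment inside palloc_reservation_create().
 * A 500-byte request against a hypothetical class with 256-byte units
 * yields (500 + 255) / 256 == 2 units. The real
 * alloc_class_calc_size_idx() additionally accounts for block headers and
 * per-run unit limits, which this sketch deliberately omits;
 * example_calc_size_idx() is a hypothetical helper, not a libpmemobj API.
 */
#include <stddef.h>
#include <stdint.h>

static inline uint32_t
example_calc_size_idx(size_t unit_size, size_t size)
{
	/* round the byte count up to a whole number of bucket units */
	return (uint32_t)((size + unit_size - 1) / unit_size);
}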
static void
test_heap(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, &pop->heap_size,
		p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, &pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	test_alloc_class_bitmap_correctness();

	test_container((struct block_container *)container_new_ravl(heap),
		heap);
	test_container((struct block_container *)container_new_seglists(heap),
		heap);

	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);

	UT_ASSERT(c_small->unit_size < c_big->unit_size);

	/* new small buckets should be empty */
	UT_ASSERT(c_big->type == CLASS_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	struct bucket *b_def = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);

	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run);

	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
	uint64_t *nresv = bucket_current_resvp(b_run);

	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
		*nresv = 0;
	} while (old_run.block_off != new_run.block_off);
	*nresv = 0;

	heap_bucket_release(heap, b_run);

	stats_delete(pop, s);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
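/*
 * Illustrative sketch only: the reservation-count handshake that
 * palloc_reservation_create() relies on and that test_heap() pokes at
 * through bucket_current_resvp(). A reserver bumps a shared counter
 * before handing out a block; memory may be recycled into the global
 * state only once the counter drops back to zero. The names below
 * (example_resv, example_reserve_begin, ...) are hypothetical, and C11
 * atomics stand in for the util_fetch_and_add64() wrapper used above.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint_fast64_t example_resv; /* count of active reservations */

static void
example_reserve_begin(void)
{
	/* mirrors util_fetch_and_add64(out->resvp, 1) */
	atomic_fetch_add(&example_resv, 1);
}

static void
example_reserve_end(void)
{
	/* a published or cancelled action drops its reservation */
	atomic_fetch_sub(&example_resv, 1);
}

static bool
example_can_recycle(void)
{
	/* safe to return blocks to the global state only when idle */
	return atomic_load(&example_resv) == 0;
}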