/*
 * palloc_boot -- initializes allocator section
 */
int
palloc_boot(struct palloc_heap *heap, void *heap_start,
	uint64_t heap_size, uint64_t *sizep, void *base,
	struct pmem_ops *p_ops, struct stats *stats,
	struct pool_set *set)
{
	return heap_boot(heap, heap_start, heap_size, sizep,
		base, p_ops, stats, set);
}
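/*
 * Minimal usage sketch (not part of the original source): it assumes a caller
 * that has already laid out a pool with a heap region at heap_offset and that
 * holds stats and pool_set objects, and it simply forwards them to
 * palloc_boot() the same way the tests below hand them to heap_boot().
 * The example_* name and the heap_size parameter are hypothetical.
 */
static int
example_palloc_boot_usage(PMEMobjpool *pop, uint64_t heap_size,
	struct stats *stats, struct pool_set *set)
{
	void *heap_start = (char *)pop + pop->heap_offset;

	/* hand the heap region and runtime dependencies over to palloc */
	return palloc_boot(&pop->heap, heap_start, heap_size,
		&pop->heap_size, pop, &pop->p_ops, stats, set);
}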
static void
test_recycler(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, &pop->heap_size,
		p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, &pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);

	int ret;

	struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */);
	UT_ASSERTne(r, NULL);

	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);

	init_run_with_score(pop->heap.layout, 15, 0);

	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};

	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;

	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);

	init_run_with_score(pop->heap.layout, 7, 256);
	init_run_with_score(pop->heap.layout, 2, 64);
	init_run_with_score(pop->heap.layout, 5, 512);
	init_run_with_score(pop->heap.layout, 10, 128);

	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);

	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_calc_score(&pop->heap, &mrun3, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_calc_score(&pop->heap, &mrun4, NULL));
	UT_ASSERTeq(ret, 0);

	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);

	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);

	ret = recycler_put(r, &mrun5,
		recycler_calc_score(&pop->heap, &mrun5, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);

	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);

	recycler_delete(r);

	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
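/*
 * Distilled sketch of the recycler flow exercised above (not part of the
 * original source). It assumes a heap booted exactly as in test_recycler()
 * and reuses that test's init_run_with_score() helper; the example_* name is
 * hypothetical. A run is put into the recycler together with its computed
 * score and later retrieved by a best-fit lookup on the requested size_idx.
 */
static void
example_recycler_roundtrip(struct palloc_heap *heap)
{
	struct recycler *r = recycler_new(heap, 10000 /* never recalc */);
	UT_ASSERTne(r, NULL);

	/* prepare one run and register it in the recycler */
	init_run_with_score(heap->layout, 0, 64);
	struct memory_block run = {0, 0, 1, 0};
	memblock_rebuild_state(heap, &run);
	UT_ASSERTeq(recycler_put(r, &run,
		recycler_calc_score(heap, &run, NULL)), 0);

	/* ask for a block of one unit and expect the same run back */
	struct memory_block ret = MEMORY_BLOCK_NONE;
	ret.size_idx = 1;
	UT_ASSERTeq(recycler_get(r, &ret), 0);
	UT_ASSERTeq(ret.chunk_id, run.chunk_id);

	recycler_delete(r);
}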
static void
test_mock_pool_allocs()
{
	void *real_address = ZALLOC(MOCK_POOL_SIZE * 2);
	addr = (void *)ALIGN_CEILING((uint64_t)real_address,
		(uint64_t)Ut_pagesize);
	mock_pop = &addr->p;
	mock_pop->addr = addr;
	mock_pop->size = MOCK_POOL_SIZE;
	mock_pop->rdonly = 0;
	mock_pop->is_pmem = 0;
	mock_pop->heap_offset = offsetof(struct mock_pop, ptr);
	UT_ASSERTeq(mock_pop->heap_offset % Ut_pagesize, 0);
	mock_pop->heap_size = MOCK_POOL_SIZE - mock_pop->heap_offset;
	mock_pop->nlanes = 1;
	mock_pop->lanes_offset = sizeof(PMEMobjpool);
	mock_pop->is_master_replica = 1;

	mock_pop->persist_local = (persist_local_fn)pmem_msync;
	mock_pop->flush_local = (flush_local_fn)pmem_msync;
	mock_pop->drain_local = drain_empty;

	mock_pop->p_ops.persist = obj_persist;
	mock_pop->p_ops.flush = obj_flush;
	mock_pop->p_ops.drain = obj_drain;
	mock_pop->p_ops.memcpy_persist = obj_memcpy;
	mock_pop->p_ops.memset_persist = obj_memset;
	mock_pop->p_ops.base = mock_pop;
	mock_pop->p_ops.pool_size = mock_pop->size;

	mock_pop->redo = redo_log_config_new(addr, &mock_pop->p_ops,
		redo_log_check_offset, mock_pop, REDO_NUM_ENTRIES);

	void *heap_start = (char *)mock_pop + mock_pop->heap_offset;
	uint64_t heap_size = mock_pop->heap_size;

	heap_init(heap_start, heap_size, &mock_pop->p_ops);
	heap_boot(&mock_pop->heap, heap_start, heap_size, mock_pop,
		&mock_pop->p_ops);

	/* initialize runtime lanes structure */
	mock_pop->lanes_desc.runtime_nlanes = (unsigned)mock_pop->nlanes;
	lane_boot(mock_pop);

	UT_ASSERTne(mock_pop->heap.rt, NULL);

	test_malloc_free_loop(MALLOC_FREE_SIZE);

	/*
	 * Allocating till OOM and freeing the objects in a loop for different
	 * buckets covers basically all code paths except error cases.
	 */
	test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
	test_oom_allocs(TEST_TINY_ALLOC_SIZE);
	test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
	test_oom_allocs(TEST_SMALL_ALLOC_SIZE);
	test_oom_allocs(TEST_MEGA_ALLOC_SIZE);

	test_realloc(TEST_SMALL_ALLOC_SIZE, TEST_MEDIUM_ALLOC_SIZE);
	test_realloc(TEST_HUGE_ALLOC_SIZE, TEST_MEGA_ALLOC_SIZE);

	lane_cleanup(mock_pop);
	heap_cleanup(&mock_pop->heap);

	FREE(real_address);
}
static void
test_heap(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, &pop->heap_size,
		p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, &pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	test_alloc_class_bitmap_correctness();
	test_container((struct block_container *)container_new_ravl(heap),
		heap);
	test_container((struct block_container *)container_new_seglists(heap),
		heap);

	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);

	UT_ASSERT(c_small->unit_size < c_big->unit_size);

	/* new small buckets should be empty */
	UT_ASSERT(c_big->type == CLASS_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	struct bucket *b_def = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);

	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run);

	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
	int *nresv = bucket_current_resvp(b_run);

	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
		*nresv = 0;
	} while (old_run.block_off != new_run.block_off);
	*nresv = 0;

	heap_bucket_release(heap, b_run);

	stats_delete(pop, s);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * lane_allocator_boot -- initializes allocator section
 */
static int
lane_allocator_boot(PMEMobjpool *pop)
{
	return heap_boot(pop);
}
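/*
 * Sketch only (not part of the original source): lane_allocator_boot() is a
 * per-section boot callback. The example_* ops table below is a hypothetical
 * stand-in, not the library's real section-operations layout; it merely
 * illustrates the callback pattern of booting each lane section once during
 * pool open.
 */
struct example_section_ops {
	int (*boot)(PMEMobjpool *pop);	/* called once during pool open */
};

static const struct example_section_ops example_allocator_ops = {
	.boot = lane_allocator_boot,
};

static int
example_boot_allocator_section(PMEMobjpool *pop)
{
	/* the allocator section is booted through its registered callback */
	return example_allocator_ops.boot(pop);
}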
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->p_ops.pool_size = pop->size;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = pop->heap_size;
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, pop, p_ops) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	struct bucket *b_small = heap_get_best_bucket(heap, 1);
	struct bucket *b_big = heap_get_best_bucket(heap, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(heap, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block prev;
	heap_get_adjacent_free_block(heap, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);

	struct memory_block cnt;
	heap_get_adjacent_free_block(heap, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);

	struct memory_block next;
	heap_get_adjacent_free_block(heap, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);

	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	Free(mpop);
}
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->persist = obj_heap_persist;

	UT_ASSERT(heap_check(pop) != 0);
	UT_ASSERT(heap_init(pop) == 0);
	UT_ASSERT(heap_boot(pop) == 0);
	UT_ASSERT(pop->heap != NULL);

	struct bucket *b_small = heap_get_best_bucket(pop, 1);
	struct bucket *b_big = heap_get_best_bucket(pop, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(pop, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(pop, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block *blocksp[MAX_BLOCKS] = {NULL};

	struct memory_block prev;
	heap_get_adjacent_free_block(pop, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	blocksp[0] = &prev;

	struct memory_block cnt;
	heap_get_adjacent_free_block(pop, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);
	blocksp[1] = &cnt;

	struct memory_block next;
	heap_get_adjacent_free_block(pop, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);
	blocksp[2] = &next;

	struct operation_context *ctx = operation_init(pop, NULL);
	struct memory_block result = heap_coalesce(pop, blocksp, MAX_BLOCKS,
		HEAP_OP_FREE, ctx);
	operation_process(ctx);
	operation_delete(ctx);

	UT_ASSERT(result.size_idx == 3);
	UT_ASSERT(result.chunk_id == prev.chunk_id);

	UT_ASSERT(heap_check(pop) == 0);
	heap_cleanup(pop);
	UT_ASSERT(pop->heap == NULL);

	Free(mpop);
}
/*
 * palloc_boot -- initializes allocator section
 */
int
palloc_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
	void *base, struct pmem_ops *p_ops)
{
	return heap_boot(heap, heap_start, heap_size, base, p_ops);
}