/*
 * heap_coalesce_huge -- finds neighbours of a huge block, removes them from
 *	the volatile state and returns the resulting block
 */
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, const struct memory_block *m)
{
	const struct memory_block *blocks[3] = {NULL, m, NULL};

	struct bucket *b = heap_get_default_bucket(heap);

	struct memory_block prev = MEMORY_BLOCK_NONE;
	if (heap_get_adjacent_free_block(heap, m, &prev, 1) == 0 &&
		b->c_ops->get_rm_exact(b->container, &prev) == 0) {
		blocks[0] = &prev;
	}

	struct memory_block next = MEMORY_BLOCK_NONE;
	if (heap_get_adjacent_free_block(heap, m, &next, 0) == 0 &&
		b->c_ops->get_rm_exact(b->container, &next) == 0) {
		blocks[2] = &next;
	}

	return heap_coalesce(heap, blocks, 3);
}
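/*
 * Usage sketch (not from the original source): a minimal free path built on
 * heap_coalesce_huge(). The wrapper name example_free_huge and the final
 * b->c_ops->insert() call are assumptions for illustration; the real
 * allocator performs this wiring inside palloc.
 */
static void
example_free_huge(struct palloc_heap *heap, const struct memory_block *m)
{
	/* merge m with any free neighbours pulled out of the volatile state */
	struct memory_block merged = heap_coalesce_huge(heap, m);

	/*
	 * Assumed step: return the merged block to the default bucket so the
	 * whole region is available to future allocations.
	 */
	struct bucket *b = heap_get_default_bucket(heap);
	b->c_ops->insert(b->container, &merged);
}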
/*
 * heap_free_block -- creates free persistent state of a memory block
 */
struct memory_block
heap_free_block(PMEMobjpool *pop, struct bucket *b,
	struct memory_block m, void *hdr, uint64_t *op_result)
{
	struct memory_block *blocks[3] = {NULL, &m, NULL};

	struct memory_block prev = {0};
	if (heap_get_adjacent_free_block(pop, &prev, m, 1) == 0 &&
		bucket_get_rm_block_exact(b, prev) == 0) {
		blocks[0] = &prev;
	}

	struct memory_block next = {0};
	if (heap_get_adjacent_free_block(pop, &next, m, 0) == 0 &&
		bucket_get_rm_block_exact(b, next) == 0) {
		blocks[2] = &next;
	}

	struct memory_block res = heap_coalesce(pop, blocks, 3, HEAP_OP_FREE,
		hdr, op_result);

	return res;
}
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, function returns zero. Otherwise an error number is
 * returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	/* the block can only grow into the adjacent free block after it */
	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto error;

	/* merge the allocation with the claimed part of the next block */
	struct memory_block *blocks[2] = {&cnt, &next};

	uint64_t op_result;
	void *hdr;
	struct memory_block m = heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC,
		&hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	/* publish the new size and the merged header atomically (redo log) */
	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, function returns zero. Otherwise an error number is
 * returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = b->calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = b->calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * b->unit_size;

	struct memory_block cnt = get_mblock_from_alloc(pop, alloc);

	heap_lock_if_run(pop, cnt);

	/* the block can only grow into the adjacent free block after it */
	struct memory_block next = {0, 0, 0, 0};
	if ((err = heap_get_adjacent_free_block(pop, b, &next, cnt, 0)) != 0)
		goto out;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto out;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto out;

	/* merge the allocation with the claimed part of the next block */
	struct memory_block *blocks[2] = {&cnt, &next};

	uint64_t op_result;
	void *hdr;
	struct memory_block m = heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC,
		&hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	/* mark new part as accessible and undefined */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, (char *)block_data + alloc->size,
		real_size - alloc->size);
	/* resize allocated space */
	VALGRIND_DO_MEMPOOL_CHANGE(pop, userdatap, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	if (constructor != NULL)
		constructor(pop, userdatap,
			real_size - sizeof (struct allocation_header) -
			data_off, arg);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	/* publish the new size and the merged header atomically (redo log) */
	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

out:
	heap_unlock_if_run(pop, cnt);
	lane_release(pop);

	return err;
}
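/*
 * Usage sketch (not from the original source): growing an existing
 * allocation in place with prealloc_construct(). The constructor runs on the
 * resized user data before the new size is made durable, so the growth is
 * initialized before it becomes visible. The names buf_construct and
 * example_grow are hypothetical; pmemobj_memset_persist() is the public
 * libpmemobj memset.
 */
static void
buf_construct(PMEMobjpool *pop, void *ptr, size_t usable_size, void *arg)
{
	/* zero the (possibly extended) user data */
	pmemobj_memset_persist(pop, ptr, 0, usable_size);
	(void) arg;
}

static int
example_grow(PMEMobjpool *pop, uint64_t *root_off)
{
	/*
	 * Grow the allocation at *root_off to at least 2048 bytes; fails
	 * with ENOMEM when the adjacent free block is too small to grow
	 * into.
	 */
	return prealloc_construct(pop, root_off, 2048, buf_construct,
		NULL, 0);
}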
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->p_ops.pool_size = pop->size;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = pop->heap_size;
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, pop, p_ops) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	struct bucket *b_small = heap_get_best_bucket(heap, 1);
	struct bucket *b_big = heap_get_best_bucket(heap, 2048);
	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(heap, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block prev;
	heap_get_adjacent_free_block(heap, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);

	struct memory_block cnt;
	heap_get_adjacent_free_block(heap, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);

	struct memory_block next;
	heap_get_adjacent_free_block(heap, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);

	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	Free(mpop);
}
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->persist = obj_heap_persist;

	UT_ASSERT(heap_check(pop) != 0);
	UT_ASSERT(heap_init(pop) == 0);
	UT_ASSERT(heap_boot(pop) == 0);
	UT_ASSERT(pop->heap != NULL);

	struct bucket *b_small = heap_get_best_bucket(pop, 1);
	struct bucket *b_big = heap_get_best_bucket(pop, 2048);
	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(pop, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(pop, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block *blocksp[MAX_BLOCKS] = {NULL};

	struct memory_block prev;
	heap_get_adjacent_free_block(pop, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	blocksp[0] = &prev;

	struct memory_block cnt;
	heap_get_adjacent_free_block(pop, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);
	blocksp[1] = &cnt;

	struct memory_block next;
	heap_get_adjacent_free_block(pop, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);
	blocksp[2] = &next;

	struct operation_context *ctx = operation_init(pop, NULL);
	struct memory_block result = heap_coalesce(pop, blocksp, MAX_BLOCKS,
		HEAP_OP_FREE, ctx);
	operation_process(ctx);
	operation_delete(ctx);

	UT_ASSERT(result.size_idx == 3);
	UT_ASSERT(result.chunk_id == prev.chunk_id);

	UT_ASSERT(heap_check(pop) == 0);
	heap_cleanup(pop);
	UT_ASSERT(pop->heap == NULL);

	Free(mpop);
}