/*
 * get_mblock_from_alloc -- (internal) returns allocation memory block
 */
static struct memory_block
get_mblock_from_alloc(PMEMobjpool *pop, struct bucket *b,
	struct allocation_header *alloc)
{
	struct memory_block mblock = {
		alloc->chunk_id,
		alloc->zone_id,
		bucket_calc_units(b, alloc->size),
		calc_block_offset(pop, b, alloc)
	};

	return mblock;
}
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The new size is written persistently into the allocation header, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto error;

	struct memory_block *blocks[2] = {&cnt, &next};

	uint64_t op_result;
	void *hdr;
	struct memory_block m = heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC,
		&hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);

	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	/* atomically update both the allocation size and the chunk metadata */
	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
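To make the contract concrete, here is a minimal caller sketch. It is illustrative only: struct buf, buf_init_grown, and buf_grow are hypothetical stand-ins for user code, the public pmemobj_persist is used as the flush primitive, and the internal pmalloc.h declarations are assumed to be visible. The constructor sees the block after the heap has reserved the extra space but before the new size is made persistent, so it can safely initialize the grown region.

/*
 * Hypothetical caller code (not part of the allocator): grows a persistent
 * buffer in place and initializes only the newly added bytes.
 */
struct buf {
	size_t len;
	char data[];
};

static void
buf_init_grown(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct buf *b = ptr;
	size_t new_len = *(size_t *)arg;

	/* zero and flush only the newly added region */
	memset(b->data + b->len, 0, new_len - b->len);
	pmemobj_persist(pop, b->data + b->len, new_len - b->len);

	b->len = new_len;
	pmemobj_persist(pop, &b->len, sizeof (b->len));
}

static int
buf_grow(PMEMobjpool *pop, uint64_t *buf_off, size_t new_len)
{
	/* old contents survive the resize because the block grows in place */
	return prealloc_construct(pop, buf_off,
		sizeof (struct buf) + new_len, buf_init_grown, &new_len, 0);
}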
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	/* persistently publish the offset and the block header together */
	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
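Note the ordering that makes this crash-safe: the constructor runs on the reserved block first, and only then does the redo log atomically publish both the chunk header change and the offset, so an allocation interrupted by a crash is simply never visible after recovery. A hypothetical constructor matching this three-argument signature might look as follows (struct node and node_construct are illustrative, not part of the allocator; pmemobj_persist is the public flush call):

/* hypothetical list node initialized through pmalloc_construct */
struct node {
	uint64_t next_off;
	int value;
};

static void
node_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct node *n = ptr;

	n->next_off = 0;
	n->value = *(int *)arg;

	/* flush the node before the allocator persists its offset */
	pmemobj_persist(pop, n, sizeof (*n));
}

static int
node_alloc(PMEMobjpool *pop, uint64_t *head_off, int value)
{
	return pmalloc_construct(pop, head_off, sizeof (struct node),
		node_construct, &value, 0);
}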
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	struct memory_block m = {0, 0, 0, 0};

	m.size_idx = bucket_calc_units(b, sizeh);

	err = heap_get_bestfit_block(pop, b, &m);

	if (err == ENOMEM && !bucket_is_small(b))
		goto out; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more available memory in the common heap and in
		 * this lane cache, fall back to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(pop, sizeh);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request, borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(pop, b, m.size_idx);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		goto out;
	}

	/*
	 * Now that the memory is reserved we can go ahead with making the
	 * allocation persistent.
	 */
	uint64_t real_size = bucket_unit_size(b) * m.size_idx;
	err = persist_alloc(pop, lane, m, real_size, off,
		constructor, arg, data_off);

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
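Besides the lane-cache and auxiliary-bucket fallbacks, the notable change in this revision is that the constructor now receives the block's usable size: bucket rounding can hand out more memory than was requested, and the callback can claim the surplus instead of letting it go to waste. An illustrative constructor for the four-argument signature (struct log_entry and log_entry_construct are hypothetical, not PMDK code):

/* hypothetical entry that records whatever capacity the bucket granted */
struct log_entry {
	size_t capacity;
	char payload[];
};

static void
log_entry_construct(PMEMobjpool *pop, void *ptr, size_t usable_size,
	void *arg)
{
	struct log_entry *e = ptr;

	/* store the real capacity, not the requested one */
	e->capacity = usable_size - sizeof (struct log_entry);
	pmemobj_persist(pop, e, sizeof (*e));
}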