/*
 * heap_coalesce -- merges adjacent memory blocks
 */
struct memory_block
heap_coalesce(PMEMobjpool *pop, struct memory_block *blocks[], int n,
	enum heap_op op, void **hdr, uint64_t *op_result)
{
	struct memory_block ret;
	struct memory_block *b = NULL;
	ret.size_idx = 0;
	for (int i = 0; i < n; ++i) {
		if (blocks[i] == NULL)
			continue;

		/* the merged block inherits the position of the first one */
		if (b == NULL)
			b = blocks[i];

		ret.size_idx += blocks[i]->size_idx;
	}

	ASSERTne(b, NULL);

	ret.chunk_id = b->chunk_id;
	ret.zone_id = b->zone_id;
	ret.block_off = b->block_off;

	*hdr = heap_get_block_header(pop, ret, op, op_result);

	return ret;
}
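/*
 * Illustrative sketch (not part of the allocator): how a caller might merge
 * a freed block with its immediate neighbors. The HEAP_OP_FREE value and
 * the example_* names are assumptions for illustration only; NULL entries
 * are skipped by heap_coalesce, so a missing neighbor is harmless.
 */
static struct memory_block
example_coalesce_neighbors(PMEMobjpool *pop, struct memory_block *m,
	struct memory_block *prev, struct memory_block *next,
	void **hdr, uint64_t *op_result)
{
	/* order matters only for position: ret inherits the first non-NULL */
	struct memory_block *blocks[3] = {prev, m, next};

	return heap_coalesce(pop, blocks, 3, HEAP_OP_FREE, hdr, op_result);
}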
/*
 * persist_alloc -- (internal) performs a persistent allocation of the
 *	memory block previously reserved by volatile bucket
 */
static int
persist_alloc(PMEMobjpool *pop, struct lane_section *lane,
	struct memory_block m, uint64_t real_size, uint64_t *off,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	int err;

#ifdef DEBUG
	if (heap_block_is_allocated(pop, m)) {
		ERR("heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(pop, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, userdatap, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0) {
		VALGRIND_DO_MEMPOOL_FREE(pop, userdatap);
		return err;
	}

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
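/*
 * Illustrative sketch of the intended calling sequence for persist_alloc:
 * the volatile reservation happens first, then the allocation is made
 * durable under a held allocator lane. Every helper used here also appears
 * in pmalloc_construct below; the example_* name and the elided volatile
 * state recovery on failure are assumptions for illustration only.
 */
static int
example_reserve_then_persist(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);
	struct bucket *b = heap_get_best_bucket(pop, sizeh);
	uint32_t units = bucket_calc_units(b, sizeh);
	struct memory_block m = {0, 0, units, 0};
	int err;

	/* volatile step: carve a best-fit block out of the bucket */
	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	/* persistent step: redo-logged under the allocator lane */
	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	err = persist_alloc(pop, lane, m, bucket_unit_size(b) * units,
		off, constructor, arg, data_off);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}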
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
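/*
 * Usage sketch (illustrative): a constructor that initializes the object
 * before its offset is persistently published by pmalloc_construct. The
 * struct foo type and the example_* names are hypothetical; flushing the
 * constructor's stores is assumed to follow the same rules as any other
 * pmem write and is not shown here.
 */
struct foo {
	uint64_t counter;
	uint64_t flags;
};

static void
foo_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct foo *f = ptr;

	/* pop and arg are unused in this sketch */
	(void) pop;
	(void) arg;

	/* runs before *off is written, so a crash never exposes garbage */
	f->counter = 0;
	f->flags = 0;
}

static int
example_alloc_foo(PMEMobjpool *pop, uint64_t *obj_off)
{
	/* data_off of 0 places the user data right after the header */
	return pmalloc_construct(pop, obj_off, sizeof (struct foo),
		foo_construct, NULL, 0);
}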