/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 */
void
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	heap_lock_if_run(pop, m);

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	heap_unlock_if_run(pop, m);

	VALGRIND_DO_MEMPOOL_FREE(pop,
		(char *)alloc + sizeof (*alloc) + data_off);

	/* we might have been operating on inactive run */
	if (b != NULL) {
		CNT_OP(b, insert, pop, res);

		if (b->type == BUCKET_RUN)
			heap_degrade_run_if_empty(pop, b, res);
	}

	lane_release(pop);
}
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	/* the EAP optimization elides the actual free for eligible sizes */
	if (is_alloc_free_opt_enable(alloc->size))
		goto error_lane_hold;
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
/*
 * pmalloc_usable_size -- returns the number of bytes in the memory block
 */
size_t
pmalloc_usable_size(PMEMobjpool *pop, uint64_t off)
{
	return alloc_get_header(pop, off)->size -
		sizeof (struct allocation_header);
}
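/*
 * Illustrative sketch, not part of the allocator: the relationship between
 * the requested size and pmalloc_usable_size. It assumes the three-argument
 * pmalloc(pop, &off, size) variant from this file; the helper name and the
 * PMALLOC_EXAMPLES guard are hypothetical.
 */
#ifdef PMALLOC_EXAMPLES
static void
example_usable_size(PMEMobjpool *pop)
{
	uint64_t off = 0;

	if (pmalloc(pop, &off, 100) != 0)
		return;

	/*
	 * The usable size is the backing block size minus the allocation
	 * header, so it is at least the requested 100 bytes and may be
	 * larger due to bucket unit rounding.
	 */
	ASSERT(pmalloc_usable_size(pop, off) >= 100);
}
#endif /* PMALLOC_EXAMPLES */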
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto error;

	struct memory_block *blocks[2] = {&cnt, &next};

	uint64_t op_result;
	void *hdr;
	struct memory_block m = heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC,
		&hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
		(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = b->calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = b->calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * b->unit_size;

	struct memory_block cnt = get_mblock_from_alloc(pop, alloc);

	heap_lock_if_run(pop, cnt);

	struct memory_block next = {0, 0, 0, 0};
	if ((err = heap_get_adjacent_free_block(pop, b, &next, cnt, 0)) != 0)
		goto out;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto out;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto out;

	struct memory_block *blocks[2] = {&cnt, &next};

	uint64_t op_result;
	void *hdr;
	struct memory_block m = heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC,
		&hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	/* mark new part as accessible and undefined */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, (char *)block_data + alloc->size,
		real_size - alloc->size);
	/* resize allocated space */
	VALGRIND_DO_MEMPOOL_CHANGE(pop, userdatap, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	if (constructor != NULL)
		constructor(pop, userdatap,
			real_size - sizeof (struct allocation_header) -
			data_off, arg);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

out:
	heap_unlock_if_run(pop, cnt);
	lane_release(pop);

	return err;
}
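/*
 * Illustrative sketch, not part of the allocator: growing a block in place
 * with a constructor. example_init_cb, example_grow and the
 * PMALLOC_EXAMPLES guard are hypothetical. The constructor receives the
 * whole usable area of the resized block and runs before the new size is
 * made persistent through the redo log.
 */
#ifdef PMALLOC_EXAMPLES
static void
example_init_cb(PMEMobjpool *pop, void *ptr, size_t usable_size, void *arg)
{
	/* a real constructor would also flush these writes to persistence */
	memset(ptr, 0, usable_size);
}

static int
example_grow(PMEMobjpool *pop, uint64_t *off)
{
	/* no-op (returns 0) if the block already fits 256 bytes */
	return prealloc_construct(pop, off, 256, example_init_cb, NULL, 0);
}
#endif /* PMALLOC_EXAMPLES */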
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
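/*
 * Illustrative sketch, not part of the allocator: pfree takes a pointer to
 * the persistent 64-bit offset filled in by pmalloc. Zeroing that offset is
 * part of the same redo-log operation that frees the block, so a crash at
 * any point leaves either a valid allocation or a cleanly zeroed offset,
 * never a dangling one. example_release and the PMALLOC_EXAMPLES guard are
 * hypothetical.
 */
#ifdef PMALLOC_EXAMPLES
static void
example_release(PMEMobjpool *pop, uint64_t *obj_off)
{
	if (*obj_off == 0)
		return;	/* nothing allocated */

	if (pfree(pop, obj_off) != 0)
		ERR("pfree failed");

	/* on success *obj_off is now zero, persistently */
}
#endif /* PMALLOC_EXAMPLES */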