/*
 * pmemobj_vg_register_object -- (internal) notify Valgrind about object
 *
 * Registers an existing object with Valgrind's mempool machinery: the user
 * data is marked as an allocation, the headers preceding it are made DEFINED,
 * and the internal header fields nobody should touch are made NOACCESS.
 */
static void
pmemobj_vg_register_object(struct pmemobjpool *pop, PMEMoid oid, int is_root)
{
	LOG(4, "pop %p oid.off 0x%016jx is_root %d", pop, oid.off, is_root);
	void *addr = pmemobj_direct(oid);

	size_t sz;
	if (is_root)
		sz = pmemobj_root_size(pop);
	else
		sz = pmemobj_alloc_usable_size(oid);

	/* allocation header + OOB header precede the user-visible data */
	size_t headers = sizeof (struct allocation_header) + OBJ_OOB_SIZE;

	VALGRIND_DO_MEMPOOL_ALLOC(pop, addr, sz);
	/*
	 * Cast to char * before subtracting: pointer arithmetic on void *
	 * is a GCC extension, not standard C (the palloc variant of this
	 * function already casts).
	 */
	VALGRIND_DO_MAKE_MEM_DEFINED(pop, (char *)addr - headers,
			sz + headers);

	struct oob_header *oob = OOB_HEADER_FROM_PTR(addr);

	if (!is_root)
		/* no one should touch it */
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oob->size,
				sizeof (oob->size));

	/* no one should touch it */
	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oob->data.padding,
			sizeof (oob->data.padding));
}
/*
 * alloc_prep_block -- (internal) prepares a memory block for allocation
 *
 * Once the block is fully reserved and it's guaranteed that no one else will
 * be able to write to this memory region it is safe to write the allocation
 * header and call the object construction function.
 *
 * Because the memory block at this stage is only reserved in transient state
 * there's no need to worry about fail-safety of this method because in case
 * of a crash the memory will be back in the free blocks collection.
 *
 * Returns 0 on success and stores the pool offset of the user data in
 * *offset_value; returns the constructor's non-zero value (and reverts the
 * Valgrind state) if the constructor cancels the allocation.
 */
static int
alloc_prep_block(struct palloc_heap *heap, struct memory_block m,
	palloc_constr constructor, void *arg,
	uint64_t *offset_value)
{
	void *block_data = heap_get_block_data(heap, m);
	/* user data starts past the allocation + OOB headers */
	void *userdatap = (char *)block_data + ALLOC_OFF;

	/* total block size = per-unit size times number of units reserved */
	uint64_t unit_size = MEMBLOCK_OPS(AUTO, &m)->
			block_size(&m, heap->layout);
	uint64_t real_size = unit_size * m.size_idx;

	ASSERT((uint64_t)block_data % ALLOC_BLOCK_SIZE == 0);
	ASSERT((uint64_t)userdatap % ALLOC_BLOCK_SIZE == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, userdatap,
			real_size - ALLOC_OFF);

	alloc_write_header(heap, block_data, m, real_size);

	int ret;
	if (constructor != NULL &&
		(ret = constructor(heap->base, userdatap,
			real_size - ALLOC_OFF, arg)) != 0) {

		/*
		 * If canceled, revert the block back to the free state in vg
		 * machinery. Because the free operation is only performed on
		 * the user data, the allocation header is made inaccessible
		 * in a separate call.
		 */
		VALGRIND_DO_MEMPOOL_FREE(heap->layout, userdatap);
		VALGRIND_DO_MAKE_MEM_NOACCESS(block_data, ALLOC_OFF);

		/*
		 * During this method there are several stores to pmem that are
		 * not immediately flushed and in case of a cancellation those
		 * stores are no longer relevant anyway.
		 */
		VALGRIND_SET_CLEAN(block_data, ALLOC_OFF);

		return ret;
	}

	/* flushes both the alloc and oob headers */
	pmemops_persist(&heap->p_ops, block_data, ALLOC_OFF);

	/*
	 * To avoid determining the user data pointer twice this method is also
	 * responsible for calculating the offset of the object in the pool that
	 * will be used to set the offset destination pointer provided by the
	 * caller.
	 */
	*offset_value = PMALLOC_PTR_TO_OFF(heap, userdatap);

	return 0;
}
/*
 * persist_alloc -- (internal) performs a persistent allocation of the
 *	memory block previously reserved by volatile bucket
 *
 * Writes the allocation header, runs the constructor on the user data, and
 * then makes the allocation durable through a redo log that atomically sets
 * both the caller's offset pointer and the on-heap block header. Returns 0
 * on success or the error from taking the run lock.
 */
static int
persist_alloc(PMEMobjpool *pop, struct lane_section *lane,
	struct memory_block m, uint64_t real_size, uint64_t *off,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	int err;

#ifdef DEBUG
	/* the reserved block must not already be marked allocated on-media */
	if (heap_block_is_allocated(pop, m)) {
		ERR("heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	/* data starts past the allocation header ... */
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	/* ... and the user-visible part past an additional caller offset */
	void *userdatap = (char *)datap + data_off;

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	/* mark everything (including headers) as accessible */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, block_data, real_size);
	/* mark space as allocated */
	VALGRIND_DO_MEMPOOL_ALLOC(pop, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, userdatap, arg);

	/* run blocks share a chunk; serialize header updates within it */
	if ((err = heap_lock_if_run(pop, m)) != 0) {
		VALGRIND_DO_MEMPOOL_FREE(pop, userdatap);
		return err;
	}

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	/*
	 * Two redo-log entries processed atomically: the caller's offset
	 * pointer and the heap block header flip together or not at all.
	 */
	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
/*
 * palloc_vg_register_object -- registers object in Valgrind
 *
 * Announces an existing allocation to the Valgrind mempool machinery and
 * makes the whole region -- headers included -- DEFINED.
 */
void
palloc_vg_register_object(struct palloc_heap *heap, void *addr, size_t size)
{
	size_t hdr_size = sizeof(struct allocation_header) + PALLOC_DATA_OFF;
	/* the raw block begins hdr_size bytes before the user data */
	char *raw_start = (char *)addr - hdr_size;

	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, addr, size);
	VALGRIND_DO_MAKE_MEM_DEFINED(raw_start, size + hdr_size);
}
/*
 * palloc_vg_register_alloc -- (internal) registers allocation header
 *	in Valgrind
 *
 * Memory-block iteration callback: re-initializes the block header and
 * reports the user data region to Valgrind as an allocated, DEFINED area.
 * Always returns 0 so iteration continues.
 */
static int
palloc_vg_register_alloc(const struct memory_block *m, void *arg)
{
	struct palloc_heap *h = arg;

	m->m_ops->reinit_header(m);

	void *data = m->m_ops->get_user_data(m);
	size_t data_size = m->m_ops->get_user_size(m);

	VALGRIND_DO_MEMPOOL_ALLOC(h->layout, data, data_size);
	VALGRIND_DO_MAKE_MEM_DEFINED(data, data_size);

	return 0;
}
/*
 * alloc_prep_block -- (internal) prepares a memory block for allocation
 *
 * Once the block is fully reserved and it's guaranteed that no one else will
 * be able to write to this memory region it is safe to write the allocation
 * header and call the object construction function.
 *
 * Because the memory block at this stage is only reserved in transient state
 * there's no need to worry about fail-safety of this method because in case
 * of a crash the memory will be back in the free blocks collection.
 *
 * On success stores the pool offset of the user data in *offset_value and
 * returns 0; if the constructor cancels, its non-zero result is returned.
 */
static int
alloc_prep_block(struct palloc_heap *heap, const struct memory_block *m,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint64_t *offset_value)
{
	void *data = m->m_ops->get_user_data(m);
	size_t data_size = m->m_ops->get_user_size(m);

	/* announce the reservation to the vg machinery */
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, data, data_size);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(data, data_size);
	VALGRIND_ANNOTATE_NEW_MEMORY(data, data_size);

	if (constructor != NULL) {
		int err = constructor(heap->base, data, data_size, arg);
		if (err != 0) {
			/*
			 * If canceled, revert the block back to the free
			 * state in vg machinery.
			 */
			VALGRIND_DO_MEMPOOL_FREE(heap->layout, data);
			return err;
		}
	}

	m->m_ops->write_header(m, extra_field, object_flags);

	/*
	 * To avoid determining the user data pointer twice this method is also
	 * responsible for calculating the offset of the object in the pool that
	 * will be used to set the offset destination pointer provided by the
	 * caller.
	 */
	*offset_value = HEAP_PTR_TO_OFF(heap, data);

	return 0;
}