/*
 * constructor_zrealloc_root -- (internal) constructor for pmemobj_root
 *
 * Reallocation constructor for the root object: wraps constructor_realloc
 * in pmemcheck transaction markers and re-arms the no-access redzone over
 * the OOB header padding afterwards.
 */
static void
constructor_zrealloc_root(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *carg = arg;

	/* cover the OOB header plus the whole usable area for pmemcheck */
	VALGRIND_ADD_TO_TX(OOB_HEADER_FROM_PTR(ptr),
		usable_size + OBJ_OOB_SIZE);

	constructor_realloc(pop, ptr, usable_size, arg);

	/* activate the padding redzone */
	VALGRIND_DO_MAKE_MEM_NOACCESS(pop,
		&OOB_HEADER_FROM_PTR(ptr)->data.padding,
		sizeof (OOB_HEADER_FROM_PTR(ptr)->data.padding));

	/* user-supplied constructor runs after the zero/copy initialization */
	if (carg->constructor)
		carg->constructor(pop, ptr, carg->arg);

	/*
	 * NOTE(review): the region removed here is sized with carg->new_size
	 * while the region added above used usable_size; if usable_size is
	 * larger than new_size the tail remains inside the pmemcheck tx --
	 * confirm this asymmetry is intentional.
	 */
	VALGRIND_REMOVE_FROM_TX(OOB_HEADER_FROM_PTR(ptr),
		carg->new_size + OBJ_OOB_SIZE);
}
/*
 * pmemobj_vg_register_object -- (internal) notify Valgrind about object
 *
 * Registers an existing object with the memcheck mempool, marks its payload
 * and headers as defined, and protects the header fields no one should read.
 */
static void
pmemobj_vg_register_object(struct pmemobjpool *pop, PMEMoid oid, int is_root)
{
	LOG(4, "pop %p oid.off 0x%016jx is_root %d", pop, oid.off, is_root);

	void *addr = pmemobj_direct(oid);

	/* the root object reports its size differently than regular ones */
	size_t obj_size = is_root ? pmemobj_root_size(pop) :
		pmemobj_alloc_usable_size(oid);

	size_t hdr_size = sizeof (struct allocation_header) + OBJ_OOB_SIZE;

	VALGRIND_DO_MEMPOOL_ALLOC(pop, addr, obj_size);
	VALGRIND_DO_MAKE_MEM_DEFINED(pop, addr - hdr_size,
		obj_size + hdr_size);

	struct oob_header *oobh = OOB_HEADER_FROM_PTR(addr);

	/* no one should touch it */
	if (!is_root)
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oobh->size,
			sizeof (oobh->size));

	/* no one should touch it */
	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oobh->data.padding,
		sizeof (oobh->data.padding));
}
/*
 * constructor_alloc_root -- (internal) constructor for obj_alloc_root
 *
 * Initializes the root object's contents (user constructor or zero-fill),
 * stamps and persists its OOB header fields, then re-protects the padding.
 */
static void
constructor_alloc_root(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct oob_header *oobh = OOB_HEADER_FROM_PTR(ptr);
	struct carg_root *carg = arg;
	size_t size = carg->size;

	/* temporarily add atomic root allocation to pmemcheck transaction */
	VALGRIND_ADD_TO_TX(oobh, OBJ_OOB_SIZE + size);

	if (carg->constructor != NULL)
		carg->constructor(pop, ptr, carg->arg);
	else
		pop->memset_persist(pop, ptr, 0, size);

	oobh->data.internal_type = TYPE_ALLOCATED;
	oobh->data.user_type = POBJ_ROOT_TYPE_NUM;
	oobh->size = size;

	VALGRIND_REMOVE_FROM_TX(oobh, OBJ_OOB_SIZE + size);

	/* there's no padding between these, so we can add sizes */
	pop->persist(pop, &oobh->size,
		sizeof (oobh->size) + sizeof (oobh->data.internal_type) +
		sizeof (oobh->data.user_type));

	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oobh->data.padding,
		sizeof (oobh->data.padding));
}
/*
 * constructor_zrealloc -- (internal) constructor for pmemobj_zrealloc
 *
 * If the object moved, copies the old contents and re-stamps the OOB
 * header; in either case zero-fills any tail that the object grew by.
 */
static void
constructor_zrealloc(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *carg = arg;
	struct oob_header *oobh = OOB_HEADER_FROM_PTR(ptr);

	/* the object was relocated -- carry the old payload over */
	if (ptr != carg->ptr) {
		size_t to_copy = carg->old_size < carg->new_size ?
			carg->old_size : carg->new_size;

		pop->memcpy_persist(pop, ptr, carg->ptr, to_copy);

		oobh->data.internal_type = TYPE_ALLOCATED;
		oobh->data.user_type = carg->user_type;

		/* there's no padding between these, so we can add sizes */
		pop->persist(pop, &oobh->data.internal_type,
			sizeof (oobh->data.internal_type) +
			sizeof (oobh->data.user_type));

		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oobh->data.padding,
			sizeof (oobh->data.padding));
	}

	/* zero-fill the newly grown part of the object, if any */
	if (carg->new_size > carg->old_size) {
		char *tail = (char *)ptr + carg->old_size;
		pop->memset_persist(pop, tail, 0,
			carg->new_size - carg->old_size);
	}
}
/*
 * constructor_alloc_bytype -- (internal) constructor for obj_alloc_construct
 *
 * Stamps the OOB header with the allocation type, optionally zero-fills
 * the object, protects the padding, then runs the user constructor.
 */
static void
constructor_alloc_bytype(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct oob_header *oobh = OOB_HEADER_FROM_PTR(ptr);
	struct carg_bytype *carg = arg;

	oobh->data.internal_type = TYPE_ALLOCATED;
	oobh->data.user_type = carg->user_type;

	/* there's no padding between these, so we can add sizes */
	pop->flush(pop, &oobh->data.internal_type,
		sizeof (oobh->data.internal_type) +
		sizeof (oobh->data.user_type));

	/* memset_persist drains for us, otherwise drain the flush above */
	if (carg->zero_init)
		pop->memset_persist(pop, ptr, 0, usable_size);
	else
		pop->drain(pop);

	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oobh->data.padding,
		sizeof (oobh->data.padding));

	if (carg->constructor != NULL)
		carg->constructor(pop, ptr, carg->arg);
}
/*
 * pmalloc_operation -- higher level wrapper for basic allocator API
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pmalloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
	size_t size, palloc_constr constructor, void *arg,
	struct operation_context *ctx)
{
#ifdef USE_VG_MEMCHECK
	/*
	 * The resulting offset is needed below to find the object's OOB
	 * header, so capture it even when the caller doesn't ask for it.
	 */
	uint64_t tmp;
	if (size && On_valgrind && dest_off == NULL)
		dest_off = &tmp;
#endif

	int ret = palloc_operation(heap, off, dest_off, size, constructor,
		arg, ctx);
	if (ret)
		return ret;

#ifdef USE_VG_MEMCHECK
	if (size && On_valgrind) {
		struct oob_header *pobj = OOB_HEADER_FROM_PTR(
			(char *)heap->base + *dest_off);

		/*
		 * The first few bytes of the oobh are unused and double as
		 * an object guard which will cause valgrind to issue an error
		 * whenever the unused memory is accessed.
		 */
		VALGRIND_DO_MAKE_MEM_NOACCESS(pobj->unused,
			sizeof (pobj->unused));
	}
#endif

	return 0;
}
/*
 * constructor_zrealloc_root -- (internal) constructor for pmemobj_root
 *
 * Runs the generic zrealloc constructor followed by the user-supplied
 * constructor, bracketed by pmemcheck transaction markers.
 */
static void
constructor_zrealloc_root(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *carg = arg;
	struct oob_header *oobh = OOB_HEADER_FROM_PTR(ptr);
	size_t tx_size = carg->new_size + OBJ_OOB_SIZE;

	VALGRIND_ADD_TO_TX(oobh, tx_size);

	/* copy/zero-fill and header stamping is shared with zrealloc */
	constructor_zrealloc(pop, ptr, arg);

	if (carg->constructor != NULL)
		carg->constructor(pop, ptr, carg->arg);

	VALGRIND_REMOVE_FROM_TX(oobh, tx_size);
}
/*
 * pmemobj_type_num -- returns type number of object
 *
 * Returns -1 for the NULL object, otherwise the user type stored in the
 * object's OOB header.
 */
int
pmemobj_type_num(PMEMoid oid)
{
	LOG(3, "oid.off 0x%016jx", oid.off);

	if (OBJ_OID_IS_NULL(oid))
		return -1;

	struct oob_header *oobh = OOB_HEADER_FROM_PTR(pmemobj_direct(oid));

	return oobh->data.user_type;
}