/*
 * operation_add_entry -- adds new entry to the current operation with
 *	entry type autodetected based on the memory location
 */
void
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
	enum operation_type type)
{
	/*
	 * Locations inside the pool are persistent and must go through the
	 * persistent log; anything else is tracked as a transient entry.
	 */
	if (OBJ_PTR_IS_VALID(ctx->pop, ptr)) {
		operation_add_typed_entry(ctx, ptr, value, type,
			ENTRY_PERSISTENT);
	} else {
		operation_add_typed_entry(ctx, ptr, value, type,
			ENTRY_TRANSIENT);
	}
}
/* * huge_prep_operation_hdr -- prepares the new value of a chunk header that will * be set after the operation concludes. */ static void huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op, struct operation_context *ctx) { struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m); /* * Depending on the operation that needs to be performed a new chunk * header needs to be prepared with the new chunk state. */ uint64_t val = chunk_get_chunk_hdr_value( op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE, hdr->flags, m->size_idx); if (ctx == NULL) { util_atomic_store_explicit64((uint64_t *)hdr, val, memory_order_relaxed); pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr)); } else { operation_add_entry(ctx, hdr, val, ULOG_OPERATION_SET); } VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1, (hdr->size_idx - 1) * sizeof(struct chunk_header)); /* * In the case of chunks larger than one unit the footer must be * created immediately AFTER the persistent state is safely updated. */ if (m->size_idx == 1) return; struct chunk_header *footer = hdr + m->size_idx - 1; VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer)); val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx); /* * It's only safe to write the footer AFTER the persistent part of * the operation have been successfully processed because the footer * pointer might point to a currently valid persistent state * of a different chunk. * The footer entry change is updated as transient because it will * be recreated at heap boot regardless - it's just needed for runtime * operations. */ if (ctx == NULL) { util_atomic_store_explicit64((uint64_t *)footer, val, memory_order_relaxed); VALGRIND_SET_CLEAN(footer, sizeof(*footer)); } else { operation_add_typed_entry(ctx, footer, val, ULOG_OPERATION_SET, LOG_TRANSIENT); } }
/*
 * huge_prep_operation_hdr -- prepares the new value of a chunk header that will
 * be set after the operation concludes.
 */
/*
 * NOTE(review): this is a second static definition of
 * huge_prep_operation_hdr in the same translation unit -- it looks like an
 * older revision (zone/chunk_id header lookup, OPERATION_SET, no NULL-ctx
 * path) concatenated alongside the newer one above; two same-named static
 * functions will not compile, so confirm which revision should remain.
 */
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	/* locate this block's chunk header inside its zone */
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];

	/*
	 * Depending on the operation that needs to be performed a new chunk
	 * header needs to be prepared with the new chunk state.
	 */
	uint64_t val = chunk_get_chunk_hdr_value(
		op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
		hdr->flags,
		m->size_idx);

	/* header update is logged so it takes effect when ctx is processed */
	operation_add_entry(ctx, hdr, val, OPERATION_SET);

	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
		(hdr->size_idx - 1) * sizeof(struct chunk_header));

	/*
	 * In the case of chunks larger than one unit the footer must be
	 * created immediately AFTER the persistent state is safely updated.
	 */
	if (m->size_idx == 1)
		return;

	struct chunk_header *footer = hdr + m->size_idx - 1;

	VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));

	val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);

	/*
	 * It's only safe to write the footer AFTER the persistent part of
	 * the operation have been successfully processed because the footer
	 * pointer might point to a currently valid persistent state
	 * of a different chunk.
	 * The footer entry change is updated as transient because it will
	 * be recreated at heap boot regardless - it's just needed for runtime
	 * operations.
	 */
	operation_add_typed_entry(ctx, footer, val, OPERATION_SET,
		ENTRY_TRANSIENT);
}