/*
 * huge_prep_operation_hdr -- prepares the new value of a chunk header that will
 *	be set after the operation concludes.
 *
 * If ctx is NULL the header is written and persisted immediately; otherwise
 * the change is deferred by appending it to the operation context's log.
 * Either way, the runtime-only footer (for multi-unit chunks) is prepared
 * strictly after the persistent header change has been issued/queued.
 */
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	/*
	 * Depending on the operation that needs to be performed a new chunk
	 * header needs to be prepared with the new chunk state.
	 */
	uint64_t val = chunk_get_chunk_hdr_value(
		op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
		hdr->flags,
		m->size_idx);

	if (ctx == NULL) {
		/*
		 * No transaction context: store the new header atomically
		 * (the header fits in a single 8-byte word) and persist it
		 * right away.
		 */
		util_atomic_store_explicit64((uint64_t *)hdr,
			val, memory_order_relaxed);
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
	} else {
		/* deferred: applied when the operation context is processed */
		operation_add_entry(ctx, hdr, val, ULOG_OPERATION_SET);
	}

	/*
	 * Headers covered by this chunk (other than the first) must not be
	 * touched while the chunk is in this state; tell valgrind so.
	 * NOTE(review): the count uses hdr->size_idx while the rest of the
	 * function uses m->size_idx — presumably equal for huge chunks, but
	 * worth confirming against heap_get_chunk_hdr's contract.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
		(hdr->size_idx - 1) * sizeof(struct chunk_header));

	/*
	 * In the case of chunks larger than one unit the footer must be
	 * created immediately AFTER the persistent state is safely updated.
	 */
	if (m->size_idx == 1)
		return;

	struct chunk_header *footer = hdr + m->size_idx - 1;
	VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));

	val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);

	/*
	 * It's only safe to write the footer AFTER the persistent part of
	 * the operation have been successfully processed because the footer
	 * pointer might point to a currently valid persistent state
	 * of a different chunk.
	 * The footer entry change is updated as transient because it will
	 * be recreated at heap boot regardless - it's just needed for runtime
	 * operations.
	 */
	if (ctx == NULL) {
		util_atomic_store_explicit64((uint64_t *)footer, val,
			memory_order_relaxed);
		/* runtime-only data: mark clean so valgrind doesn't flag it */
		VALGRIND_SET_CLEAN(footer, sizeof(*footer));
	} else {
		operation_add_typed_entry(ctx,
			footer, val, ULOG_OPERATION_SET, LOG_TRANSIENT);
	}
}
/*
 * heap_get_block_header -- returns the header of the memory block
 *
 * For non-run (huge) chunks the header is the chunk header itself; the new
 * header value reflecting the requested operation is written to *op_result
 * and the chunk footer is (re)written. For run chunks the header is the
 * relevant bitmap word; *op_result receives that word with the block's bits
 * set (alloc) or cleared (free).
 *
 * The caller is responsible for persisting *op_result to the returned
 * location -- this function only computes the new value.
 */
void *
heap_get_block_header(PMEMobjpool *pop, struct memory_block m,
	enum heap_op op, uint64_t *op_result)
{
	struct zone *z = &pop->heap->layout->zones[m.zone_id];
	struct chunk_header *hdr = &z->chunk_headers[m.chunk_id];

	if (hdr->type != CHUNK_TYPE_RUN) {
		*op_result = chunk_get_chunk_hdr_value(*hdr,
			op == HEAP_OP_ALLOC ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
			m.size_idx);
		heap_chunk_write_footer(hdr, m.size_idx);
		return hdr;
	}

	struct chunk_run *r = (struct chunk_run *)&z->chunks[m.chunk_id];

	/*
	 * Build the mask of the blocks this allocation occupies within one
	 * bitmap word. 1ULL (not 1L) is required: `long` is 32-bit on LLP64
	 * platforms, and left-shifting a signed value into/past the sign bit
	 * is undefined behavior (CERT INT34-C). With unsigned 64-bit
	 * arithmetic the shift is well-defined for size_idx < 64.
	 */
	uint64_t bmask = ((1ULL << m.size_idx) - 1ULL) <<
			(m.block_off % BITS_PER_VALUE);

	int bpos = m.block_off / BITS_PER_VALUE;

	if (op == HEAP_OP_FREE)
		*op_result = r->bitmap[bpos] & ~bmask;
	else
		*op_result = r->bitmap[bpos] | bmask;

	return &r->bitmap[bpos];
}
/* * huge_prep_operation_hdr -- prepares the new value of a chunk header that will * be set after the operation concludes. */ static void huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op, struct operation_context *ctx) { struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id); struct chunk_header *hdr = &z->chunk_headers[m->chunk_id]; /* * Depending on the operation that needs to be performed a new chunk * header needs to be prepared with the new chunk state. */ uint64_t val = chunk_get_chunk_hdr_value( op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE, hdr->flags, m->size_idx); operation_add_entry(ctx, hdr, val, OPERATION_SET); VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1, (hdr->size_idx - 1) * sizeof(struct chunk_header)); /* * In the case of chunks larger than one unit the footer must be * created immediately AFTER the persistent state is safely updated. */ if (m->size_idx == 1) return; struct chunk_header *footer = hdr + m->size_idx - 1; VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer)); val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx); /* * It's only safe to write the footer AFTER the persistent part of * the operation have been successfully processed because the footer * pointer might point to a currently valid persistent state * of a different chunk. * The footer entry change is updated as transient because it will * be recreated at heap boot regardless - it's just needed for runtime * operations. */ operation_add_typed_entry(ctx, footer, val, OPERATION_SET, ENTRY_TRANSIENT); }