/*
 * run_block_size -- looks up the run chunk for the given memory block and
 * returns the unit block size recorded in its run header metadata.
 */
static size_t
run_block_size(const struct memory_block *m)
{
	return heap_get_chunk_run(m->heap, m)->hdr.block_size;
}
/*
 * run_get_real_data -- returns pointer to the beginning data of a run block
 */
static void *
run_get_real_data(const struct memory_block *m)
{
	struct chunk_run *r = heap_get_chunk_run(m->heap, m);

	/* a run must have been initialized with a non-zero unit size */
	ASSERT(r->hdr.block_size != 0);

	char *data = run_get_data_start(m);
	return data + (r->hdr.block_size * m->block_off);
}
/*
 * run_get_real_data -- returns pointer to the beginning data of a run block
 *
 * NOTE(review): this is a SECOND definition of run_get_real_data (another
 * one appears earlier in this file). It seems to come from a different
 * revision of this module, where block_size lives directly in struct
 * chunk_run (not in run->hdr) and run_get_data_start() takes the chunk
 * header, the run, and the header type explicitly. Two definitions of the
 * same name cannot coexist in one translation unit -- confirm which
 * revision is intended and remove the other.
 */
static void *
run_get_real_data(const struct memory_block *m)
{
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	/* a run must have been initialized with a non-zero unit size */
	ASSERT(run->block_size != 0);

	return run_get_data_start(hdr, run, m->header_type) +
		(run->block_size * m->block_off);
}
/* * run_get_bitmap -- initializes run bitmap information */ static void run_get_bitmap(const struct memory_block *m, struct run_bitmap *b) { struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m); struct chunk_run *run = heap_get_chunk_run(m->heap, m); uint32_t size_idx = hdr->size_idx; memblock_run_bitmap(&size_idx, hdr->flags, run->hdr.block_size, run->hdr.alignment, run->content, b); ASSERTeq(size_idx, hdr->size_idx); }
/*
 * run_iterate_used -- iterates over used blocks in a run
 *
 * Walks the run bitmap starting at m->block_off, invoking the callback for
 * every set bit (used block). Iteration stops early (returning 1) if the
 * callback returns non-zero; returns 0 after a full pass.
 */
static int
run_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
	/* position within the bitmap: which 64-bit value, and which bit */
	uint32_t i = m->block_off / RUN_BITS_PER_VALUE;
	uint32_t block_start = m->block_off % RUN_BITS_PER_VALUE;
	uint32_t block_off;

	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	/* reusable block descriptor passed to the callback */
	struct memory_block iter = *m;

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	for (; i < b.nvalues; ++i) {
		uint64_t v = b.values[i];
		block_off = (uint32_t)(RUN_BITS_PER_VALUE * i);

		for (uint32_t j = block_start; j < RUN_BITS_PER_VALUE; ) {
			/* bits past nbits are padding, not real blocks */
			if (block_off + j >= (uint32_t)b.nbits)
				break;

			if (!BIT_IS_CLR(v, j)) { /* set bit == used block */
				iter.block_off = (uint32_t)(block_off + j);

				/*
				 * The size index of this memory block cannot be
				 * retrieved at this time because the header
				 * might not be initialized in valgrind yet.
				 */
				iter.size_idx = 0;

				if (cb(&iter, arg) != 0)
					return 1;

				/*
				 * Derive the object's size index from its real
				 * size so we can skip over all of its bits.
				 */
				iter.size_idx = CALC_SIZE_IDX(
					run->hdr.block_size,
					iter.m_ops->get_real_size(&iter));
				j = (uint32_t)(j + iter.size_idx);
			} else {
				++j;
			}
		}

		/* only the first bitmap value starts mid-word */
		block_start = 0;
	}

	return 0;
}
/*
 * memblock_from_offset_opt -- resolves a memory block data from an offset
 *	that originates from the heap
 *
 * Successively subtracts each layer of the heap layout (zone, chunk, run
 * metadata, allocation header) from the offset; by the end, 'off' must be
 * exactly 0, which proves the offset pointed at the start of a block's data.
 * If 'size' is zero the size index of the result is not computed.
 */
struct memory_block
memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.heap = heap;

	/* make the offset relative to the first zone */
	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0);
	m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);

	/* skip past the zone metadata as well */
	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone);
	m.chunk_id = (uint32_t)(off / CHUNKSIZE);

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	/*
	 * The offset may land in one of the trailing data chunks of a
	 * multi-chunk run; rewind to the run's first chunk.
	 */
	if (hdr->type == CHUNK_TYPE_RUN_DATA)
		m.chunk_id -= hdr->size_idx;

	off -= CHUNKSIZE * m.chunk_id;

	m.header_type = memblock_header_type(&m);
	off -= header_type_to_size[m.header_type];

	/*
	 * A huge allocation's data begins right after its header, so the
	 * remaining offset is zero; any leftover means this is a run block.
	 */
	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
	ASSERTeq(memblock_detect_type(heap, &m), m.type);

	m.m_ops = &mb_ops[m.type];

	uint64_t unit_size = m.m_ops->block_size(&m);

	if (off != 0) { /* run */
		struct chunk_run *run = heap_get_chunk_run(heap, &m);

		/* account for run metadata and alignment padding */
		off -= run_get_alignment_padding(hdr, run, m.header_type);
		off -= RUN_METASIZE;

		/*
		 * NOTE(review): block_off is narrowed to uint16_t here --
		 * this assumes a run never holds more than 65535 units;
		 * confirm against the run bitmap limits.
		 */
		m.block_off = (uint16_t)(off / unit_size);
		off -= m.block_off * unit_size;
	}

	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size,
		memblock_header_ops[m.header_type].get_size(&m));

	/* every layer must have been accounted for exactly */
	ASSERTeq(off, 0);

	return m;
}
/*
 * run_prep_operation_hdr -- prepares the new value for a select few bytes of
 *	a run bitmap that will be set after the operation concludes.
 *
 * It's VERY important to keep in mind that the particular value of the
 * bitmap this method is modifying must not be changed after this function
 * is called and before the operation is processed.
 */
static void
run_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct chunk_run *r = heap_get_chunk_run(m->heap, m);

	/* a single entry can flip bits within one bitmap value only */
	ASSERT(m->size_idx <= BITS_PER_VALUE);

	/*
	 * Free blocks are represented by clear bits and used blocks by set
	 * bits - which is the reverse of the commonly used scheme.
	 *
	 * Here a bit mask is prepared that flips the bits that represent the
	 * memory block provided by the caller - because both the size index and
	 * the block offset are tied 1:1 to the bitmap this operation is
	 * relatively simple.
	 */
	uint64_t bmask;
	if (m->size_idx == BITS_PER_VALUE) {
		/* a full-value mask; the block must start on a value boundary */
		ASSERTeq(m->block_off % BITS_PER_VALUE, 0);
		bmask = UINT64_MAX;
	} else {
		bmask = ((1ULL << m->size_idx) - 1ULL) <<
			(m->block_off % BITS_PER_VALUE);
	}

	/*
	 * The run bitmap is composed of several 8 byte values, so a proper
	 * element of the bitmap array must be selected.
	 */
	int bpos = m->block_off / BITS_PER_VALUE;

	/* the bit mask is applied immediately by the add entry operations */
	if (op == MEMBLOCK_ALLOCATED) {
		operation_add_entry(ctx, &r->bitmap[bpos],
			bmask, REDO_OPERATION_OR);
	} else if (op == MEMBLOCK_FREE) {
		operation_add_entry(ctx, &r->bitmap[bpos],
			~bmask, REDO_OPERATION_AND);
	} else {
		ASSERT(0);
	}
}
/* * run_get_data_start -- (internal) returns the pointer to the beginning of * allocations in a run */ static char * run_get_data_start(const struct memory_block *m) { struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m); struct chunk_run *run = heap_get_chunk_run(m->heap, m); struct run_bitmap b; run_get_bitmap(m, &b); if (hdr->flags & CHUNK_FLAG_ALIGNED) { /* * Alignment is property of user data in allocations. And * since objects have headers, we need to take them into * account when calculating the address. */ uintptr_t hsize = header_type_to_size[m->header_type]; uintptr_t base = (uintptr_t)run->content + b.size + hsize; return (char *)(ALIGN_UP(base, run->hdr.alignment) - hsize); } else { return (char *)&run->content + b.size; } }
/*
 * run_vg_init -- initializes run metadata in memcheck state
 *
 * Marks the run's metadata as defined for valgrind, validates the trailing
 * RUN_DATA chunk headers, and optionally replays the used-block iteration
 * so per-object valgrind state can be established via the callback.
 */
static void
run_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* set the run metadata as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	/*
	 * Mark run data headers as defined.
	 */
	for (unsigned j = 1; j < m->size_idx; ++j) {
		struct chunk_header *data_hdr =
			&z->chunk_headers[m->chunk_id + j];
		VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
			sizeof(struct chunk_header));
		ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
	}

	/*
	 * First revoke access to the whole run, then re-expose only the
	 * metadata + bitmap prefix -- user data stays NOACCESS until the
	 * per-object callback defines it.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));

	/* set the run bitmap as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);

	if (objects) {
		if (run_iterate_used(m, cb, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
/* * run_get_data_offset -- (internal) returns the number of bytes between * run base metadata and data */ static size_t run_get_data_offset(const struct memory_block *m) { struct chunk_run *run = heap_get_chunk_run(m->heap, m); return (size_t)run_get_data_start(m) - (size_t)&run->content; }
/*
 * memblock_huge_init -- initializes a new huge memory block
 *
 * Writes a fresh CHUNK_TYPE_FREE header (and its footer) for the chunk and
 * persists it, then rebuilds the volatile state of the returned block.
 */
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct chunk_header nhdr = {
		.type = CHUNK_TYPE_FREE,
		.flags = 0,
		.size_idx = size_idx
	};

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	huge_write_footer(hdr, size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}

/*
 * memblock_run_init -- initializes a new run memory block
 *
 * Lays out the run metadata and bitmap, persists them, converts the
 * trailing chunks into RUN_DATA headers, and only then flips the first
 * chunk's header to CHUNK_TYPE_RUN -- the ordering of the persists matters
 * for crash consistency.
 */
struct memory_block
memblock_run_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	ASSERTne(size_idx, 0);

	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	struct chunk_run *run = heap_get_chunk_run(heap, &m);
	size_t runsize = SIZEOF_RUN(run, size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->hdr.block_size = unit_size;
	run->hdr.alignment = alignment;

	struct run_bitmap b;
	memblock_run_bitmap(&size_idx, flags, unit_size, alignment,
		run->content, &b);

	size_t bitmap_size = b.size;

	/* set all the bits */
	memset(b.values, 0xFF, bitmap_size);

	/* clear only the bits available for allocations from this bucket */
	memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));

	/*
	 * Bits beyond nbits in the last value stay set ("plugged") so they
	 * are never handed out as allocations.
	 *
	 * NOTE(review): if nbits were an exact multiple of
	 * RUN_BITS_PER_VALUE, trailing_bits would be 0 and the whole last
	 * value would remain plugged -- presumably the bitmap sizing
	 * guarantees trailing bits exist; confirm in memblock_run_bitmap.
	 */
	unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
	uint64_t last_value = UINT64_MAX << trailing_bits;
	b.values[b.nvalues - 1] = last_value;

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_flush(&heap->p_ops, run,
		sizeof(struct chunk_run_header) + bitmap_size);

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	/* mark every trailing chunk of the run as RUN_DATA */
	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < size_idx; ++i) {
		data_hdr = &z->chunk_headers[chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[chunk_id + 1],
		sizeof(struct chunk_header) * (size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	/* the first chunk's header is persisted last, committing the run */
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = flags;
	*hdr = run_hdr;
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}