/*
 * heap_get_adjacent_free_block -- locates adjacent free memory block in heap
 */
static int
heap_get_adjacent_free_block(struct palloc_heap *heap,
	const struct memory_block *in, struct memory_block *out, int prev)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, in->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[in->chunk_id];
	out->zone_id = in->zone_id;

	if (prev) {
		if (in->chunk_id == 0)
			return ENOENT;

		/*
		 * The last header slot of a multi-chunk block doubles as its
		 * footer (see heap_chunk_write_footer), so the header at
		 * chunk_id - 1 carries the size of the entire previous block.
		 */
		struct chunk_header *prev_hdr =
			&z->chunk_headers[in->chunk_id - 1];
		out->chunk_id = in->chunk_id - prev_hdr->size_idx;

		if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
			return ENOENT;

		out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
	} else { /* next */
		if (in->chunk_id + hdr->size_idx == z->header.size_idx)
			return ENOENT;

		out->chunk_id = in->chunk_id + hdr->size_idx;

		if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
			return ENOENT;

		out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
	}
	memblock_rebuild_state(heap, out);

	return 0;
}
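/*
 * Hedged worked example for the `prev` lookup above: if the block at
 * chunk_id 10 follows a block spanning chunks 7..9, the footer stored in
 * chunk_headers[9] holds size_idx 3, so out->chunk_id = 10 - 3 = 7 lands
 * on the first header of the preceding block in constant time, without
 * scanning the zone.
 */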
/*
 * heap_chunk_foreach_object -- (internal) iterates through objects in a chunk
 */
static int
heap_chunk_foreach_object(struct palloc_heap *heap, object_callback cb,
	void *arg, struct memory_block *m)
{
	struct zone *zone = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_header *hdr = &zone->chunk_headers[m->chunk_id];
	memblock_rebuild_state(heap, m);

	switch (hdr->type) {
		case CHUNK_TYPE_FREE:
			return 0;
		case CHUNK_TYPE_USED:
			m->size_idx = hdr->size_idx;
			return cb(m, arg);
		case CHUNK_TYPE_RUN:
			return heap_run_foreach_object(heap, cb, arg, m,
				alloc_class_get_create_by_unit_size(
					heap->rt->alloc_classes,
					m->m_ops->block_size(m)));
		default:
			ASSERT(0);
	}

	return 0;
}
/*
 * heap_reclaim_zone_garbage -- (internal) creates volatile state of unused runs
 */
static int
heap_reclaim_zone_garbage(struct palloc_heap *heap, uint32_t zone_id, int init)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_run *run = NULL;
	int rchunks = 0;

	/*
	 * If this is the first time this zone is processed, recreate all
	 * footers BEFORE any other operation takes place. For example, the
	 * heap_init_free_chunk call expects the footers to be created.
	 */
	if (init) {
		for (uint32_t i = 0; i < z->header.size_idx; ) {
			struct chunk_header *hdr = &z->chunk_headers[i];

			switch (hdr->type) {
				case CHUNK_TYPE_USED:
					heap_chunk_write_footer(hdr,
						hdr->size_idx);
					break;
			}

			i += hdr->size_idx;
		}
	}

	for (uint32_t i = 0; i < z->header.size_idx; ) {
		struct chunk_header *hdr = &z->chunk_headers[i];
		ASSERT(hdr->size_idx != 0);

		struct memory_block m = MEMORY_BLOCK_NONE;
		m.zone_id = zone_id;
		m.chunk_id = i;
		m.size_idx = hdr->size_idx;

		memblock_rebuild_state(heap, &m);

		switch (hdr->type) {
			case CHUNK_TYPE_RUN:
				run = (struct chunk_run *)&z->chunks[i];
				rchunks += heap_reclaim_run(heap, run, &m);
				break;
			case CHUNK_TYPE_FREE:
				if (init)
					heap_init_free_chunk(heap, hdr, &m);
				break;
			case CHUNK_TYPE_USED:
				break;
			default:
				ASSERT(0);
		}

		i = m.chunk_id + m.size_idx; /* hdr might have changed */
	}

	return rchunks == 0 ? ENOMEM : 0;
}
/*
 * run_process_bitmap_value -- (internal) looks for unset bits in the value,
 * creates a valid memory block out of them and runs the provided callback
 * on that block.
 */
static int
run_process_bitmap_value(const struct memory_block *m,
	uint64_t value, uint32_t base_offset, object_callback cb, void *arg)
{
	int ret = 0;

	uint64_t shift = 0; /* already processed bits */
	struct memory_block s = *m;
	do {
		/*
		 * Shift the value so that the next memory block starts on the
		 * least significant position:
		 *	..............0 (free block)
		 * or	..............1 (used block)
		 */
		uint64_t shifted = value >> shift;

		/* all clear or set bits indicate the end of traversal */
		if (shifted == 0) {
			/*
			 * Insert the remaining blocks as free. Remember that
			 * unsigned values are always zero-filled, so we must
			 * take the current shift into account.
			 */
			s.block_off = (uint32_t)(base_offset + shift);
			s.size_idx = (uint32_t)(RUN_BITS_PER_VALUE - shift);

			if ((ret = cb(&s, arg)) != 0)
				return ret;

			break;
		} else if (shifted == UINT64_MAX) {
			break;
		}

		/*
		 * Offset and size of the next free block, either of these
		 * can be zero depending on where the free block is located
		 * in the value.
		 */
		unsigned off = (unsigned)util_lssb_index64(~shifted);
		unsigned size = (unsigned)util_lssb_index64(shifted);

		shift += off + size;

		if (size != 0) { /* zero size means skip to the next value */
			s.block_off = (uint32_t)(base_offset + (shift - size));
			s.size_idx = (uint32_t)size;

			memblock_rebuild_state(m->heap, &s);
			if ((ret = cb(&s, arg)) != 0)
				return ret;
		}
	} while (shift != RUN_BITS_PER_VALUE);

	return 0;
}
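/*
 * A minimal standalone sketch (not part of the sources above) tracing the
 * bit-scanning trick from run_process_bitmap_value on one sample value.
 * It assumes util_lssb_index64 behaves like GCC's __builtin_ctzll (index
 * of the least significant set bit), which stands in for it here, and
 * fixes RUN_BITS_PER_VALUE at 64. Compile and run it on its own.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t value = ~0x3F0ULL; /* bits 0-3 used, 4-9 free, 10-63 used */
	uint64_t shift = 0;

	do {
		uint64_t shifted = value >> shift;

		if (shifted == 0) { /* everything above `shift` is free */
			printf("free: off %u size %u\n", (unsigned)shift,
				(unsigned)(64 - shift));
			break;
		} else if (shifted == UINT64_MAX) { /* the rest is used */
			break;
		}

		/*
		 * Bit 0 of `shifted` is either set or clear, so exactly one
		 * of `off` (length of the used prefix) and `size` (length of
		 * the free prefix) is nonzero in each iteration.
		 */
		unsigned off = (unsigned)__builtin_ctzll(~shifted);
		unsigned size = (unsigned)__builtin_ctzll(shifted);
		shift += off + size;

		if (size != 0) /* prints "free: off 4 size 6" */
			printf("free: off %u size %u\n",
				(unsigned)(shift - size), size);
	} while (shift != 64);

	return 0;
}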
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m, uint32_t units)
{
	if (b->aclass->type == CLASS_RUN) {
		ASSERT(units <= UINT16_MAX);
		ASSERT(m->block_off + units <= UINT16_MAX);
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, (uint16_t)(m->block_off + units),
			0, 0, NULL, NULL};
		memblock_rebuild_state(heap, &r);
		bucket_insert_block(b, &r);
	} else {
		heap_resize_chunk(heap, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(heap, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(heap, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = heap->rt->default_bucket;
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0,
		0, 0, NULL, NULL};
	memblock_rebuild_state(heap, &m);
	bucket_insert_block(def_bucket, &m);
}
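/*
 * Hedged worked example for heap_resize_chunk: shrinking a free chunk at
 * chunk_id 4 with size_idx 10 down to new_size_idx 3 splits it into
 * chunks [4..6] (size_idx 3, kept by the caller) and [7..13] (size_idx 7,
 * reinitialized as free and inserted into the default bucket).
 */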
/*
 * heap_coalesce -- (internal) merges adjacent memory blocks
 */
static struct memory_block
heap_coalesce(struct palloc_heap *heap,
	const struct memory_block *blocks[], int n)
{
	struct memory_block ret;
	const struct memory_block *b = NULL;
	ret.size_idx = 0;
	for (int i = 0; i < n; ++i) {
		if (blocks[i] == NULL)
			continue;
		b = b ? b : blocks[i];
		ret.size_idx += blocks[i]->size_idx;
	}

	ASSERTne(b, NULL);

	ret.chunk_id = b->chunk_id;
	ret.zone_id = b->zone_id;
	ret.block_off = b->block_off;
	memblock_rebuild_state(heap, &ret);

	return ret;
}
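/*
 * Hedged usage sketch (a hypothetical helper, not part of the sources
 * above) showing how heap_get_adjacent_free_block and heap_coalesce fit
 * together for huge-block coalescing: collect whichever free neighbors
 * exist and merge them with `m` into one larger block.
 */
static struct memory_block
example_coalesce_with_neighbors(struct palloc_heap *heap,
	const struct memory_block *m)
{
	struct memory_block prev = MEMORY_BLOCK_NONE;
	struct memory_block next = MEMORY_BLOCK_NONE;
	const struct memory_block *blocks[3] = {NULL, m, NULL};

	/* prev == 1 asks for the previous neighbor, 0 for the next one */
	if (heap_get_adjacent_free_block(heap, m, &prev, 1) == 0)
		blocks[0] = &prev;

	if (heap_get_adjacent_free_block(heap, m, &next, 0) == 0)
		blocks[2] = &next;

	return heap_coalesce(heap, blocks, 3);
}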
/*
 * heap_vg_open_chunk -- (internal) notifies Valgrind about chunk layout
 */
static void
heap_vg_open_chunk(struct palloc_heap *heap,
	object_callback cb, void *arg, int objects, struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	void *chunk = &z->chunks[m->chunk_id];
	memblock_rebuild_state(heap, m);

	if (m->type == MEMORY_BLOCK_RUN) {
		struct chunk_run *run = chunk;

		ASSERTne(m->size_idx, 0);
		VALGRIND_DO_MAKE_MEM_NOACCESS(run,
			SIZEOF_RUN(run, m->size_idx));

		/* set the run metadata as defined */
		VALGRIND_DO_MAKE_MEM_DEFINED(run,
			sizeof(*run) - sizeof(run->data));

		if (objects) {
			int ret = heap_run_foreach_object(heap, cb, arg, m,
				alloc_class_get_create_by_unit_size(
					heap->rt->alloc_classes,
					m->m_ops->block_size(m)));
			ASSERTeq(ret, 0);
		}
	} else {
		size_t size = m->m_ops->get_real_size(m);
		VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

		if (objects && m->m_ops->get_state(m) == MEMBLOCK_ALLOCATED) {
			int ret = cb(m, arg);
			ASSERTeq(ret, 0);
		}
	}
}
static void
test_recycler(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, &pop->heap_size,
		p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, &pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);

	int ret;

	struct recycler *r = recycler_new(&pop->heap,
		10000 /* never recalc */);
	UT_ASSERTne(r, NULL);

	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);

	init_run_with_score(pop->heap.layout, 15, 0);

	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};

	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;

	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);

	init_run_with_score(pop->heap.layout, 7, 256);
	init_run_with_score(pop->heap.layout, 2, 64);
	init_run_with_score(pop->heap.layout, 5, 512);
	init_run_with_score(pop->heap.layout, 10, 128);

	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);

	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_calc_score(&pop->heap, &mrun3, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_calc_score(&pop->heap, &mrun4, NULL));
	UT_ASSERTeq(ret, 0);

	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id,
		mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);

	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);

	ret = recycler_put(r, &mrun5,
		recycler_calc_score(&pop->heap, &mrun5, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);

	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);

	recycler_delete(r);

	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
static void
test_container(struct block_container *bc, struct palloc_heap *heap)
{
	UT_ASSERTne(bc, NULL);

	struct memory_block a = {1, 0, 1, 0};
	struct memory_block b = {2, 0, 2, 0};
	struct memory_block c = {3, 0, 3, 0};
	struct memory_block d = {5, 0, 5, 0};

	init_run_with_score(heap->layout, 1, 128);
	init_run_with_score(heap->layout, 2, 128);
	init_run_with_score(heap->layout, 3, 128);
	init_run_with_score(heap->layout, 5, 128);

	memblock_rebuild_state(heap, &a);
	memblock_rebuild_state(heap, &b);
	memblock_rebuild_state(heap, &c);
	memblock_rebuild_state(heap, &d);

	int ret;
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &d);
	UT_ASSERTeq(ret, 0);

	struct memory_block invalid_ret = {0, 0, 6, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &invalid_ret);
	UT_ASSERTeq(ret, ENOMEM);

	struct memory_block b_ret = {0, 0, 2, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &b_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(b_ret.chunk_id, b.chunk_id);

	struct memory_block a_ret = {0, 0, 1, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &a_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(a_ret.chunk_id, a.chunk_id);

	struct memory_block c_ret = {0, 0, 3, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(c_ret.chunk_id, c.chunk_id);

	struct memory_block d_ret = {0, 0, 4, 0}; /* one less than target */
	ret = bc->c_ops->get_rm_bestfit(bc, &d_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(d_ret.chunk_id, d.chunk_id);

	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);

	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);

	bc->c_ops->rm_all(bc);
	ret = bc->c_ops->is_empty(bc);
	UT_ASSERTeq(ret, 1);

	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);

	bc->c_ops->destroy(bc);
}
FUNC_MOCK_END

static void
test_detect(void)
{
	struct memory_block mhuge_used = { .chunk_id = 0, 0, 0, 0 };
	struct memory_block mhuge_free = { .chunk_id = 1, 0, 0, 0 };
	struct memory_block mrun = { .chunk_id = 2, 0, 0, 0 };

	struct heap_layout *layout = pop->heap.layout;
	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
	layout->zone0.chunk_headers[2].size_idx = 1;
	layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;

	memblock_rebuild_state(&pop->heap, &mhuge_used);
	memblock_rebuild_state(&pop->heap, &mhuge_free);
	memblock_rebuild_state(&pop->heap, &mrun);

	UT_ASSERTeq(mhuge_used.type, MEMORY_BLOCK_HUGE);
	UT_ASSERTeq(mhuge_free.type, MEMORY_BLOCK_HUGE);
	UT_ASSERTeq(mrun.type, MEMORY_BLOCK_RUN);
}

static void
test_block_size(void)
{
	struct memory_block mhuge = { .chunk_id = 0, 0, 0, 0 };
	struct memory_block mrun = { .chunk_id = 1, 0, 0, 0 };

	struct palloc_heap *heap = &pop->heap;
	struct heap_layout *layout = heap->layout;
	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_RUN;

	struct chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[1];
	run->block_size = 1234;

	memblock_rebuild_state(&pop->heap, &mhuge);
	memblock_rebuild_state(&pop->heap, &mrun);

	UT_ASSERTne(mhuge.m_ops, NULL);
	UT_ASSERTne(mrun.m_ops, NULL);
	UT_ASSERTeq(mhuge.m_ops->block_size(&mhuge), CHUNKSIZE);
	UT_ASSERTeq(mrun.m_ops->block_size(&mrun), 1234);
}

static void
test_prep_hdr(void)
{
	struct memory_block mhuge_used = { .chunk_id = 0, 0,
		.size_idx = 1, 0 };
	struct memory_block mhuge_free = { .chunk_id = 1, 0,
		.size_idx = 1, 0 };
	struct memory_block mrun_used = { .chunk_id = 2, 0,
		.size_idx = 4, .block_off = 0 };
	struct memory_block mrun_free = { .chunk_id = 2, 0,
		.size_idx = 4, .block_off = 4 };
	struct memory_block mrun_large_used = { .chunk_id = 2, 0,
		.size_idx = 64, .block_off = 64 };
	struct memory_block mrun_large_free = { .chunk_id = 2, 0,
		.size_idx = 64, .block_off = 128 };

	struct palloc_heap *heap = &pop->heap;
	struct heap_layout *layout = heap->layout;
	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
	layout->zone0.chunk_headers[2].size_idx = 1;
	layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;

	struct chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[2];
	run->bitmap[0] = 0b1111;
	run->bitmap[1] = ~0ULL;
	run->bitmap[2] = 0ULL;

	memblock_rebuild_state(heap, &mhuge_used);
	memblock_rebuild_state(heap, &mhuge_free);
	memblock_rebuild_state(heap, &mrun_used);
	memblock_rebuild_state(heap, &mrun_free);
	memblock_rebuild_state(heap, &mrun_large_used);
	memblock_rebuild_state(heap, &mrun_large_free);

	UT_ASSERTne(mhuge_used.m_ops, NULL);
	mhuge_used.m_ops->prep_hdr(&mhuge_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(layout->zone0.chunk_headers[0].type, CHUNK_TYPE_FREE);

	mhuge_free.m_ops->prep_hdr(&mhuge_free, MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(layout->zone0.chunk_headers[1].type, CHUNK_TYPE_USED);

	mrun_used.m_ops->prep_hdr(&mrun_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(run->bitmap[0], 0ULL);

	mrun_free.m_ops->prep_hdr(&mrun_free, MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(run->bitmap[0], 0b11110000);

	mrun_large_used.m_ops->prep_hdr(&mrun_large_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(run->bitmap[1], 0ULL);

	mrun_large_free.m_ops->prep_hdr(&mrun_large_free,
		MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(run->bitmap[2], ~0ULL);
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memblock");

	PMEMobjpool pool;
	pop = &pool;

	pop->heap.layout = ZALLOC(sizeof(struct heap_layout) +
		NCHUNKS * sizeof(struct chunk));

	test_detect();
	test_block_size();
	test_prep_hdr();

	FREE(pop->heap.layout);

	DONE(NULL);
}
/*
 * heap_reclaim_run -- checks the run for available memory if unclaimed.
 *
 * Returns 1 if the run was reclaimed as a whole free chunk, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);
	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's
		 * only one persistent value modification in the entire
		 * operation context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
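/*
 * Hedged worked example for the emptiness check above (assuming
 * bitmap_lastval marks the unusable tail of the last value, consistent
 * with heap_run_init below): for a class with bitmap_nval = 2 and 70
 * usable units per run, bitmap_lastval would be UINT64_MAX << 6 (bits
 * 6..63 of the second value set). The run is then empty iff
 * bitmap[0] == 0 and bitmap[1] == bitmap_lastval, which is exactly what
 * the scanning loop and the final comparison test.
 */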
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(struct palloc_heap *heap, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	uint32_t size_idx = get_zone_size_idx(zone_id, heap->rt->max_zone,
		heap->size);

	heap_chunk_init(heap, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}

/*
 * heap_run_init -- (internal) creates a run based on a chunk
 */
static void
heap_run_init(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];
	ASSERTne(m->size_idx, 0);
	size_t runsize = SIZEOF_RUN(run, m->size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->block_size = c->unit_size;
	pmemops_persist(&heap->p_ops, &run->block_size,
		sizeof(run->block_size));

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof(run->bitmap));

	unsigned nval = c->run.bitmap_nval;
	ASSERT(nval > 0);

	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof(uint64_t) * (nval - 1));
	run->bitmap[nval - 1] = c->run.bitmap_lastval;

	run->incarnation_claim = heap->run_id;
	VALGRIND_SET_CLEAN(&run->incarnation_claim,
		sizeof(run->incarnation_claim));
	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_persist(&heap->p_ops, run->bitmap, sizeof(run->bitmap));

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < m->size_idx; ++i) {
		data_hdr = &z->chunk_headers[m->chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ADD_TO_TX(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
		VALGRIND_REMOVE_FROM_TX(data_hdr, sizeof(*data_hdr));
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[m->chunk_id + 1],
		sizeof(struct chunk_header) * (m->size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = header_type_to_flag[c->header_type];
	*hdr = run_hdr;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
}

/*
 * heap_run_insert -- (internal) inserts and splits a block of memory into a run
 */
static void
heap_run_insert(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m, uint32_t size_idx, uint16_t block_off)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <=
		c->run.bitmap_nallocs);

	uint32_t unit_max = c->run.unit_max;
	struct memory_block nm = *m;
	nm.size_idx = unit_max - (block_off % unit_max);
	nm.block_off = block_off;
	if (nm.size_idx > size_idx)
		nm.size_idx = size_idx;

	do {
		bucket_insert_block(b, &nm);
		ASSERT(nm.size_idx <= UINT16_MAX);
		ASSERT(nm.block_off + nm.size_idx <= UINT16_MAX);
		nm.block_off = (uint16_t)(nm.block_off +
			(uint16_t)nm.size_idx);
		size_idx -= nm.size_idx;
		nm.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}

/*
 * heap_process_run_metadata -- (internal) parses the run bitmap
 */
static uint32_t
heap_process_run_metadata(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;
	uint32_t inserted_blocks = 0;

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	for (unsigned i = 0; i < c->run.bitmap_nval; ++i) {
		ASSERT(i < MAX_BITMAP_VALUES);
		uint64_t v = run->bitmap[i];
		ASSERT(BITS_PER_VALUE * i <= UINT16_MAX);
		block_off = (uint16_t)(BITS_PER_VALUE * i);
		if (v == 0) {
			heap_run_insert(heap, b, m, BITS_PER_VALUE, block_off);
			inserted_blocks += BITS_PER_VALUE;
			continue;
		} else if (v == UINT64_MAX) {
			continue;
		}

		for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				ASSERT(block_off >= block_size_idx);

				heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off -
						block_size_idx));
				inserted_blocks += block_size_idx;
				block_size_idx = 0;
			}

			if ((block_off++) == c->run.bitmap_nallocs) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			ASSERT(block_off >= block_size_idx);

			heap_run_insert(heap, b, m,
				block_size_idx,
				(uint16_t)(block_off - block_size_idx));
			inserted_blocks += block_size_idx;
			block_size_idx = 0;
		}
	}

	return inserted_blocks;
}

/*
 * heap_create_run -- (internal) initializes a new run on an existing free chunk
 */
static void
heap_create_run(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m)
{
	heap_run_init(heap, b, m);
	memblock_rebuild_state(heap, m);
	heap_process_run_metadata(heap, b, m);
}
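/*
 * Hedged worked example for the splitting in heap_run_insert: with
 * unit_max = 8, block_off = 5 and size_idx = 20, the first insert is
 * capped at 8 - (5 % 8) = 3 units so that no inserted block crosses a
 * unit_max boundary; the loop then inserts blocks of 8, 8 and 1 units,
 * i.e. four blocks at offsets 5, 8, 16 and 24.
 */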
/*
 * memblock_huge_init -- initializes a new huge memory block
 */
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct chunk_header nhdr = {
		.type = CHUNK_TYPE_FREE,
		.flags = 0,
		.size_idx = size_idx
	};

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	huge_write_footer(hdr, size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}

/*
 * memblock_run_init -- initializes a new run memory block
 */
struct memory_block
memblock_run_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	ASSERTne(size_idx, 0);

	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	struct chunk_run *run = heap_get_chunk_run(heap, &m);
	size_t runsize = SIZEOF_RUN(run, size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);

	run->hdr.block_size = unit_size;
	run->hdr.alignment = alignment;

	struct run_bitmap b;
	memblock_run_bitmap(&size_idx, flags, unit_size, alignment,
		run->content, &b);

	size_t bitmap_size = b.size;

	/* set all the bits */
	memset(b.values, 0xFF, bitmap_size);

	/* clear only the bits available for allocations from this bucket */
	memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));

	unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
	uint64_t last_value = UINT64_MAX << trailing_bits;
	b.values[b.nvalues - 1] = last_value;

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_flush(&heap->p_ops, run,
		sizeof(struct chunk_run_header) + bitmap_size);

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < size_idx; ++i) {
		data_hdr = &z->chunk_headers[chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[chunk_id + 1],
		sizeof(struct chunk_header) * (size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = flags;
	*hdr = run_hdr;
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	memblock_rebuild_state(heap, &m);
	return m;
}
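/*
 * Hedged worked example for the bitmap tail in memblock_run_init: with
 * b.nbits = 100 and RUN_BITS_PER_VALUE = 64, b.nvalues = 2 and
 * trailing_bits = 100 % 64 = 36, so last_value = UINT64_MAX << 36 leaves
 * bits 0..35 of the last value clear (usable units) and bits 36..63 set,
 * permanently marking the nonexistent tail of the run as "used".
 */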