/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(heap, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(heap, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = heap->rt->default_bucket;
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0, 0, 0,
		NULL, NULL};
	memblock_rebuild_state(heap, &m);
	bucket_insert_block(def_bucket, &m);
}
/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(PMEMobjpool *pop,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = &pop->heap->layout->zones[zone_id];
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(pop, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(pop, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = pop->heap->buckets[DEFAULT_BUCKET];
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0};
	if (bucket_insert_block(def_bucket, m) != 0)
		ERR("bucket_insert_block failed during resize");
}
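/*
 * Illustrative sketch (not part of the heap source): the index arithmetic
 * shared by both heap_resize_chunk variants above. Chunks are addressed by
 * index within a zone, so carving new_size_idx chunks off the front of a
 * free chunk leaves a remainder that starts new_size_idx chunks further in.
 * The helper and its names are hypothetical.
 */
static inline void
chunk_split_calc(uint32_t chunk_id, uint32_t size_idx, uint32_t new_size_idx,
	uint32_t *rem_chunk_id, uint32_t *rem_size_idx)
{
	ASSERT(new_size_idx < size_idx);
	*rem_chunk_id = chunk_id + new_size_idx;	/* remainder start */
	*rem_size_idx = size_idx - new_size_idx;	/* remainder length */
}
/*
 * E.g. splitting a 10-chunk free block at chunk_id 4 with new_size_idx 3
 * leaves a 7-chunk free remainder starting at chunk_id 7.
 */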
/*
 * heap_reclaim_run -- checks the run for available memory if unclaimed.
 *
 * Returns 1 if the chunk was reclaimed, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);

	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's
		 * only one persistent value modification in the entire
		 * operation context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
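/*
 * Illustrative sketch (hypothetical helper, not in the tree): the emptiness
 * test heap_reclaim_run performs inline above. A set bit marks an allocated
 * unit, and the padding bits past bitmap_nallocs in the last word are kept
 * permanently set, so a run is empty exactly when every word but the last
 * is zero and the last word equals bitmap_lastval. Assumes nval > 0.
 */
static inline int
run_is_empty(const uint64_t *bitmap, unsigned nval, uint64_t lastval)
{
	for (unsigned i = 0; i < nval - 1; ++i)
		if (bitmap[i] != 0)	/* at least one unit still allocated */
			return 0;

	return bitmap[nval - 1] == lastval;
}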
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(struct palloc_heap *heap, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	uint32_t size_idx = get_zone_size_idx(zone_id, heap->rt->max_zone,
			heap->size);

	heap_chunk_init(heap, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}

/*
 * heap_run_init -- (internal) creates a run based on a chunk
 */
static void
heap_run_init(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];
	ASSERTne(m->size_idx, 0);
	size_t runsize = SIZEOF_RUN(run, m->size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);

	run->block_size = c->unit_size;
	pmemops_persist(&heap->p_ops, &run->block_size,
			sizeof(run->block_size));

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof(run->bitmap));

	unsigned nval = c->run.bitmap_nval;
	ASSERT(nval > 0);

	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof(uint64_t) * (nval - 1));
	run->bitmap[nval - 1] = c->run.bitmap_lastval;

	run->incarnation_claim = heap->run_id;
	VALGRIND_SET_CLEAN(&run->incarnation_claim,
		sizeof(run->incarnation_claim));

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_persist(&heap->p_ops, run->bitmap, sizeof(run->bitmap));

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < m->size_idx; ++i) {
		data_hdr = &z->chunk_headers[m->chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ADD_TO_TX(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
		VALGRIND_REMOVE_FROM_TX(data_hdr, sizeof(*data_hdr));
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[m->chunk_id + 1],
		sizeof(struct chunk_header) * (m->size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = header_type_to_flag[c->header_type];
	*hdr = run_hdr;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
}

/*
 * heap_run_insert -- (internal) inserts and splits a block of memory
 *	into a run
 */
static void
heap_run_insert(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m, uint32_t size_idx, uint16_t block_off)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <= c->run.bitmap_nallocs);

	uint32_t unit_max = c->run.unit_max;
	struct memory_block nm = *m;
	nm.size_idx = unit_max - (block_off % unit_max);
	nm.block_off = block_off;
	if (nm.size_idx > size_idx)
		nm.size_idx = size_idx;

	do {
		bucket_insert_block(b, &nm);
		ASSERT(nm.size_idx <= UINT16_MAX);
		ASSERT(nm.block_off + nm.size_idx <= UINT16_MAX);
		nm.block_off = (uint16_t)(nm.block_off + (uint16_t)nm.size_idx);
		size_idx -= nm.size_idx;
		nm.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}

/*
 * heap_process_run_metadata -- (internal) parses the run bitmap
 */
static uint32_t
heap_process_run_metadata(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;
	uint32_t inserted_blocks = 0;

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	for (unsigned i = 0; i < c->run.bitmap_nval; ++i) {
		ASSERT(i < MAX_BITMAP_VALUES);
		uint64_t v = run->bitmap[i];
		ASSERT(BITS_PER_VALUE * i <= UINT16_MAX);
		block_off = (uint16_t)(BITS_PER_VALUE * i);
		if (v == 0) {
			heap_run_insert(heap, b, m, BITS_PER_VALUE, block_off);
			inserted_blocks += BITS_PER_VALUE;
			continue;
		} else if (v == UINT64_MAX) {
			continue;
		}

		for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				ASSERT(block_off >= block_size_idx);

				heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
				inserted_blocks += block_size_idx;
				block_size_idx = 0;
			}

			if ((block_off++) == c->run.bitmap_nallocs) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			ASSERT(block_off >= block_size_idx);

			heap_run_insert(heap, b, m,
				block_size_idx,
				(uint16_t)(block_off - block_size_idx));
			inserted_blocks += block_size_idx;
			block_size_idx = 0;
		}
	}

	return inserted_blocks;
}

/*
 * heap_create_run -- (internal) initializes a new run on an existing
 *	free chunk
 */
static void
heap_create_run(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m)
{
	heap_run_init(heap, b, m);
	memblock_rebuild_state(heap, m);
	heap_process_run_metadata(heap, b, m);
}
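/*
 * Illustrative sketch (hypothetical helper): the per-word scan that
 * heap_process_run_metadata performs above, reduced to its core. A clear
 * bit is a free unit; each maximal span of clear bits becomes one block
 * handed to the bucket. For v = 0xFF00FF00FF00FF00 this reports four
 * 8-unit spans at offsets 0, 16, 32 and 48. The real code additionally
 * splits each span on unit_max boundaries via heap_run_insert and stops
 * scanning at bitmap_nallocs.
 */
static void
bitmap_value_spans(uint64_t v, void (*report)(unsigned off, unsigned len))
{
	unsigned len = 0;

	for (unsigned j = 0; j < 64; ++j) {
		if ((v & (1ULL << j)) == 0) {
			len++; /* extend the current span of free units */
		} else if (len != 0) {
			report(j - len, len); /* span ended at bit j */
			len = 0;
		}
	}

	if (len != 0) /* span reaching the end of the word */
		report(64 - len, len);
}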
/*
 * heap_degrade_run_if_empty -- makes a chunk out of an empty run
 */
int
heap_degrade_run_if_empty(PMEMobjpool *pop, struct bucket *b,
	struct memory_block m)
{
	struct zone *z = &pop->heap->layout->zones[m.zone_id];
	struct chunk_header *hdr = &z->chunk_headers[m.chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_RUN);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m.chunk_id];

	int err = 0;
	if ((err = pthread_mutex_lock(heap_get_run_lock(pop, m))) != 0)
		return err;

	int i;
	for (i = 0; i < bucket_bitmap_nval(b) - 1; ++i)
		if (run->bitmap[i] != 0)
			goto out;

	if (run->bitmap[i] != bucket_bitmap_lastval(b))
		goto out;

	m.block_off = 0;
	m.size_idx = RUN_UNIT_MAX;
	uint32_t size_idx_sum = 0;
	while (size_idx_sum != bucket_bitmap_nallocs(b)) {
		if (bucket_get_rm_block_exact(b, m) != 0) {
			ERR("persistent and volatile state mismatched");
			ASSERT(0);
		}

		size_idx_sum += m.size_idx;

		m.block_off += RUN_UNIT_MAX;
		if (m.block_off + RUN_UNIT_MAX > bucket_bitmap_nallocs(b))
			m.size_idx = bucket_bitmap_nallocs(b) - m.block_off;
		else
			m.size_idx = RUN_UNIT_MAX;
	}

	struct bucket *defb = pop->heap->buckets[DEFAULT_BUCKET];
	if ((err = bucket_lock(defb)) != 0) {
		ERR("Failed to lock default bucket");
		ASSERT(0);
	}

	m.block_off = 0;
	m.size_idx = 1;
	heap_chunk_init(pop, hdr, CHUNK_TYPE_FREE, m.size_idx);

	uint64_t *mhdr;
	uint64_t op_result;
	struct memory_block fm =
			heap_free_block(pop, defb, m, &mhdr, &op_result);
	VALGRIND_ADD_TO_TX(mhdr, sizeof (*mhdr));
	*mhdr = op_result;
	VALGRIND_REMOVE_FROM_TX(mhdr, sizeof (*mhdr));
	pop->persist(mhdr, sizeof (*mhdr));

	if ((err = bucket_insert_block(defb, fm)) != 0)
		ERR("Failed to update heap volatile state");

	bucket_unlock(defb);

out:
	if (pthread_mutex_unlock(heap_get_run_lock(pop, m)) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
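/*
 * Illustrative sketch (assumed semantics, hypothetical helper): one way a
 * bitmap_lastval-style sentinel can be derived. The bits of the last bitmap
 * word that map past the run's allocatable units are kept permanently set,
 * so an empty run's last word compares equal to exactly this value, as in
 * the check above. Assumes the padding occupies the high-order bits and
 * that nallocs units span nval 64-bit words with fewer than 64 spare bits.
 */
static inline uint64_t
bitmap_last_value(unsigned nallocs, unsigned nval)
{
	unsigned unused = nval * 64 - nallocs; /* padding bits, 0..63 */

	/* all-zero when the last word is fully allocatable */
	return unused ? (UINT64_MAX << (64 - unused)) : 0;
}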
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(PMEMobjpool *pop, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	*hdr = nhdr; /* write the entire header (8 bytes) at once */

	pop->persist(hdr, sizeof (*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(PMEMobjpool *pop, uint32_t zone_id)
{
	struct zone *z = &pop->heap->layout->zones[zone_id];
	uint32_t size_idx = get_zone_size_idx(zone_id, pop->heap->max_zone,
			pop->heap_size);

	heap_chunk_init(pop, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr; /* write the entire header (8 bytes) at once */

	pop->persist(&z->header, sizeof (z->header));
}

/*
 * heap_init_run -- (internal) creates a run based on a chunk
 */
static void
heap_init_run(PMEMobjpool *pop, struct bucket *b,
	struct chunk_header *hdr, struct chunk_run *run)
{
	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, sizeof (*run));

	run->block_size = bucket_unit_size(b);
	pop->persist(&run->block_size, sizeof (run->block_size));

	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof (run->bitmap));

	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0,
		sizeof (uint64_t) * (bucket_bitmap_nval(b) - 1));
	run->bitmap[bucket_bitmap_nval(b) - 1] = bucket_bitmap_lastval(b);

	VALGRIND_REMOVE_FROM_TX(run, sizeof (*run));

	pop->persist(run->bitmap, sizeof (run->bitmap));

	VALGRIND_ADD_TO_TX(hdr, sizeof (*hdr));
	hdr->type = CHUNK_TYPE_RUN;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof (*hdr));

	pop->persist(hdr, sizeof (*hdr));
}
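/*
 * Illustrative sketch (hypothetical types and helper): the pattern behind
 * the "write the entire header (8 bytes) at once" comments above. The
 * header is built completely on the stack and published with a single
 * 8-byte aligned store before being persisted, so concurrent readers (and
 * a crash at any point) observe either the old or the new header, never a
 * half-written one. This relies on the compiler emitting one 8-byte store
 * for the struct assignment.
 */
struct hdr8 {			/* stand-in for an 8-byte header */
	uint16_t type;
	uint16_t flags;
	uint32_t size_idx;
};

static void
publish_hdr8(struct hdr8 *dst, struct hdr8 src,
	void (*persist_fn)(const void *addr, size_t len))
{
	*dst = src;			/* one full-header store */
	persist_fn(dst, sizeof(*dst));	/* then flush it to the medium */
}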