int convert_v1_v2(void *psf, void *addr) { poolset = psf; pop = addr; heap = (struct heap_layout *)((char *)addr + pop->heap_offset); if (le32toh(pop->hdr.major) != SOURCE_MAJOR_VERSION) return -1; pop->hdr.major = htole32(TARGET_MAJOR_VERSION); util_checksum(&pop->hdr, sizeof(pop->hdr), &pop->hdr.checksum, 1); struct lane_layout *lanes = (struct lane_layout *)((char *)addr + pop->lanes_offset); for (uint64_t i = 0; i < pop->nlanes; ++i) { lane_alloc_recover((struct allocator_lane_section *) &lanes[i].sections[LANE_SECTION_ALLOCATOR]); lane_list_recover((struct lane_list_section *) &lanes[i].sections[LANE_SECTION_LIST]); lane_tx_recover((struct lane_tx_layout *) &lanes[i].sections[LANE_SECTION_TRANSACTION]); } memset(lanes, 0, pop->nlanes * sizeof(struct lane_layout)); pmempool_convert_persist(poolset, lanes, pop->nlanes * sizeof(struct lane_layout)); return 0; }
/*
 * restore_range -- copies a transaction undo-log range back into the pool
 * image and persists the restored bytes.
 */
static void restore_range(struct tx_range *r)
{
	char *target = (char *)pop + r->offset;

	memcpy(target, r->data, r->size);
	pmempool_convert_persist(poolset, target, r->size);
}
/*
 * pfree -- releases the allocation pointed to by *off and zeroes the offset
 *
 * For a whole-chunk (CHUNK_TYPE_USED) allocation the chunk header is simply
 * flipped to FREE.  For a run-based allocation the bits covering the object
 * are cleared in the run bitmap.  In both cases the modified metadata and
 * the cleared offset are persisted.  Always returns 0 (a zero offset is a
 * no-op).
 */
static int pfree(uint64_t *off)
{
	uint64_t offset = *off;
	if (offset == 0)
		return 0; /* nothing allocated here */

	/* locate the allocation header and its owning zone/chunk */
	PMEMoid oid;
	oid.off = offset;
	struct allocation_header *hdr = &D_RW_OBJ(oid)->alloch;
	struct zone *z = ZID_TO_ZONE(heap, hdr->zone_id);
	struct chunk_header *chdr = &z->chunk_headers[hdr->chunk_id];

	if (chdr->type == CHUNK_TYPE_USED) {
		/* whole-chunk allocation: mark the chunk free and persist */
		chdr->type = CHUNK_TYPE_FREE;
		pmempool_convert_persist(poolset, &chdr->type,
			sizeof(chdr->type));
		*off = 0;
		pmempool_convert_persist(poolset, off, sizeof(*off));
		return 0;
	} else if (chdr->type != CHUNK_TYPE_RUN) {
		/* only USED and RUN chunks can hold live objects */
		assert(0);
	}

	/* run-based allocation: clear the object's bits in the run bitmap */
	struct chunk_run *run =
		(struct chunk_run *)&z->chunks[hdr->chunk_id].data;
	uintptr_t diff = (uintptr_t)hdr - (uintptr_t)&run->data;
	/*
	 * NOTE(review): the intermediate (uint16_t) cast truncates the block
	 * index to 16 bits -- presumably a run never holds more than 65535
	 * blocks, so this is harmless; confirm against the layout limits.
	 */
	uint64_t block_off = (uint16_t)((size_t)diff / run->block_size);
	uint64_t size_idx = CALC_SIZE_IDX(run->block_size, hdr->size);
	/*
	 * Mask of the size_idx consecutive bits this allocation occupies
	 * within one bitmap word.  NOTE(review): assumes the allocation never
	 * straddles a bitmap-word boundary and size_idx < 64 -- TODO confirm
	 * the allocator guarantees both.
	 */
	uint64_t bmask = ((1ULL << size_idx) - 1ULL) <<
		(block_off % BITS_PER_VALUE);
	uint64_t bpos = block_off / BITS_PER_VALUE;
	run->bitmap[bpos] &= ~bmask;
	pmempool_convert_persist(poolset, &run->bitmap[bpos],
		sizeof(run->bitmap[bpos]));

	/* finally clear and persist the caller's offset */
	*off = 0;
	pmempool_convert_persist(poolset, off, sizeof(*off));
	return 0;
}
/*
 * redo_recover -- applies a complete redo log to the pool image
 *
 * Each entry stores an (offset, value) pair; the value is written to
 * pop + offset and persisted.  The last entry of a fully-written log is
 * marked with REDO_FINISH_FLAG; a log with no finish flag was never
 * completed and must be skipped rather than replayed.
 */
static void redo_recover(struct redo_log *redo, size_t nentries)
{
	size_t nflags = redo_log_nflags(redo, nentries);
	if (nflags == 0)
		return; /* incomplete log -- nothing to replay */

	/*
	 * A complete log carries exactly one finish flag.  The previous
	 * code asserted nflags != 1, which aborts on every valid log in
	 * debug builds; the intended invariant is nflags == 1.
	 */
	assert(nflags == 1);

	uint64_t *val;

	/* apply every entry up to (but excluding) the flagged last one */
	while ((redo->offset & REDO_FINISH_FLAG) == 0) {
		val = (uint64_t *)((uintptr_t)pop + redo->offset);
		*val = redo->value;
		pmempool_convert_persist(poolset, val, sizeof(uint64_t));
		redo++;
	}

	/* last entry: strip the finish flag from the offset before applying */
	uint64_t offset = redo->offset & REDO_FLAG_MASK;
	val = (uint64_t *)((uintptr_t)pop + offset);
	*val = redo->value;
	pmempool_convert_persist(poolset, val, sizeof(uint64_t));
}
/*
 * convert_v1_v2 -- recovers all lane sections and clears the lane area
 *
 * NOTE(review): this is a second external definition of convert_v1_v2 --
 * an earlier copy appears above in this file, which by itself is a
 * redefinition error at compile/link time.  Unlike that copy, this one
 * performs no layout major-version check and never updates
 * pop->hdr.major or its checksum, so a pool converted through this path
 * would still advertise the old version.  This looks like a stale
 * duplicate; confirm which copy the build actually uses and drop the
 * other.
 */
int convert_v1_v2(void *psf, void *addr)
{
	poolset = psf;
	pop = addr;
	heap = (struct heap_layout *)((char *)addr + pop->heap_offset);

	struct lane_layout *lanes =
		(struct lane_layout *)((char *)addr + pop->lanes_offset);

	/* replay/complete any in-flight operation recorded in each lane */
	for (uint64_t i = 0; i < pop->nlanes; ++i) {
		lane_alloc_recover((struct allocator_lane_section *)
			&lanes[i].sections[LANE_SECTION_ALLOCATOR]);
		lane_list_recover((struct lane_list_section *)
			&lanes[i].sections[LANE_SECTION_LIST]);
		lane_tx_recover((struct lane_tx_layout *)
			&lanes[i].sections[LANE_SECTION_TRANSACTION]);
	}

	/* lanes are fully recovered -- wipe and persist the lane area */
	memset(lanes, 0, pop->nlanes * sizeof(struct lane_layout));
	pmempool_convert_persist(poolset, lanes,
		pop->nlanes * sizeof(struct lane_layout));

	return 0;
}