void nova_init_blockmap(struct super_block *sb, int recovery) { struct nova_sb_info *sbi = NOVA_SB(sb); struct rb_root *tree; unsigned long num_used_block; struct nova_range_node *blknode; struct free_list *free_list; unsigned long per_list_blocks; int i; int ret; num_used_block = sbi->reserved_blocks; /* Divide the block range among per-CPU free lists */ per_list_blocks = sbi->num_blocks / sbi->cpus; sbi->per_list_blocks = per_list_blocks; for (i = 0; i < sbi->cpus; i++) { free_list = nova_get_free_list(sb, i); tree = &(free_list->block_free_tree); free_list->block_start = per_list_blocks * i; free_list->block_end = free_list->block_start + per_list_blocks - 1; /* For recovery, update these fields later */ if (recovery == 0) { free_list->num_free_blocks = per_list_blocks; if (i == 0) { free_list->block_start += num_used_block; free_list->num_free_blocks -= num_used_block; } blknode = nova_alloc_blocknode(sb); if (blknode == NULL) NOVA_ASSERT(0); blknode->range_low = free_list->block_start; blknode->range_high = free_list->block_end; ret = nova_insert_blocktree(sbi, tree, blknode); if (ret) { nova_err(sb, "%s failed\n", __func__); nova_free_blocknode(sb, blknode); return; } free_list->first_node = blknode; free_list->num_blocknode = 1; } } free_list = nova_get_free_list(sb, (sbi->cpus - 1)); if (free_list->block_end + 1 < sbi->num_blocks) { /* Shared free list gets any remaining blocks */ sbi->shared_free_list.block_start = free_list->block_end + 1; sbi->shared_free_list.block_end = sbi->num_blocks - 1; } }
/*
 * __nova_build_blocknode_map - rebuild the per-CPU free-block trees from
 * an allocated-block bitmap.
 * @sb:     super block
 * @bitmap: bitmap of in-use blocks (set bit == allocated)
 * @bsize:  number of bits in @bitmap
 * @scale:  shift applied to bit positions to convert them to block
 *          numbers (each bit covers 1 << @scale blocks)
 *
 * Scans for runs of zero bits (free ranges) and inserts each run into the
 * free list owning that block range.  When the scan reaches the end of
 * the current list's range, it advances to the next CPU's list, ending
 * with the shared list (SHARED_CPU) after the last CPU.
 */
static int __nova_build_blocknode_map(struct super_block *sb,
	unsigned long *bitmap, unsigned long bsize, unsigned long scale)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct free_list *free_list;
	unsigned long next = 0;
	unsigned long low = 0;
	unsigned long start, end;
	int cpuid = 0;

	free_list = nova_get_free_list(sb, cpuid);
	start = free_list->block_start;
	end = free_list->block_end + 1;
	while (1) {
		/* Find the start of the next free run */
		next = find_next_zero_bit(bitmap, end, start);
		if (next == bsize)
			break;
		if (next == end) {
			/* Exhausted this list's range; advance lists */
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;

			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
			continue;
		}

		low = next;
		/* Find the end of the free run (next allocated bit) */
		next = find_next_bit(bitmap, end, next);
		if (nova_insert_blocknode_map(sb, cpuid,
				low << scale , (next << scale) - 1)) {
			nova_dbg("Error: could not insert %lu - %lu\n",
				low << scale, ((next << scale) - 1));
		}
		start = next;
		if (next == bsize)
			break;
		if (next == end) {
			/* Run ended exactly at this list's boundary */
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;

			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
		}
	}
	return 0;
}
static int nova_insert_blocknode_map(struct super_block *sb, int cpuid, unsigned long low, unsigned long high) { struct nova_sb_info *sbi = NOVA_SB(sb); struct free_list *free_list; struct rb_root *tree; struct nova_range_node *blknode = NULL; unsigned long num_blocks = 0; int ret; num_blocks = high - low + 1; nova_dbgv("%s: cpu %d, low %lu, high %lu, num %lu\n", __func__, cpuid, low, high, num_blocks); free_list = nova_get_free_list(sb, cpuid); tree = &(free_list->block_free_tree); blknode = nova_alloc_blocknode(sb); if (blknode == NULL) return -ENOMEM; blknode->range_low = low; blknode->range_high = high; ret = nova_insert_blocktree(sbi, tree, blknode); if (ret) { nova_err(sb, "%s failed\n", __func__); nova_free_blocknode(sb, blknode); goto out; } if (!free_list->first_node) free_list->first_node = blknode; free_list->num_blocknode++; free_list->num_free_blocks += num_blocks; out: return ret; }
static void nova_destroy_blocknode_tree(struct super_block *sb, int cpu) { struct free_list *free_list; free_list = nova_get_free_list(sb, cpu); nova_destroy_range_node_tree(sb, &free_list->block_free_tree); }
void nova_print_alloc_stats(struct super_block *sb) { struct nova_sb_info *sbi = NOVA_SB(sb); struct free_list *free_list; unsigned long alloc_log_count = 0; unsigned long alloc_log_pages = 0; unsigned long alloc_data_count = 0; unsigned long alloc_data_pages = 0; unsigned long free_log_count = 0; unsigned long freed_log_pages = 0; unsigned long free_data_count = 0; unsigned long freed_data_pages = 0; int i; printk("=========== NOVA allocation stats ===========\n"); printk("Alloc %llu, alloc steps %lu, average %llu\n", Countstats[new_data_blocks_t], alloc_steps, Countstats[new_data_blocks_t] ? alloc_steps / Countstats[new_data_blocks_t] : 0); printk("Free %llu, free steps %lu, average %llu\n", Countstats[free_data_t], free_steps, Countstats[free_data_t] ? free_steps / Countstats[free_data_t] : 0); printk("Fast GC %llu, check pages %llu, free pages %lu, average %llu\n", Countstats[fast_gc_t], fast_checked_pages, fast_gc_pages, Countstats[fast_gc_t] ? fast_gc_pages / Countstats[fast_gc_t] : 0); printk("Thorough GC %llu, checked pages %llu, free pages %lu, " "average %llu\n", Countstats[thorough_gc_t], thorough_checked_pages, thorough_gc_pages, Countstats[thorough_gc_t] ? 
thorough_gc_pages / Countstats[thorough_gc_t] : 0); for (i = 0; i < sbi->cpus; i++) { free_list = nova_get_free_list(sb, i); alloc_log_count += free_list->alloc_log_count; alloc_log_pages += free_list->alloc_log_pages; alloc_data_count += free_list->alloc_data_count; alloc_data_pages += free_list->alloc_data_pages; free_log_count += free_list->free_log_count; freed_log_pages += free_list->freed_log_pages; free_data_count += free_list->free_data_count; freed_data_pages += free_list->freed_data_pages; } printk("alloc log count %lu, allocated log pages %lu, " "alloc data count %lu, allocated data pages %lu, " "free log count %lu, freed log pages %lu, " "free data count %lu, freed data pages %lu\n", alloc_log_count, alloc_log_pages, alloc_data_count, alloc_data_pages, free_log_count, freed_log_pages, free_data_count, freed_data_pages); printk("Persistent barriers %lu\n", barriers); }
/*
 * Append the range nodes of @cpu's free-block tree to the log at
 * @temp_tail and return the new tail position.
 */
static u64 nova_save_free_list_blocknodes(struct super_block *sb, int cpu,
	u64 temp_tail)
{
	struct free_list *list = nova_get_free_list(sb, cpu);

	return nova_save_range_nodes_to_log(sb, &list->block_free_tree,
						temp_tail, 0);
}
int nova_alloc_block_free_lists(struct super_block *sb) { struct nova_sb_info *sbi = NOVA_SB(sb); struct free_list *free_list; int i; sbi->free_lists = kzalloc(sbi->cpus * sizeof(struct free_list), GFP_KERNEL); if (!sbi->free_lists) return -ENOMEM; for (i = 0; i < sbi->cpus; i++) { free_list = nova_get_free_list(sb, i); free_list->block_free_tree = RB_ROOT; spin_lock_init(&free_list->s_lock); } return 0; }
/*
 * nova_save_blocknode_mappings_to_log - persist all free-list range nodes
 * to the blocknode inode's log (called at clean unmount).
 *
 * Counts the range nodes across every per-CPU list plus the shared list,
 * allocates enough log pages to hold them, stamps the super block's
 * s_wtime field, then writes the nodes and updates log head and tail.
 * On log-page allocation failure the mappings are simply not saved.
 */
void nova_save_blocknode_mappings_to_log(struct super_block *sb)
{
	struct nova_inode *pi = nova_get_inode_by_ino(sb, NOVA_BLOCKNODE_INO);
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct nova_super_block *super;
	struct free_list *free_list;
	unsigned long num_blocknode = 0;
	unsigned long num_pages;
	int allocated;
	u64 new_block = 0;
	u64 temp_tail;
	int i;

	/* Allocate log pages before save blocknode mappings */
	for (i = 0; i < sbi->cpus; i++) {
		free_list = nova_get_free_list(sb, i);
		num_blocknode += free_list->num_blocknode;
		nova_dbgv("%s: free list %d: %lu nodes\n", __func__,
				i, free_list->num_blocknode);
	}

	free_list = nova_get_free_list(sb, SHARED_CPU);
	num_blocknode += free_list->num_blocknode;
	nova_dbgv("%s: shared list: %lu nodes\n", __func__,
				free_list->num_blocknode);

	/* RANGENODE_PER_PAGE nodes fit per log page; round up */
	num_pages = num_blocknode / RANGENODE_PER_PAGE;
	if (num_blocknode % RANGENODE_PER_PAGE)
		num_pages++;

	allocated = nova_allocate_inode_log_pages(sb, pi, num_pages,
						&new_block);
	if (allocated != num_pages) {
		nova_dbg("Error saving blocknode mappings: %d\n", allocated);
		return;
	}

	/*
	 * save the total allocated blocknode mappings
	 * in super block
	 * No transaction is needed as we will recover the fields
	 * via failure recovery
	 */
	super = nova_get_super(sb);
	nova_memunlock_range(sb, &super->s_wtime, NOVA_FAST_MOUNT_FIELD_SIZE);
	super->s_wtime = cpu_to_le32(get_seconds());
	nova_memlock_range(sb, &super->s_wtime, NOVA_FAST_MOUNT_FIELD_SIZE);
	nova_flush_buffer(super, NOVA_SB_SIZE, 0);

	/* Finally update log head and tail */
	pi->log_head = new_block;
	nova_flush_buffer(&pi->log_head, CACHELINE_SIZE, 0);

	temp_tail = new_block;
	for (i = 0; i < sbi->cpus; i++) {
		temp_tail = nova_save_free_list_blocknodes(sb, i, temp_tail);
	}

	temp_tail = nova_save_free_list_blocknodes(sb, SHARED_CPU, temp_tail);
	nova_update_tail(pi, temp_tail);
	nova_dbg("%s: %lu blocknodes, %lu log pages, pi head 0x%llx, "
		"tail 0x%llx\n", __func__, num_blocknode, num_pages,
		pi->log_head, pi->log_tail);
}
static int nova_init_blockmap_from_inode(struct super_block *sb) { struct nova_sb_info *sbi = NOVA_SB(sb); struct nova_inode *pi = nova_get_inode_by_ino(sb, NOVA_BLOCKNODE_INO); struct free_list *free_list; struct nova_range_node_lowhigh *entry; struct nova_range_node *blknode; size_t size = sizeof(struct nova_range_node_lowhigh); u64 curr_p; u64 cpuid; int ret = 0; curr_p = pi->log_head; if (curr_p == 0) { nova_dbg("%s: pi head is 0!\n", __func__); return -EINVAL; } while (curr_p != pi->log_tail) { if (is_last_entry(curr_p, size)) { curr_p = next_log_page(sb, curr_p); } if (curr_p == 0) { nova_dbg("%s: curr_p is NULL!\n", __func__); NOVA_ASSERT(0); ret = -EINVAL; break; } entry = (struct nova_range_node_lowhigh *)nova_get_block(sb, curr_p); blknode = nova_alloc_blocknode(sb); if (blknode == NULL) NOVA_ASSERT(0); blknode->range_low = le64_to_cpu(entry->range_low); blknode->range_high = le64_to_cpu(entry->range_high); cpuid = get_cpuid(sbi, blknode->range_low); /* FIXME: Assume NR_CPUS not change */ free_list = nova_get_free_list(sb, cpuid); ret = nova_insert_blocktree(sbi, &free_list->block_free_tree, blknode); if (ret) { nova_err(sb, "%s failed\n", __func__); nova_free_blocknode(sb, blknode); NOVA_ASSERT(0); nova_destroy_blocknode_trees(sb); goto out; } free_list->num_blocknode++; if (free_list->num_blocknode == 1) free_list->first_node = blknode; free_list->num_free_blocks += blknode->range_high - blknode->range_low + 1; curr_p += sizeof(struct nova_range_node_lowhigh); } out: nova_free_inode_log(sb, pi); return ret; }