void nova_init_blockmap(struct super_block *sb, int recovery) { struct nova_sb_info *sbi = NOVA_SB(sb); struct rb_root *tree; unsigned long num_used_block; struct nova_range_node *blknode; struct free_list *free_list; unsigned long per_list_blocks; int i; int ret; num_used_block = sbi->reserved_blocks; /* Divide the block range among per-CPU free lists */ per_list_blocks = sbi->num_blocks / sbi->cpus; sbi->per_list_blocks = per_list_blocks; for (i = 0; i < sbi->cpus; i++) { free_list = nova_get_free_list(sb, i); tree = &(free_list->block_free_tree); free_list->block_start = per_list_blocks * i; free_list->block_end = free_list->block_start + per_list_blocks - 1; /* For recovery, update these fields later */ if (recovery == 0) { free_list->num_free_blocks = per_list_blocks; if (i == 0) { free_list->block_start += num_used_block; free_list->num_free_blocks -= num_used_block; } blknode = nova_alloc_blocknode(sb); if (blknode == NULL) NOVA_ASSERT(0); blknode->range_low = free_list->block_start; blknode->range_high = free_list->block_end; ret = nova_insert_blocktree(sbi, tree, blknode); if (ret) { nova_err(sb, "%s failed\n", __func__); nova_free_blocknode(sb, blknode); return; } free_list->first_node = blknode; free_list->num_blocknode = 1; } } free_list = nova_get_free_list(sb, (sbi->cpus - 1)); if (free_list->block_end + 1 < sbi->num_blocks) { /* Shared free list gets any remaining blocks */ sbi->shared_free_list.block_start = free_list->block_end + 1; sbi->shared_free_list.block_end = sbi->num_blocks - 1; } }
/*
 * Rebuild the in-DRAM state of one inode from its persistent copy at
 * @pi_addr, dispatching on the file type to the matching log-replay
 * routine.  Returns 0 on success or -EINVAL if the on-media inode is
 * not marked valid.
 */
int nova_rebuild_inode(struct super_block *sb, struct nova_inode_info *si,
	u64 pi_addr)
{
	struct nova_inode_info_header *sih = &si->header;
	struct nova_inode *pi;
	unsigned long ino;
	u16 mode;

	pi = (struct nova_inode *)nova_get_block(sb, pi_addr);
	if (!pi)
		NOVA_ASSERT(0);

	/* A cleared valid flag means this slot holds no live inode */
	if (pi->valid == 0)
		return -EINVAL;

	ino = pi->nova_ino;
	nova_dbgv("%s: inode %lu, addr 0x%llx, valid %d, "
			"head 0x%llx, tail 0x%llx\n",
			__func__, ino, pi_addr, pi->valid,
			pi->log_head, pi->log_tail);

	mode = __le16_to_cpu(pi->i_mode);
	nova_init_header(sb, sih, mode);
	sih->ino = ino;

	switch (mode & S_IFMT) {
	case S_IFLNK:
		/* Treat symlink files as normal files */
		/* Fall through */
	case S_IFREG:
		nova_rebuild_file_inode_tree(sb, pi, pi_addr, sih);
		break;
	case S_IFDIR:
		nova_rebuild_dir_inode_tree(sb, pi, pi_addr, sih);
		break;
	default:
		/* In case of special inode, walk the log */
		if (pi->log_head)
			nova_rebuild_file_inode_tree(sb, pi, pi_addr, sih);
		sih->pi_addr = pi_addr;
		break;
	}

	return 0;
}
/*
 * During failure recovery, mark the inode range [ino_low, ino_high] as
 * in use in the owning CPU's inode tree.  Inodes are striped across CPUs
 * by (ino % cpus), so both endpoints must map to the same CPU; the range
 * is stored in "internal" (per-CPU, divided-down) numbering.  Adjacent
 * existing range nodes are merged where possible; otherwise a fresh node
 * is inserted.
 *
 * Returns 0 on success, or the error from nova_find_free_slot() /
 * nova_insert_inodetree().  BUGs on a malformed range.
 */
static int nova_failure_insert_inodetree(struct super_block *sb,
	unsigned long ino_low, unsigned long ino_high)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct inode_map *inode_map;
	struct nova_range_node *prev = NULL, *next = NULL;
	struct nova_range_node *new_node;
	unsigned long internal_low, internal_high;
	int cpu;
	struct rb_root *tree;
	int ret;

	if (ino_low > ino_high) {
		nova_err(sb, "%s: ino low %lu, ino high %lu\n",
				__func__, ino_low, ino_high);
		BUG();
	}

	/* The whole range must belong to one CPU's stripe */
	cpu = ino_low % sbi->cpus;
	if (ino_high % sbi->cpus != cpu) {
		nova_err(sb, "%s: ino low %lu, ino high %lu\n",
				__func__, ino_low, ino_high);
		BUG();
	}

	/* Convert to per-CPU internal numbering */
	internal_low = ino_low / sbi->cpus;
	internal_high = ino_high / sbi->cpus;
	inode_map = &sbi->inode_maps[cpu];
	tree = &inode_map->inode_inuse_tree;

	mutex_lock(&inode_map->inode_table_mutex);
	/* On success, prev/next bracket the free slot holding the range */
	ret = nova_find_free_slot(sbi, tree, internal_low, internal_high,
					&prev, &next);
	if (ret) {
		nova_dbg("%s: ino %lu - %lu already exists!: %d\n",
					__func__, ino_low, ino_high, ret);
		mutex_unlock(&inode_map->inode_table_mutex);
		return ret;
	}

	if (prev && next && (internal_low == prev->range_high + 1) &&
			(internal_high + 1 == next->range_low)) {
		/* fits the hole: merge prev and next into one node */
		rb_erase(&next->node, tree);
		inode_map->num_range_node_inode--;
		prev->range_high = next->range_high;
		nova_free_inode_node(sb, next);
		goto finish;
	}
	if (prev && (internal_low == prev->range_high + 1)) {
		/* Aligns left: extend prev upward to cover the range */
		prev->range_high += internal_high - internal_low + 1;
		goto finish;
	}
	if (next && (internal_high + 1 == next->range_low)) {
		/* Aligns right: extend next downward to cover the range */
		next->range_low -= internal_high - internal_low + 1;
		goto finish;
	}

	/* Aligns somewhere in the middle: insert a brand-new node */
	new_node = nova_alloc_inode_node(sb);
	NOVA_ASSERT(new_node);
	new_node->range_low = internal_low;
	new_node->range_high = internal_high;
	ret = nova_insert_inodetree(sbi, new_node, cpu);
	if (ret) {
		nova_err(sb, "%s failed\n", __func__);
		nova_free_inode_node(sb, new_node);
		goto finish;
	}
	inode_map->num_range_node_inode++;

finish:
	mutex_unlock(&inode_map->inode_table_mutex);
	return ret;
}
static int nova_init_inode_list_from_inode(struct super_block *sb) { struct nova_sb_info *sbi = NOVA_SB(sb); struct nova_inode *pi = nova_get_inode_by_ino(sb, NOVA_INODELIST1_INO); struct nova_range_node_lowhigh *entry; struct nova_range_node *range_node; struct inode_map *inode_map; size_t size = sizeof(struct nova_range_node_lowhigh); unsigned long num_inode_node = 0; u64 curr_p; unsigned long cpuid; int ret; sbi->s_inodes_used_count = 0; curr_p = pi->log_head; if (curr_p == 0) { nova_dbg("%s: pi head is 0!\n", __func__); return -EINVAL; } while (curr_p != pi->log_tail) { if (is_last_entry(curr_p, size)) { curr_p = next_log_page(sb, curr_p); } if (curr_p == 0) { nova_dbg("%s: curr_p is NULL!\n", __func__); NOVA_ASSERT(0); } entry = (struct nova_range_node_lowhigh *)nova_get_block(sb, curr_p); range_node = nova_alloc_inode_node(sb); if (range_node == NULL) NOVA_ASSERT(0); cpuid = (entry->range_low & CPUID_MASK) >> 56; if (cpuid >= sbi->cpus) { nova_err(sb, "Invalid cpuid %lu\n", cpuid); nova_free_inode_node(sb, range_node); NOVA_ASSERT(0); nova_destroy_inode_trees(sb); goto out; } range_node->range_low = entry->range_low & ~CPUID_MASK; range_node->range_high = entry->range_high; ret = nova_insert_inodetree(sbi, range_node, cpuid); if (ret) { nova_err(sb, "%s failed, %d\n", __func__, cpuid); nova_free_inode_node(sb, range_node); NOVA_ASSERT(0); nova_destroy_inode_trees(sb); goto out; } sbi->s_inodes_used_count += range_node->range_high - range_node->range_low + 1; num_inode_node++; inode_map = &sbi->inode_maps[cpuid]; inode_map->num_range_node_inode++; if (!inode_map->first_inode_range) inode_map->first_inode_range = range_node; curr_p += sizeof(struct nova_range_node_lowhigh); } nova_dbg("%s: %lu inode nodes\n", __func__, num_inode_node); out: nova_free_inode_log(sb, pi); return ret; }
static int nova_init_blockmap_from_inode(struct super_block *sb) { struct nova_sb_info *sbi = NOVA_SB(sb); struct nova_inode *pi = nova_get_inode_by_ino(sb, NOVA_BLOCKNODE_INO); struct free_list *free_list; struct nova_range_node_lowhigh *entry; struct nova_range_node *blknode; size_t size = sizeof(struct nova_range_node_lowhigh); u64 curr_p; u64 cpuid; int ret = 0; curr_p = pi->log_head; if (curr_p == 0) { nova_dbg("%s: pi head is 0!\n", __func__); return -EINVAL; } while (curr_p != pi->log_tail) { if (is_last_entry(curr_p, size)) { curr_p = next_log_page(sb, curr_p); } if (curr_p == 0) { nova_dbg("%s: curr_p is NULL!\n", __func__); NOVA_ASSERT(0); ret = -EINVAL; break; } entry = (struct nova_range_node_lowhigh *)nova_get_block(sb, curr_p); blknode = nova_alloc_blocknode(sb); if (blknode == NULL) NOVA_ASSERT(0); blknode->range_low = le64_to_cpu(entry->range_low); blknode->range_high = le64_to_cpu(entry->range_high); cpuid = get_cpuid(sbi, blknode->range_low); /* FIXME: Assume NR_CPUS not change */ free_list = nova_get_free_list(sb, cpuid); ret = nova_insert_blocktree(sbi, &free_list->block_free_tree, blknode); if (ret) { nova_err(sb, "%s failed\n", __func__); nova_free_blocknode(sb, blknode); NOVA_ASSERT(0); nova_destroy_blocknode_trees(sb); goto out; } free_list->num_blocknode++; if (free_list->num_blocknode == 1) free_list->first_node = blknode; free_list->num_free_blocks += blknode->range_high - blknode->range_low + 1; curr_p += sizeof(struct nova_range_node_lowhigh); } out: nova_free_inode_log(sb, pi); return ret; }
/*
 * Replay a directory inode's log from head to tail to rebuild its
 * in-DRAM dentry tree, applying setattr and link-change records along
 * the way, then count the log pages.  Returns 0; BUGs on a broken log
 * chain.
 */
int nova_rebuild_dir_inode_tree(struct super_block *sb,
	struct nova_inode *pi, u64 pi_addr,
	struct nova_inode_info_header *sih)
{
	struct nova_dentry *entry = NULL;
	struct nova_setattr_logentry *attr_entry = NULL;
	struct nova_link_change_entry *link_change_entry = NULL;
	struct nova_inode_log_page *curr_page;
	u64 ino = pi->nova_ino;
	unsigned short de_len;
	timing_t rebuild_time;
	void *addr;
	u64 curr_p;
	u64 next;
	u8 type;
	/*
	 * Must be initialized: an invalidated dentry (ino > 0,
	 * invalid != 0) assigns nothing before the if (ret) check below.
	 */
	int ret = 0;

	NOVA_START_TIMING(rebuild_dir_t, rebuild_time);
	nova_dbg_verbose("Rebuild dir %llu tree\n", ino);

	sih->pi_addr = pi_addr;

	curr_p = pi->log_head;
	if (curr_p == 0) {
		nova_err(sb, "Dir %llu log is NULL!\n", ino);
		BUG();
	}

	nova_dbg_verbose("Log head 0x%llx, tail 0x%llx\n",
				curr_p, pi->log_tail);

	sih->log_pages = 1;
	while (curr_p != pi->log_tail) {
		if (goto_next_page(sb, curr_p)) {
			sih->log_pages++;
			curr_p = next_log_page(sb, curr_p);
		}

		if (curr_p == 0) {
			nova_err(sb, "Dir %llu log is NULL!\n", ino);
			BUG();
		}

		addr = (void *)nova_get_block(sb, curr_p);
		type = nova_get_entry_type(addr);
		switch (type) {
		case SET_ATTR:
			attr_entry = (struct nova_setattr_logentry *)addr;
			nova_apply_setattr_entry(sb, pi, sih, attr_entry);
			sih->last_setattr = curr_p;
			curr_p += sizeof(struct nova_setattr_logentry);
			continue;
		case LINK_CHANGE:
			link_change_entry =
				(struct nova_link_change_entry *)addr;
			nova_apply_link_change_entry(pi, link_change_entry);
			sih->last_link_change = curr_p;
			curr_p += sizeof(struct nova_link_change_entry);
			continue;
		case DIR_LOG:
			break;
		default:
			nova_dbg("%s: unknown type %d, 0x%llx\n",
					__func__, type, curr_p);
			NOVA_ASSERT(0);
		}

		entry = (struct nova_dentry *)nova_get_block(sb, curr_p);
		nova_dbgv("curr_p: 0x%llx, type %d, ino %llu, "
			"name %s, namelen %u, rec len %u\n", curr_p,
			entry->entry_type, le64_to_cpu(entry->ino),
			entry->name, entry->name_len,
			le16_to_cpu(entry->de_len));

		if (entry->ino > 0) {
			if (entry->invalid == 0) {
				/* A valid entry to add */
				ret = nova_replay_add_dentry(sb, sih, entry);
			}
		} else {
			/* Delete the entry */
			ret = nova_replay_remove_dentry(sb, sih, entry);
		}

		if (ret) {
			nova_err(sb, "%s ERROR %d\n", __func__, ret);
			break;
		}

		nova_rebuild_dir_time_and_size(sb, pi, entry);

		de_len = le16_to_cpu(entry->de_len);
		curr_p += de_len;
	}

	sih->i_size = le64_to_cpu(pi->i_size);
	/* i_mode is a 16-bit field: le64_to_cpu would misread it on BE */
	sih->i_mode = le16_to_cpu(pi->i_mode);
	nova_flush_buffer(pi, sizeof(struct nova_inode), 0);

	/* Keep traversing until log ends */
	curr_p &= PAGE_MASK;
	curr_page = (struct nova_inode_log_page *)nova_get_block(sb, curr_p);
	while ((next = curr_page->page_tail.next_page) != 0) {
		sih->log_pages++;
		curr_p = next;
		curr_page = (struct nova_inode_log_page *)
			nova_get_block(sb, curr_p);
	}

	pi->i_blocks = sih->log_pages;

//	nova_print_dir_tree(sb, sih, ino);
	NOVA_END_TIMING(rebuild_dir_t, rebuild_time);
	return 0;
}