/*
 * logfs_kill_sb - tear down a LogFS superblock at unmount time.
 *
 * Flushes remaining state to the medium, shuts the VFS side down, then
 * releases every resource the mount acquired.  The statement order is
 * significant: all writes must finish before the SHUTDOWN flag is set,
 * and the device is only put after every subsystem that might touch it
 * has been cleaned up.
 */
static void logfs_kill_sb(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	log_super("LogFS: Start unmounting\n");
	/* Alias entries slow down mount, so evict as many as possible */
	sync_filesystem(sb);
	/* Persist the final anchor and release the open write areas */
	logfs_write_anchor(sb);
	free_areas(sb);

	/*
	 * From this point on alias entries are simply dropped - and any
	 * writes to the object store are considered bugs.
	 */
	log_super("LogFS: Now in shutdown\n");
	generic_shutdown_super(sb);
	super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN;

	/* Everything dirty must have been written back by now */
	BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes);

	/* Release subsystems in reverse order of their setup */
	logfs_cleanup_gc(sb);
	logfs_cleanup_journal(sb);
	logfs_cleanup_areas(sb);
	logfs_cleanup_rw(sb);
	if (super->s_erase_page)
		__free_page(super->s_erase_page);
	/* Drop the device reference before freeing the super itself */
	super->s_devops->put_device(super);
	logfs_mempool_destroy(super->s_btree_pool);
	logfs_mempool_destroy(super->s_alias_pool);
	kfree(super);
	log_super("LogFS: Finished unmounting\n");
}
static struct dentry *logfs_get_sb_device(struct logfs_super *super, struct file_system_type *type, int flags) { struct super_block *sb; int err = -ENOMEM; static int mount_count; log_super("LogFS: Start mount %x\n", mount_count++); err = -EINVAL; sb = sget(type, logfs_sb_test, logfs_sb_set, flags | MS_NOATIME, super); /* if (IS_ERR(sb)) { super->s_devops->put_device(super); kfree(super); return ERR_CAST(sb); } if (sb->s_root) { // Device is already in use super->s_devops->put_device(super); kfree(super); return dget(sb->s_root); } */ /* * sb->s_maxbytes is limited to 8TB. On 32bit systems, the page cache * only covers 16TB and the upper 8TB are used for indirect blocks. * On 64bit system we could bump up the limit, but that would make * the filesystem incompatible with 32bit systems. */ sb->s_maxbytes = (1ull << 43) - 1; sb->s_max_links = LOGFS_LINK_MAX; sb->s_op = &logfs_super_operations; err = logfs_read_sb(sb, sb->s_flags & MS_RDONLY); if (err) //goto err1; return ERR_PTR(err); sb->s_flags |= MS_ACTIVE; err = logfs_get_sb_final(sb); if (err) { //deactivate_locked_super(sb); return ERR_PTR(err); } return dget(sb->s_root); /* err1: // no ->s_root, no ->put_super() iput(super->s_master_inode); iput(super->s_segfile_inode); iput(super->s_mapping_inode); deactivate_locked_super(sb); return ERR_PTR(err); */ }
static int logfs_get_sb_final(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *rootdir; int err; /* root dir */ rootdir = logfs_iget(sb, LOGFS_INO_ROOT); if (IS_ERR(rootdir)) //goto fail; return -EIO; sb->s_root = d_make_root(rootdir); if (!sb->s_root) //goto fail; return -EIO; // at that point we know that ->put_super() will be called super->s_erase_page = alloc_pages(GFP_KERNEL, 0); if (!super->s_erase_page) return -ENOMEM; memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE); /* // FIXME: check for read-only mounts err = logfs_make_writeable(sb); if (err) { __free_page(super->s_erase_page); return err; } */ log_super("LogFS: Finished mounting\n"); return 0; /* fail: iput(super->s_master_inode); iput(super->s_segfile_inode); iput(super->s_mapping_inode); return -EIO; */ }
/*
 * log_writes_kthread - worker thread that flushes pending blocks to the
 * log device.
 *
 * Pulls entries off lc->logging_blocks under blocks_lock, reserves log
 * sectors for each, and writes them out (plus a super update for FUA/MARK
 * blocks) outside the lock.  Any write failure disables further logging.
 * NOTE(review): the visible text ends at the while-loop's closing brace;
 * the function's trailing return appears to lie beyond this chunk.
 */
static int log_writes_kthread(void *arg)
{
	struct log_writes_c *lc = (struct log_writes_c *)arg;
	sector_t sector = 0;

	while (!kthread_should_stop()) {
		bool super = false;
		bool logging_enabled;
		struct pending_block *block = NULL;
		int ret;

		spin_lock_irq(&lc->blocks_lock);
		if (!list_empty(&lc->logging_blocks)) {
			block = list_first_entry(&lc->logging_blocks,
						 struct pending_block, list);
			list_del_init(&block->list);
			if (!lc->logging_enabled)
				goto next;

			/* Reserve log space: discards take one sector,
			 * data blocks take nr_sectors plus a header. */
			sector = lc->next_sector;
			if (block->flags & LOG_DISCARD_FLAG)
				lc->next_sector++;
			else
				lc->next_sector += block->nr_sectors + 1;

			/*
			 * Apparently the size of the device may not be known
			 * right away, so handle this properly.
			 */
			if (!lc->end_sector)
				lc->end_sector = logdev_last_sector(lc);
			if (lc->end_sector &&
			    lc->next_sector >= lc->end_sector) {
				DMERR("Ran out of space on the logdev");
				lc->logging_enabled = false;
				goto next;
			}
			lc->logged_entries++;
			atomic_inc(&lc->io_blocks);

			/* FUA/MARK blocks also trigger a super write */
			super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
			if (super)
				atomic_inc(&lc->io_blocks);
		}
next:
		/* Snapshot the flag so the I/O happens unlocked */
		logging_enabled = lc->logging_enabled;
		spin_unlock_irq(&lc->blocks_lock);
		if (block) {
			if (logging_enabled) {
				ret = log_one_block(lc, block, sector);
				if (!ret && super)
					ret = log_super(lc);
				if (ret) {
					/* Write failed: stop logging */
					spin_lock_irq(&lc->blocks_lock);
					lc->logging_enabled = false;
					spin_unlock_irq(&lc->blocks_lock);
				}
			} else
				free_pending_block(lc, block);
			continue;
		}

		/* Nothing queued: sleep until new work arrives */
		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    !atomic_read(&lc->pending_blocks))
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	}