static void *logfs_write_commit(struct super_block *sb, void *h,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);

	/*
	 * A commit entry's payload is the cached array of journal entry
	 * offsets written since the last commit; return it directly instead
	 * of copying it into the scratch buffer 'h'.
	 */
	*type = JE_COMMIT;
	*len = super->s_no_je * sizeof(__be64);

	return super->s_je_array;
}
void logfs_cleanup_journal(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	/* Empty the reserved-segment tree; no per-node destructor needed. */
	btree_grim_visitor32(&super->s_reserved_segments, 0, NULL);

	/* Release the journal entry scratch buffers. */
	kfree(super->s_compressed_je);
	kfree(super->s_je);
}
static void read_erasecount(struct super_block *sb,
		struct logfs_je_journal_ec *ec)
{
	struct logfs_super *super = logfs_super(sb);
	int idx;

	/* Restore per-journal-segment erase counts from their on-disk form. */
	journal_for_each(idx)
		super->s_journal_ec[idx] = be32_to_cpu(ec->ec[idx]);
}
int logfs_erase_segment(struct super_block *sb, u32 segno, int ensure_erase)
{
	struct logfs_super *super = logfs_super(sb);
	u64 ofs = (u64)segno << super->s_segshift;

	/* Every erase bumps the global erase count. */
	super->s_gec++;

	return super->s_devops->erase(sb, ofs, super->s_segsize, ensure_erase);
}
/* called with inode->i_lock held */
static int logfs_drop_inode(struct inode *inode)
{
	struct logfs_super *super = logfs_super(inode->i_sb);
	struct logfs_inode *li = logfs_inode(inode);

	/* Park the inode on the per-sb freeing list before dropping it. */
	spin_lock(&logfs_inode_lock);
	list_move(&li->li_freeing_list, &super->s_freeing_list);
	spin_unlock(&logfs_inode_lock);

	return generic_drop_inode(inode);
}
/*
 * Common tail of create/mkdir/mknod/symlink: write the new inode and its
 * directory entry under a two-phase journal transaction so that a crash
 * between the two writes can be fixed up by journal replay.
 *
 * @dir:     parent directory
 * @dentry:  dentry to instantiate on success
 * @inode:   freshly allocated inode; its reference is consumed via iput()
 *           on every failure path
 * @dest:    symlink target string, or NULL for creat/mkdir/mknod
 * @destlen: length of @dest
 *
 * Returns 0 on success or a negative errno.
 */
static int __logfs_create(struct inode *dir, struct dentry *dentry,
		struct inode *inode, const char *dest, long destlen)
{
	struct logfs_super *super = logfs_super(dir->i_sb);
	struct logfs_inode *li = logfs_inode(inode);
	struct logfs_transaction *ta;
	int ret;

	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
	if (!ta)
		return -ENOMEM;

	ta->state = CREATE_1;
	ta->ino = inode->i_ino;
	mutex_lock(&super->s_dirop_mutex);
	logfs_add_transaction(inode, ta);

	if (dest) {
		/* symlink: the target string is the inode's data */
		ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL);
		if (!ret)
			ret = write_inode(inode);
	} else {
		/* creat/mkdir/mknod */
		ret = write_inode(inode);
	}
	if (ret) {
		/* Phase 1 failed: mark the inode stillborn and drop it. */
		abort_transaction(inode, ta);
		li->li_flags |= LOGFS_IF_STILLBORN;
		/* FIXME: truncate symlink */
		inode->i_nlink--;
		iput(inode);
		goto out;
	}

	ta->state = CREATE_2;
	logfs_add_transaction(dir, ta);
	ret = logfs_write_dir(dir, dentry, inode);
	/* sync directory */
	if (!ret)
		ret = write_inode(dir);

	if (ret) {
		/*
		 * Phase 2 failed: move the CREATE_2 transaction from the
		 * directory onto the inode, then remove the inode again.
		 */
		logfs_del_transaction(dir, ta);
		ta->state = CREATE_2;
		logfs_add_transaction(inode, ta);
		logfs_remove_inode(inode);
		iput(inode);
		goto out;
	}
	d_instantiate(dentry, inode);
out:
	mutex_unlock(&super->s_dirop_mutex);
	return ret;
}
static int logfs_write_je(struct super_block *sb,
		void* (*write)(struct super_block *sb, void *scratch,
			u16 *type, size_t *len))
{
	void *buf;
	size_t len;
	u16 type;

	/*
	 * Let the callback fill (or substitute) the scratch buffer, then
	 * funnel the result through the common journal-entry writer.
	 */
	buf = write(sb, logfs_super(sb)->s_je, &type, &len);
	return logfs_write_je_buf(sb, buf, type, len);
}
static s64 logfs_get_free_bytes(struct logfs_area *area, size_t bytes)
{
	s32 prev_used;

	logfs_open_area(area, bytes);

	/* Claim the next @bytes bytes of the open segment. */
	prev_used = area->a_used_bytes;
	area->a_used_bytes += bytes;
	BUG_ON(area->a_used_bytes >= logfs_super(area->a_sb)->s_segsize);

	return dev_ofs(area->a_sb, area->a_segno, prev_used);
}
static void read_dynsb(struct super_block *sb, struct logfs_je_dynsb *dynsb)
{
	struct logfs_super *super = logfs_super(sb);

	/* Restore the dynamic superblock fields from their on-disk form. */
	super->s_gec		= be64_to_cpu(dynsb->ds_gec);
	super->s_sweeper	= be64_to_cpu(dynsb->ds_sweeper);
	super->s_victim_ino	= be64_to_cpu(dynsb->ds_victim_ino);
	super->s_rename_dir	= be64_to_cpu(dynsb->ds_rename_dir);
	super->s_rename_pos	= be64_to_cpu(dynsb->ds_rename_pos);
	super->s_used_bytes	= be64_to_cpu(dynsb->ds_used_bytes);
	super->s_generation	= be32_to_cpu(dynsb->ds_generation);
}
/*
 * Completion handler for segment writeback bios: end writeback on every
 * page in the bio and drop the page reference taken at submission time.
 *
 * NOTE(review): the function appears truncated in this chunk — the
 * customary bio_put()/pending-write accounting tail (and the closing
 * brace) is not visible here; confirm against the full file.
 */
static void writeseg_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */

	bio_for_each_segment_all(bvec, bio, i) {
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}
static void *logfs_write_erasecount(struct super_block *sb, void *_ec,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_journal_ec *ec = _ec;
	int idx;

	/* Serialize the per-journal-segment erase counts into the buffer. */
	journal_for_each(idx)
		ec->ec[idx] = cpu_to_be32(super->s_journal_ec[idx]);

	*type = JE_ERASECOUNT;
	*len = logfs_journal_erasecount_size(super);
	return ec;
}
/*
 * Scan one journal segment for the most recent commit entry, then replay
 * every journal entry that commit refers to.
 *
 * Returns 0 on success, -ENOENT if the segment contains no valid commit
 * entry, or a negative errno from replaying an entry.
 */
static int logfs_read_segment(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_journal_header *jh = super->s_compressed_je;
	u64 ofs, seg_ofs = dev_ofs(sb, segno, 0);
	u32 h_ofs, last_ofs = 0;
	u16 len, datalen, last_len = 0;
	int i, err;

	/* Probe the whole segment, remembering the last commit entry seen. */
	for (h_ofs = 0; h_ofs < super->s_segsize; h_ofs += sizeof(*jh)) {
		ofs = seg_ofs + h_ofs;
		err = __read_je_header(sb, ofs, jh);
		if (err)
			continue;
		if (jh->h_type != cpu_to_be16(JE_COMMIT))
			continue;
		err = __read_je_payload(sb, ofs, jh);
		if (err)
			continue;
		len = be16_to_cpu(jh->h_len);
		datalen = be16_to_cpu(jh->h_datalen);
		/* Payload must fit s_je_array and be whole __be64 entries. */
		if ((datalen > sizeof(super->s_je_array)) ||
				(datalen % sizeof(__be64)))
			continue;
		last_ofs = h_ofs;
		last_len = datalen;
		/* Skip past this entry's payload before the next probe. */
		h_ofs += ALIGN(len, sizeof(*jh)) - sizeof(*jh);
	}
	if (last_ofs == 0)
		/* no commit found in this segment */
		return -ENOENT;
	ofs = seg_ofs + last_ofs;
	log_journal("Read commit from %llx\n", ofs);
	err = __read_je(sb, ofs, jh);
	BUG_ON(err); /* the scan loop above already validated this entry */
	if (err)
		return err;

	/* Replay every journal entry named by the commit. */
	unpack(jh, super->s_je_array);
	super->s_no_je = last_len / sizeof(__be64);
	for (i = 0; i < super->s_no_je; i++) {
		err = read_je(sb, be64_to_cpu(super->s_je_array[i]));
		if (err)
			return err;
	}

	super->s_journal_area->a_segno = segno;
	return 0;
}
/*
 * Unlink @dentry from @dir under a two-phase journal transaction:
 * UNLINK_1 deletes the directory entry, UNLINK_2 removes the inode.
 * A crash between the phases is fixed up by journal replay.
 *
 * Returns 0 on success or a negative errno.
 */
static int logfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct logfs_super *super = logfs_super(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	struct logfs_transaction *ta;
	struct page *page;
	pgoff_t index;
	int ret;

	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
	if (!ta)
		return -ENOMEM;

	ta->state = UNLINK_1;
	ta->ino = inode->i_ino;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	/* Locate the page holding the victim's directory entry. */
	page = logfs_get_dd_page(dir, dentry);
	if (!page) {
		kfree(ta);
		return -ENOENT;
	}
	if (IS_ERR(page)) {
		kfree(ta);
		return PTR_ERR(page);
	}
	index = page->index;
	page_cache_release(page);

	mutex_lock(&super->s_dirop_mutex);
	logfs_add_transaction(dir, ta);

	/* Phase 1: delete the directory entry and sync the directory. */
	ret = logfs_delete(dir, index, NULL);
	if (!ret)
		ret = write_inode(dir);

	if (ret) {
		abort_transaction(dir, ta);
		printk(KERN_ERR"LOGFS: unable to delete inode\n");
		goto out;
	}

	/* Phase 2: remove the inode itself. */
	ta->state = UNLINK_2;
	logfs_add_transaction(inode, ta);
	ret = logfs_remove_inode(inode);
out:
	mutex_unlock(&super->s_dirop_mutex);
	return ret;
}
/*
 * Finish any directory operation that was interrupted by a crash, using
 * the victim-inode and rename bookkeeping recovered from the journal.
 * The saved state is cleared before retrying and restored on failure so
 * a second replay attempt sees the same starting point.
 *
 * No locking done here, as this is called before .get_sb() returns.
 *
 * Returns 0 on success, -EIO (after LOGFS_BUG) on failure.
 */
int logfs_replay_journal(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct inode *inode;
	u64 ino, pos;
	int err;

	if (super->s_victim_ino) {
		/* delete victim inode */
		ino = super->s_victim_ino;
		printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino);
		inode = logfs_iget(sb, ino);
		if (IS_ERR(inode))
			goto fail;

		LOGFS_BUG_ON(i_size_read(inode) > 0, sb);
		/* clear first, restore below if the removal fails */
		super->s_victim_ino = 0;
		err = logfs_remove_inode(inode);
		iput(inode);
		if (err) {
			super->s_victim_ino = ino;
			goto fail;
		}
	}
	if (super->s_rename_dir) {
		/* delete old dd from rename */
		ino = super->s_rename_dir;
		pos = super->s_rename_pos;
		printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n",
				ino, pos);
		inode = logfs_iget(sb, ino);
		if (IS_ERR(inode))
			goto fail;

		/* clear first, restore below if the deletion fails */
		super->s_rename_dir = 0;
		super->s_rename_pos = 0;
		err = logfs_delete_dd(inode, pos);
		iput(inode);
		if (err) {
			super->s_rename_dir = ino;
			super->s_rename_pos = pos;
			goto fail;
		}
	}
	return 0;
fail:
	LOGFS_BUG(sb);
	return -EIO;
}
/* * FIXME: There should be a reserve for root, similar to ext2. */ int logfs_statfs(struct dentry *dentry, struct kstatfs *stats) { struct super_block *sb = dentry->d_sb; struct logfs_super *super = logfs_super(sb); stats->f_type = LOGFS_MAGIC_U32; stats->f_bsize = sb->s_blocksize; stats->f_blocks = super->s_size >> LOGFS_BLOCK_BITS >> 3; stats->f_bfree = super->s_free_bytes >> sb->s_blocksize_bits; stats->f_bavail = super->s_free_bytes >> sb->s_blocksize_bits; stats->f_files = 0; stats->f_ffree = 0; stats->f_namelen = LOGFS_MAX_NAMELEN; return 0; }
/*
 * Cross-directory rename, target does not exist. Just a little nasty.
 * Create a new dentry in the target dir, then remove the old dentry,
 * all the while taking care to remember our operation in the journal.
 *
 * Returns 0 on success or a negative errno.
 */
static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
		struct inode *new_dir, struct dentry *new_dentry)
{
	struct logfs_super *super = logfs_super(old_dir->i_sb);
	struct logfs_disk_dentry dd;
	struct logfs_transaction *ta;
	loff_t pos;
	int err;

	/* 1. locate source dd */
	err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
	if (err)
		return err;

	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
	if (!ta)
		return -ENOMEM;

	ta->state = CROSS_RENAME_1;
	ta->dir = old_dir->i_ino;
	ta->pos = pos;

	/* 2. write target dd */
	mutex_lock(&super->s_dirop_mutex);
	logfs_add_transaction(new_dir, ta);
	err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode);
	if (!err)
		err = write_inode(new_dir);

	if (err) {
		/* Nothing for the new name hit disk; undo the bookkeeping. */
		super->s_rename_dir = 0;
		super->s_rename_pos = 0;
		abort_transaction(new_dir, ta);
		goto out;
	}

	/* 3. remove source dd */
	ta->state = CROSS_RENAME_2;
	logfs_add_transaction(old_dir, ta);
	err = logfs_delete_dd(old_dir, pos);
	if (!err)
		err = write_inode(old_dir);
	/* Failing here would leave both names on disk — fatal. */
	LOGFS_BUG_ON(err, old_dir->i_sb);
out:
	mutex_unlock(&super->s_dirop_mutex);
	return err;
}
/*
 * Write @nr_pages pagecache pages starting at device offset @ofs (page
 * index @index) to the block device.  Pages are packed into a bio sized
 * by the queue's hardware limit; when the bio is full it is submitted
 * and the loop restarts with a fresh bio for the remaining pages.
 *
 * NOTE(review): the function appears truncated in this chunk — the
 * final bio setup/submission after the loop (and the closing brace) is
 * not visible here; confirm against the full file.
 */
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			/* Advance past the submitted pages and restart. */
			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		/* Page reference is dropped in writeseg_end_io(). */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
/*
 * Mount-time initialization: mempool + shadow trees, device mapping,
 * on-disk superblock, feature checks, read/write machinery, area state
 * and finally the journal.  The order is significant — each step
 * depends on the ones before it.
 *
 * Returns 0 on success or a negative errno; the caller's error path is
 * responsible for tearing down partially initialized state.
 */
static int logfs_read_sb(struct super_block *sb, int read_only)
{
	struct logfs_super *super = logfs_super(sb);
	int ret;

	super->s_btree_pool = mempool_create(32, btree_alloc, btree_free,
			NULL);
	if (!super->s_btree_pool)
		return -ENOMEM;

	btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
	btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
	btree_init_mempool32(&super->s_shadow_tree.segment_map,
			super->s_btree_pool);

	ret = logfs_init_mapping(sb);
	if (ret)
		return ret;

	ret = __logfs_read_sb(sb);
	if (ret)
		return ret;

	/* Unknown incompat features always fail; ro_compat only blocks rw. */
	if (super->s_feature_incompat & ~LOGFS_FEATURES_INCOMPAT)
		return -EIO;
	if ((super->s_feature_ro_compat & ~LOGFS_FEATURES_RO_COMPAT) &&
			!read_only)
		return -EIO;

	ret = logfs_init_rw(sb);
	if (ret)
		return ret;

	ret = logfs_init_areas(sb);
	if (ret)
		return ret;
/*
	ret = logfs_init_gc(sb);
	if (ret)
		return ret;
*/
	ret = logfs_init_journal(sb);
	if (ret)
		return ret;

	return 0;
}
/*
 * Fetch a page of the device-mapping inode.  With @use_filler the page
 * is read from the device via the devops readpage callback; otherwise a
 * blank (possibly uncached) page is returned for the caller to fill.
 *
 * Returns the page, an ERR_PTR from read_cache_page(), or NULL when
 * find_or_create_page() fails to allocate.
 *
 * Fix: find_or_create_page() returns NULL on allocation failure, but
 * the old code called unlock_page(page) unconditionally, dereferencing
 * NULL.  Only unlock when a page was actually obtained.
 */
static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
		int use_filler)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = super->s_devops->readpage;
	struct page *page;

	/* Mapping pages must never recurse into the filesystem. */
	BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
	if (use_filler)
		page = read_cache_page(mapping, index, filler, sb);
	else {
		page = find_or_create_page(mapping, index, GFP_NOFS);
		if (page)
			unlock_page(page);
	}
	return page;
}
static void account_shadows(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *inode = super->s_master_inode; struct logfs_inode *li = logfs_inode(inode); struct shadow_tree *tree = &super->s_shadow_tree; btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); btree_grim_visitor32(&tree->segment_map, 0, NULL); tree->no_shadowed_segments = 0; if (li->li_block) { li->li_block->ops->free_block(sb, li->li_block); } BUG_ON((s64)li->li_used_bytes < 0); }
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	/* Synchronous read; the page flags mirror the outcome. */
	err = sync_request(page, bdev, READ);
	if (!err) {
		SetPageUptodate(page);
		ClearPageError(page);
	} else {
		ClearPageUptodate(page);
		SetPageError(page);
	}
	unlock_page(page);
	return err;
}
static void read_anchor(struct super_block *sb, struct logfs_je_anchor *da) { struct logfs_super *super = logfs_super(sb); struct inode *inode = super->s_master_inode; struct logfs_inode *li = logfs_inode(inode); int i; super->s_last_ino = be64_to_cpu(da->da_last_ino); li->li_flags = 0; li->li_height = da->da_height; i_size_write(inode, be64_to_cpu(da->da_size)); li->li_used_bytes = be64_to_cpu(da->da_used_bytes); for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) li->li_data[i] = be64_to_cpu(da->da_data[i]); }
static void *logfs_write_dynsb(struct super_block *sb, void *_dynsb,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_dynsb *dynsb = _dynsb;

	/* Serialize the dynamic superblock fields into the scratch buffer. */
	dynsb->ds_gec		= cpu_to_be64(super->s_gec);
	dynsb->ds_sweeper	= cpu_to_be64(super->s_sweeper);
	dynsb->ds_victim_ino	= cpu_to_be64(super->s_victim_ino);
	dynsb->ds_rename_dir	= cpu_to_be64(super->s_rename_dir);
	dynsb->ds_rename_pos	= cpu_to_be64(super->s_rename_pos);
	dynsb->ds_used_bytes	= cpu_to_be64(super->s_used_bytes);
	dynsb->ds_generation	= cpu_to_be32(super->s_generation);

	*type = JE_DYNSB;
	*len = sizeof(*dynsb);
	return dynsb;
}
/*
 * Final stage of mount: look up the root directory, attach it to the
 * VFS and allocate the all-0xFF reference page used for erase checks.
 *
 * Returns 0 on success or a negative errno.  Once s_root is set,
 * ->put_super() handles cleanup of later failures.
 */
static int logfs_get_sb_final(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct inode *rootdir;
	int err;	/* only used by the disabled code below */

	/* root dir */
	rootdir = logfs_iget(sb, LOGFS_INO_ROOT);
	if (IS_ERR(rootdir))
		//goto fail;
		return -EIO;

	sb->s_root = d_make_root(rootdir);
	if (!sb->s_root)
		//goto fail;
		return -EIO;

	// at that point we know that ->put_super() will be called

	/* Erased flash reads back as all 0xFF — keep a reference page. */
	super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
	if (!super->s_erase_page)
		return -ENOMEM;
	memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE);

	/* // FIXME: check for read-only mounts
	err = logfs_make_writeable(sb);
	if (err) {
		__free_page(super->s_erase_page);
		return err;
	}
	*/

	log_super("LogFS: Finished mounting\n");
	return 0;

	/*
fail:
	iput(super->s_master_inode);
	iput(super->s_segfile_inode);
	iput(super->s_mapping_inode);
	return -EIO;
	*/
}
static void *logfs_write_area(struct super_block *sb, void *_a,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_area[super->s_sum_index];
	struct logfs_je_area *a = _a;

	/* Snapshot the open area selected by s_sum_index. */
	a->vim = VIM_DEFAULT;
	a->gc_level = super->s_sum_index;
	a->used_bytes = cpu_to_be32(area->a_used_bytes);
	a->segno = cpu_to_be32(area->a_segno);
	/* Buffered-write devices also journal the partial write buffer. */
	if (super->s_writesize > 1)
		write_wbuf(sb, area, a + 1);

	*type = JE_AREA;
	*len = sizeof(*a) + super->s_writesize;
	return a;
}
static int logfs_write_obj_aliases(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int err; log_journal("logfs_write_obj_aliases: %d aliases to write\n", super->s_no_object_aliases); super->s_je_fill = 0; err = logfs_write_obj_aliases_pagecache(sb); if (err) return err; if (super->s_je_fill) err = logfs_write_je_buf(sb, super->s_je, JE_OBJ_ALIAS, super->s_je_fill * sizeof(struct logfs_obj_alias)); return err; }
static void logfs_set_ino_generation(struct super_block *sb, struct inode *inode) { struct logfs_super *super = logfs_super(sb); u64 ino; mutex_lock(&super->s_journal_mutex); ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1); super->s_last_ino = ino; super->s_inos_till_wrap--; if (super->s_inos_till_wrap < 0) { super->s_last_ino = LOGFS_RESERVED_INOS; super->s_generation++; super->s_inos_till_wrap = INOS_PER_WRAP; } inode->i_ino = ino; inode->i_generation = super->s_generation; mutex_unlock(&super->s_journal_mutex); }
static void *__logfs_write_anchor(struct super_block *sb, void *_da,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_anchor *da = _da;
	struct inode *master = super->s_master_inode;
	struct logfs_inode *li = logfs_inode(master);
	int slot;

	/* Snapshot the master inode into the anchor journal entry. */
	da->da_height = li->li_height;
	da->da_last_ino = cpu_to_be64(super->s_last_ino);
	da->da_size = cpu_to_be64(i_size_read(master));
	da->da_used_bytes = cpu_to_be64(li->li_used_bytes);
	for (slot = 0; slot < LOGFS_EMBEDDED_FIELDS; slot++)
		da->da_data[slot] = cpu_to_be64(li->li_data[slot]);

	*type = JE_ANCHOR;
	*len = sizeof(*da);
	return da;
}
void logfs_cleanup_gc(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int i; if (!super->s_free_list.count) return; /* * FIXME: The btree may still contain a single empty node. So we * call the grim visitor to clean up that mess. Btree code should * do it for us, really. */ btree_grim_visitor32(&super->s_cand_tree, 0, NULL); logfs_cleanup_list(sb, &super->s_free_list); logfs_cleanup_list(sb, &super->s_reserve_list); for_each_area(i) logfs_cleanup_list(sb, &super->s_low_list[i]); logfs_cleanup_list(sb, &super->s_ec_list); }
static void write_wbuf(struct super_block *sb, struct logfs_area *area, void *wbuf) { struct logfs_super *super = logfs_super(sb); struct address_space *mapping = super->s_mapping_inode->i_mapping; u64 ofs; pgoff_t index; int page_ofs; struct page *page; ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes & ~(super->s_writesize - 1)); index = ofs >> PAGE_SHIFT; page_ofs = ofs & (PAGE_SIZE - 1); page = find_lock_page(mapping, index); BUG_ON(!page); memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); unlock_page(page); }