/*
 * This must be called after early kernel init, since then the rootdev
 * is available.
 */
static void check_pinning_enforcement(struct super_block *mnt_sb)
{
	bool ro = false;

	/*
	 * If load pinning is not enforced via a read-only block
	 * device, allow sysctl to change modes for testing.
	 */
	if (mnt_sb->s_bdev) {
		char bdev[BDEVNAME_SIZE];

		ro = bdev_read_only(mnt_sb->s_bdev);
		bdevname(mnt_sb->s_bdev, bdev);
		pr_info("%s (%u:%u): %s\n", bdev,
			MAJOR(mnt_sb->s_bdev->bd_dev),
			MINOR(mnt_sb->s_bdev->bd_dev),
			ro ? "read-only" : "writable");
	} else
		pr_info("mnt_sb lacks block device, treating as: writable\n");

	if (!ro) {
		if (!register_sysctl_paths(loadpin_sysctl_path,
					   loadpin_sysctl_table))
			pr_notice("sysctl registration failed!\n");
		else
			pr_info("enforcement can be disabled.\n");
	} else
		pr_info("load pinning engaged.\n");
}
co_rc_t co_os_file_block_open(struct co_monitor *linuxvm,
			      co_monitor_file_block_dev_t *fdev)
{
	struct file *filp;
	struct inode *inode = NULL;
	co_rc_t rc = CO_RC(OK);

	co_debug("opening %s\n", fdev->pathname);

	filp = filp_open(fdev->pathname, O_RDWR | O_LARGEFILE, 0);
	if (IS_ERR(filp))
		return CO_RC(ERROR);

	if (filp->f_dentry)
		inode = filp->f_dentry->d_inode;

	if (inode && S_ISBLK(inode->i_mode)) {
		if (bdev_read_only(inode->i_bdev)) {
			/* TODO... */
		}
	} else if (!inode || !S_ISREG(inode->i_mode)) {
		co_debug("colinux: invalid file type: %s\n", fdev->pathname);
		rc = CO_RC(ERROR);
		goto out;
	}

	fdev->sysdep = (struct co_os_file_block_sysdep *)(filp);
	return rc;

out:
	filp_close(filp, current->files);
	return rc;
}
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
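For reference, a minimal hedged sketch of a caller; example_reset_all_zones() is a hypothetical helper, not a kernel function. It resets every zone by passing the whole-device range, which the bounds check above validates against bdev->bd_part->nr_sects.

/*
 * Hypothetical caller (not from the kernel tree): reset the write pointer
 * of every zone on a zoned block device. The whole-device range mirrors
 * the bd_part->nr_sects bound checked inside blkdev_reset_zones() above.
 */
static int example_reset_all_zones(struct block_device *bdev)
{
	return blkdev_reset_zones(bdev, 0, bdev->bd_part->nr_sects,
				  GFP_KERNEL);
}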
/*
 * Check if the root device is read-only (e.g. dm-verity is enabled).
 * This must be called after early kernel init, since then the rootdev
 * is available.
 */
static bool rootdev_readonly(void)
{
	bool rc;
	struct block_device *bdev;
	const fmode_t mode = FMODE_WRITE;

	bdev = blkdev_get_by_dev(ROOT_DEV, mode, NULL);
	if (IS_ERR(bdev)) {
		/* In this weird case, assume it is read-only. */
		pr_info("dev(%u,%u): FMODE_WRITE disallowed?!\n",
			MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
		return true;
	}

	rc = bdev_read_only(bdev);
	blkdev_put(bdev, mode);

	pr_info("dev(%u,%u): %s\n", MAJOR(ROOT_DEV), MINOR(ROOT_DEV),
		rc ? "read-only" : "writable");

	return rc;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	clear_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			f2fs_msg(sb, KERN_WARNING,
				"mounting with \"discard\" option, but "
				"the device does not support discard");
			clear_opt(sbi, DISCARD);
		}
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi);
	}

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
int
xfs_readonly_buftarg(
	xfs_buftarg_t		*btp)
{
	return bdev_read_only(btp->pbr_bdev);
}
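A hedged sketch of how such a wrapper might be consumed at mount time; example_xfs_check_writable() is hypothetical and not taken from the XFS sources, and xfs_mount_t, m_ddev_targp, and XFS_MOUNT_RDONLY are assumed to belong to the same era as the pbr_bdev-based buftarg above.

/*
 * Hypothetical mount-time check (illustrative only): refuse a read/write
 * mount when the underlying data device reports itself as read-only.
 */
STATIC int
example_xfs_check_writable(
	xfs_mount_t		*mp)
{
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) &&
	    xfs_readonly_buftarg(mp->m_ddev_targp)) {
		cmn_err(CE_WARN,
			"XFS: data device is read-only, mount read-only");
		return XFS_ERROR(EROFS);
	}
	return 0;
}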