static inline Indirect *get_branch(struct inode *inode,
                                   int depth,
                                   int *offsets,
                                   Indirect chain[DEPTH],
                                   int *err)
{
    struct super_block *sb = inode->i_sb;
    Indirect *p = chain;
    struct buffer_head *bh;

    *err = 0;
    /* i_data is not going away, no lock needed */
    add_chain(chain, NULL, i_data(inode) + *offsets);
    if (!p->key)
        goto no_block;
    while (--depth) {
        bh = sb_bread(sb, block_to_cpu(p->key));
        if (!bh)
            goto failure;
        read_lock(&pointers_lock);
        if (!verify_chain(chain, p))
            goto changed;
        add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
        read_unlock(&pointers_lock);
        if (!p->key)
            goto no_block;
    }
    return NULL;

changed:
    read_unlock(&pointers_lock);
    brelse(bh);
    *err = -EAGAIN;
    goto no_block;
failure:
    *err = -EIO;
no_block:
    return p;
}
struct buffer_head *
befs_bread(struct super_block *sb, befs_blocknr_t block)
{
    struct buffer_head *bh = NULL;

    befs_debug(sb, "---> Enter befs_read() %Lu", block);

    bh = sb_bread(sb, block);

    if (bh == NULL) {
        befs_error(sb, "Failed to read block %lu", block);
        goto error;
    }

    befs_debug(sb, "<--- befs_read()");

    return bh;

error:
    befs_debug(sb, "<--- befs_read() ERROR");
    return NULL;
}
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
                                    struct list_blocks_struct *lb)
{
    struct buffer_head *bh;
    __le32 *i_data;
    int i, retval = 0;
    ext4_lblk_t blk_count = *blk_nump;
    unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

    if (!pblock) {
        /* Only update the file block number */
        *blk_nump += max_entries * max_entries;
        return 0;
    }

    bh = sb_bread(inode->i_sb, pblock);
    if (!bh)
        return -EIO;

    i_data = (__le32 *)bh->b_data;
    for (i = 0; i < max_entries; i++) {
        if (i_data[i]) {
            retval = update_ind_extent_range(handle, inode,
                                             le32_to_cpu(i_data[i]),
                                             &blk_count, lb);
            if (retval)
                break;
        } else {
            /* Only update the file block number */
            blk_count += max_entries;
        }
    }

    /* Update the file block number */
    *blk_nump = blk_count;
    put_bh(bh);
    return retval;
}
/*
 * Read the f2fs raw super block.
 * There are two copies of the super block, so read the first one first;
 * if it turns out to be invalid, fall back to the second copy.
 */
static int read_raw_super_block(struct super_block *sb,
            struct f2fs_super_block **raw_super,
            struct buffer_head **raw_super_buf)
{
    int block = 0;

retry:
    *raw_super_buf = sb_bread(sb, block);
    if (!*raw_super_buf) {
        f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
                 block + 1);
        if (block == 0) {
            block++;
            goto retry;
        } else {
            return -EIO;
        }
    }

    *raw_super = (struct f2fs_super_block *)
        ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);

    /* sanity checking of raw super */
    if (sanity_check_raw_super(sb, *raw_super)) {
        brelse(*raw_super_buf);
        f2fs_msg(sb, KERN_ERR,
            "Can't find valid F2FS filesystem in %dth superblock",
            block + 1);
        if (block == 0) {
            block++;
            goto retry;
        } else {
            return -EINVAL;
        }
    }

    return 0;
}
int ux_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
    unsigned long pos;
    struct inode *inode = filp->f_dentry->d_inode;
    struct ux_inode *uip = (struct ux_inode *)&inode->i_private;
    struct ux_dirent *udir;
    struct buffer_head *bh;
    __u32 blk;

start_again:
    pos = filp->f_pos;
    if (pos >= inode->i_size)
        return 0;

    blk = (pos + 1) / UX_BSIZE;
    blk = uip->i_addr[blk];
    bh = sb_bread(inode->i_sb, blk);
    if (!bh)    /* bail out if the directory block cannot be read */
        return -EIO;
    udir = (struct ux_dirent *)(bh->b_data + pos % UX_BSIZE);

    /*
     * Skip over 'null' directory entries.
     */
    if (udir->d_ino == 0) {
        filp->f_pos += sizeof(struct ux_dirent);
        brelse(bh);
        goto start_again;
    } else {
        filldir(dirent, udir->d_name, sizeof(udir->d_name),
                pos, udir->d_ino, DT_UNKNOWN);
    }
    filp->f_pos += sizeof(struct ux_dirent);
    brelse(bh);
    return 0;
}
/*
 * Returns the inode number of the directory entry at offset pos. If bh is
 * non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
 * returned in bh.
 * AV. Most often we do it item-by-item. Makes sense to optimize.
 * AV. OK, there we go: if both bh and de are non-NULL we assume that we just
 * AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
 * AV. It's done in fat_get_entry() (inlined), here the slow case lives.
 * AV. Additionally, when we return -1 (i.e. reached the end of directory)
 * AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
                          struct buffer_head **bh, struct msdos_dir_entry **de)
{
    struct super_block *sb = dir->i_sb;
    sector_t phys, iblock;
    unsigned long mapped_blocks;
    int err, offset;

next:
    if (*bh)
        brelse(*bh);

    *bh = NULL;
    iblock = *pos >> sb->s_blocksize_bits;
    err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
    if (err || !phys)
        return -1;  /* beyond EOF or error */

    fat_dir_readahead(dir, iblock, phys);

    *bh = sb_bread(sb, phys);
    if (*bh == NULL) {
        /*
         * [email protected]: suppress the error message when the SD card
         * is pulled without unmounting.
         */
#ifdef LGE_REMOVE_ERROR
        printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
               (llu)phys);
#endif
        /* skip this block */
        *pos = (iblock + 1) << sb->s_blocksize_bits;
        goto next;
    }

    offset = *pos & (sb->s_blocksize - 1);
    *pos += sizeof(struct msdos_dir_entry);
    *de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

    return 0;
}
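/*
 * The "fast case" referenced in the comment above lives in an inlined
 * wrapper (fat_get_entry()). A minimal sketch of such a wrapper, assuming
 * the usual dir_per_block field in the FAT superblock info, is shown below;
 * it only falls back to the sb_bread()-based slow path above when the next
 * entry crosses a block boundary. Details may differ from the real helper.
 */
static inline int fat_get_entry(struct inode *dir, loff_t *pos,
                                struct buffer_head **bh,
                                struct msdos_dir_entry **de)
{
    /* Fast path: the next entry is still inside the buffer we already hold. */
    if (*bh && *de &&
        (*de - (struct msdos_dir_entry *)(*bh)->b_data) <
                MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
        *pos += sizeof(struct msdos_dir_entry);
        (*de)++;
        return 0;
    }
    /* Slow path: read the next directory block via sb_bread(). */
    return fat__get_entry(dir, pos, bh, de);
}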
static int udf_symlink_filler(struct file *file, struct page *page)
{
    struct inode *inode = page->mapping->host;
    struct buffer_head *bh = NULL;
    char *symlink;
    int err = -EIO;
    char *p = kmap(page);
    struct udf_inode_info *iinfo;

    lock_kernel();
    iinfo = UDF_I(inode);
    if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
        symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
    } else {
        bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));

        if (!bh)
            goto out;

        symlink = bh->b_data;
    }

    udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
    brelse(bh);

    unlock_kernel();
    SetPageUptodate(page);
    kunmap(page);
    unlock_page(page);
    return 0;

out:
    unlock_kernel();
    SetPageError(page);
    kunmap(page);
    unlock_page(page);
    return err;
}
ssize_t hellofs_read(struct file *filp, char __user *buf, size_t len,
                     loff_t *ppos)
{
    struct super_block *sb;
    struct inode *inode;
    struct hellofs_inode *hellofs_inode;
    struct buffer_head *bh;
    char *buffer;
    int nbytes;

    inode = filp->f_path.dentry->d_inode;
    sb = inode->i_sb;
    hellofs_inode = HELLOFS_INODE(inode);

    if (*ppos >= hellofs_inode->file_size)
        return 0;

    bh = sb_bread(sb, hellofs_inode->data_block_no);
    if (!bh) {
        printk(KERN_ERR "Failed to read data block %llu\n",
               hellofs_inode->data_block_no);
        return 0;
    }

    buffer = (char *)bh->b_data + *ppos;
    nbytes = min((size_t)(hellofs_inode->file_size - *ppos), len);

    if (copy_to_user(buf, buffer, nbytes)) {
        brelse(bh);
        printk(KERN_ERR
               "Error copying file content to userspace buffer\n");
        return -EFAULT;
    }

    brelse(bh);
    *ppos += nbytes;
    return nbytes;
}
static int read_from_real_sfs(sfs_info_t *info, byte4_t block,
                              byte4_t offset, void *buf, byte4_t len)
{
    byte4_t block_size = info->sb.block_size;
    byte4_t bd_block_size = info->vfs_sb->s_bdev->bd_block_size;
    byte4_t abs;
    struct buffer_head *bh;

    /*
     * Translating the real SFS block numbering to underlying block device
     * block numbering, for sb_bread()
     */
    abs = block * block_size + offset;
    block = abs / bd_block_size;
    offset = abs % bd_block_size;
    if (offset + len > bd_block_size) /* Should never happen */
        return -EINVAL;
    if (!(bh = sb_bread(info->vfs_sb, block)))
        return -EIO;

    memcpy(buf, bh->b_data + offset, len);
    brelse(bh);
    return 0;
}
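/*
 * Hypothetical usage sketch (the names sfs_file_entry_t and
 * entry_table_block_start are illustrative, not taken from the code above):
 * a caller can read the i-th record of an on-disk table through
 * read_from_real_sfs(), which hides the SFS-block to device-block
 * translation that sb_bread() requires.
 */
static int read_entry_from_real_sfs(sfs_info_t *info, int i,
                                    sfs_file_entry_t *fe)
{
    return read_from_real_sfs(info, info->sb.entry_table_block_start,
                              i * sizeof(sfs_file_entry_t),
                              fe, sizeof(sfs_file_entry_t));
}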
struct buffer_head *
befs_bread(struct super_block *sb, befs_blocknr_t block)
{
    struct buffer_head *bh = NULL;

    befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block);

    bh = sb_bread(sb, block);

    if (bh == NULL) {
        befs_error(sb, "Failed to read block %lu",
                   (unsigned long)block);
        goto error;
    }

    befs_debug(sb, "<--- %s", __func__);

    return bh;

error:
    befs_debug(sb, "<--- %s ERROR", __func__);
    return NULL;
}
struct ufs_buffer_head *ubh_bread_uspi(struct ufs_sb_private_info *uspi,
                                       struct super_block *sb,
                                       u64 fragment, u64 size)
{
    unsigned i, j;
    u64 count = 0;

    if (size & ~uspi->s_fmask)
        return NULL;
    count = size >> uspi->s_fshift;
    if (count <= 0 || count > UFS_MAXFRAG)
        return NULL;
    USPI_UBH(uspi)->fragment = fragment;
    USPI_UBH(uspi)->count = count;
    for (i = 0; i < count; i++)
        if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
            goto failed;
    for (; i < UFS_MAXFRAG; i++)
        USPI_UBH(uspi)->bh[i] = NULL;
    return USPI_UBH(uspi);
failed:
    for (j = 0; j < i; j++)
        brelse(USPI_UBH(uspi)->bh[j]);
    return NULL;
}
struct minix_inode *
minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
    int block;
    struct minix_sb_info *sbi = minix_sb(sb);
    struct minix_inode *p;

    if (!ino || ino > sbi->s_ninodes) {
        printk("Bad inode number on dev %s: %ld is out of range\n",
               sb->s_id, (long)ino);
        return NULL;
    }
    ino--;
    block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
            ino / MINIX_INODES_PER_BLOCK;
    *bh = sb_bread(sb, block);
    if (!*bh) {
        printk("Unable to read inode block\n");
        return NULL;
    }
    p = (void *)(*bh)->b_data;
    return p + ino % MINIX_INODES_PER_BLOCK;
}
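/*
 * Minimal caller sketch, assuming the usual contract of the helper above:
 * on success *bh holds a reference to the inode block, and the caller must
 * brelse() it after copying out the raw fields. The wrapper function itself
 * is illustrative; only minix_V1_raw_inode() and struct minix_inode come
 * from the code above.
 */
static int example_read_v1_size(struct super_block *sb, ino_t ino, __u32 *size)
{
    struct buffer_head *bh;
    struct minix_inode *raw = minix_V1_raw_inode(sb, ino, &bh);

    if (!raw)
        return -EIO;
    *size = raw->i_size;    /* copy what we need before dropping the buffer */
    brelse(bh);
    return 0;
}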
/*
 * Initialize a data index block for the specified inode at the specified
 * block number.
 */
int lab5fs_inode_init_block_index(struct inode *ino, int bi_block_num)
{
    int err = 0;
    struct super_block *sb = ino->i_sb;
    struct buffer_head *bibh = NULL;
    struct lab5fs_inode_data_index *lab5fs_data_index = NULL;

    /* read the inode's block index. */
    if (!(bibh = sb_bread(sb, bi_block_num))) {
        printk("unable to read inode block index, block %d.\n",
               bi_block_num);
        err = -EIO;
        goto ret;
    }
    lab5fs_data_index = (struct lab5fs_inode_data_index *)(bibh->b_data);
    /* zero the whole index structure before first use */
    memset(lab5fs_data_index, 0, sizeof(*lab5fs_data_index));
    mark_buffer_dirty(bibh);
ret:
    if (bibh)
        brelse(bibh);
    return err;
}
/* Clear out the data and data index blocks of the given inode. */
void lab5fs_inode_clear_blocks(struct inode *ino)
{
    struct super_block *sb = ino->i_sb;
    struct lab5fs_inode_info *inode_info = LAB5FS_INODE_INFO(ino);
    int bi_block_num = inode_info->i_bi_block_num;
    struct buffer_head *bibh = NULL;
    struct lab5fs_inode_data_index *block_index_table = NULL;
    int i, block_num;

    printk("inode_clear_blocks:: freeing data blocks\n");

    /* read the inode's block index. */
    if (!(bibh = sb_bread(sb, bi_block_num))) {
        printk("unable to read block index, block %d.\n", bi_block_num);
        return;
    }
    block_index_table = (struct lab5fs_inode_data_index *)bibh->b_data;
    for (i = 0; i < LAB5FS_MAX_BLOCK_INDEX; i++) {
        block_num = le32_to_cpu(block_index_table->blocks[i]);
        if (block_num != 0) {
            /* block is in use */
            printk("freeing block %u\n", block_num);
            lab5fs_release_block_num(sb, block_num);
        }
    }
    brelse(bibh);
    ino->i_blocks = 0;
}
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
    struct buffer_head *bh;
    unsigned long nr;

    if (depth--) {
        for ( ; p < q ; p++) {
            nr = block_to_cpu(*p);
            if (!nr)
                continue;
            *p = 0;
            bh = sb_bread(inode->i_sb, nr);
            if (!bh)
                continue;
            free_branches(inode, (block_t *)bh->b_data,
                          block_end(bh), depth);
            bforget(bh);
            xiafs_free_block(inode, nr);
            mark_inode_dirty(inode);
        }
    } else
        free_data(inode, p, q);
}
void dedupfs_inode_add(struct super_block *vsb, struct dedupfs_inode *inode)
{
    struct dedupfs_super_block *sb = DEDUPFS_SB(vsb);
    struct buffer_head *bh;
    struct dedupfs_inode *inode_iterator;

    if (mutex_lock_interruptible(&dedupfs_inodes_mgmt_lock)) {
        printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
               __FILE__, __LINE__);
        return;
    }

    bh = sb_bread(vsb, DEDUPFS_INODESTORE_BLOCK_NUMBER);
    if (!bh) {
        printk(KERN_ERR "Failed to read the inode store block\n");
        mutex_unlock(&dedupfs_inodes_mgmt_lock);
        return;
    }
    inode_iterator = (struct dedupfs_inode *)bh->b_data;

    if (mutex_lock_interruptible(&dedupfs_sb_lock)) {
        printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
               __FILE__, __LINE__);
        brelse(bh);
        mutex_unlock(&dedupfs_inodes_mgmt_lock);
        return;
    }

    /* Append the new inode at the end of the inode store */
    inode_iterator += sb->inodes_count;
    memcpy(inode_iterator, inode, sizeof(struct dedupfs_inode));
    sb->inodes_count++;

    mark_buffer_dirty(bh);
    dedupfs_sb_sync(vsb);
    brelse(bh);

    mutex_unlock(&dedupfs_sb_lock);
    mutex_unlock(&dedupfs_inodes_mgmt_lock);
}
static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
       const char *name, struct qnx4_inode_entry **res_dir, int *ino)
{
    unsigned long block, offset, blkofs;
    struct buffer_head *bh;

    *res_dir = NULL;
    bh = NULL;
    block = offset = blkofs = 0;
    while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) {
        if (!bh) {
            block = qnx4_block_map(dir, blkofs);
            if (block)
                bh = sb_bread(dir->i_sb, block);
            if (!bh) {
                blkofs++;
                continue;
            }
        }
        *res_dir = (struct qnx4_inode_entry *)(bh->b_data + offset);
        if (qnx4_match(len, name, bh, &offset)) {
            *ino = block * QNX4_INODES_PER_BLOCK +
                (offset / QNX4_DIR_ENTRY_SIZE) - 1;
            return bh;
        }
        if (offset < bh->b_size) {
            continue;
        }
        brelse(bh);
        bh = NULL;
        offset = 0;
        blkofs++;
    }
    brelse(bh);
    *res_dir = NULL;
    return NULL;
}
static int fat__get_entry(struct inode *dir, loff_t *pos,
                          struct buffer_head **bh, struct msdos_dir_entry **de)
{
    struct super_block *sb = dir->i_sb;
    sector_t phys, iblock;
    unsigned long mapped_blocks;
    int err, offset, retry_count;

    retry_count = MAX_BREAD_TRYCOUNT;
next:
    if (*bh)
        brelse(*bh);

    *bh = NULL;
    iblock = *pos >> sb->s_blocksize_bits;
    err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
    if (err || !phys)
        return -1;  /* beyond EOF or error */

    fat_dir_readahead(dir, iblock, phys);

    *bh = sb_bread(sb, phys);
    if (*bh == NULL) {
        fat_msg(sb, KERN_DEBUG, "Directory bread(block %llu) failed",
                (llu)phys);
        /* skip this block */
        *pos = (iblock + 1) << sb->s_blocksize_bits;
        if (retry_count-- < 0)
            return -1;
        goto next;
    }

    offset = *pos & (sb->s_blocksize - 1);
    *pos += sizeof(struct msdos_dir_entry);
    *de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

    return 0;
}
/*
 * Returns the inode number of the directory entry at offset pos. If bh is
 * non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
 * returned in bh.
 * AV. Most often we do it item-by-item. Makes sense to optimize.
 * AV. OK, there we go: if both bh and de are non-NULL we assume that we just
 * AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
 * AV. It's done in fat_get_entry() (inlined), here the slow case lives.
 * AV. Additionally, when we return -1 (i.e. reached the end of directory)
 * AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
                          struct buffer_head **bh, struct msdos_dir_entry **de)
{
    struct super_block *sb = dir->i_sb;
    sector_t phys, iblock;
    unsigned long mapped_blocks;
    int err, offset;

next:
    if (*bh)
        brelse(*bh);

    *bh = NULL;
    iblock = *pos >> sb->s_blocksize_bits;
    err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
    if (err || !phys)
        return -1;  /* beyond EOF or error */

    fat_dir_readahead(dir, iblock, phys);

    *bh = sb_bread(sb, phys);
    if (*bh == NULL) {
#ifndef CONFIG_MACH_LGE
        fat_msg_ratelimit(sb, KERN_ERR,
                          "Directory bread(block %llu) failed", (llu)phys);
#endif
        /* skip this block */
        *pos = (iblock + 1) << sb->s_blocksize_bits;
        goto next;
    }

    offset = *pos & (sb->s_blocksize - 1);
    *pos += sizeof(struct msdos_dir_entry);
    *de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

    return 0;
}
int uxfs_find_entry(struct inode *dip, char *name)
{
    struct uxfs_inode_info *uxi = uxfs_i(dip);
    struct super_block *sb = dip->i_sb;
    struct buffer_head *bh;
    struct uxfs_dirent *dirent;
    int i, blk;

    for (blk = 0; blk < uxi->uip.i_blocks; blk++) {
        bh = sb_bread(sb, uxi->uip.i_addr[blk]);
        if (!bh)    /* skip unreadable directory blocks */
            continue;
        dirent = (struct uxfs_dirent *)bh->b_data;
        for (i = 0; i < UXFS_DIRS_PER_BLOCK; i++) {
            if (strcmp(dirent->d_name, name) == 0) {
                int ino = dirent->d_ino;

                brelse(bh);
                return ino;
            }
            dirent++;
        }
        brelse(bh); /* release this block before moving to the next */
    }
    return 0;
}
struct buffer_head *
befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
{
    struct buffer_head *bh = NULL;
    befs_blocknr_t block = 0;
    struct befs_sb_info *befs_sb = BEFS_SB(sb);

    befs_debug(sb, "---> Enter %s [%u, %hu, %hu]", __func__,
               iaddr.allocation_group, iaddr.start, iaddr.len);

    if (iaddr.allocation_group > befs_sb->num_ags) {
        befs_error(sb, "BEFS: Invalid allocation group %u, max is %u",
                   iaddr.allocation_group, befs_sb->num_ags);
        goto error;
    }

    block = iaddr2blockno(sb, &iaddr);

    befs_debug(sb, "%s: offset = %lu", __func__, (unsigned long)block);

    bh = sb_bread(sb, block);

    if (bh == NULL) {
        befs_error(sb, "Failed to read block %lu",
                   (unsigned long)block);
        goto error;
    }

    befs_debug(sb, "<--- %s", __func__);
    return bh;

error:
    befs_debug(sb, "<--- %s ERROR", __func__);
    return NULL;
}
/* Save the modified inode */
int simplefs_inode_save(struct super_block *sb, struct simplefs_inode *sfs_inode)
{
    struct simplefs_inode *inode_iterator;
    struct buffer_head *bh;

    bh = sb_bread(sb, SIMPLEFS_INODESTORE_BLOCK_NUMBER);
    BUG_ON(!bh);

    if (mutex_lock_interruptible(&simplefs_sb_lock)) {
        sfs_trace("Failed to acquire mutex lock\n");
        brelse(bh);
        return -EINTR;
    }

    inode_iterator = simplefs_inode_search(sb,
            (struct simplefs_inode *)bh->b_data,
            sfs_inode);

    if (likely(inode_iterator)) {
        memcpy(inode_iterator, sfs_inode, sizeof(*inode_iterator));
        printk(KERN_INFO "The inode updated\n");

        mark_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    } else {
        brelse(bh);
        mutex_unlock(&simplefs_sb_lock);
        printk(KERN_ERR
               "The new filesize could not be stored to the inode.\n");
        return -EIO;
    }

    brelse(bh);
    mutex_unlock(&simplefs_sb_lock);

    return 0;
}
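/*
 * Sketch of the lookup helper used above, assuming the usual simplefs
 * layout: the inode store block read via sb_bread() holds inodes_count
 * packed struct simplefs_inode records, matched by inode_no. The exact
 * helper in the code base may differ; this only illustrates the search
 * over the buffer contents.
 */
static struct simplefs_inode *simplefs_inode_search(struct super_block *sb,
        struct simplefs_inode *start, struct simplefs_inode *search)
{
    uint64_t count;

    for (count = 0; count < SIMPLEFS_SB(sb)->inodes_count;
         count++, start++) {
        if (start->inode_no == search->inode_no)
            return start;
    }
    return NULL;
}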
static int affs_fill_super(struct super_block *sb, void *data, int silent)
{
    struct affs_sb_info *sbi;
    struct buffer_head *root_bh = NULL;
    struct buffer_head *boot_bh;
    struct inode *root_inode = NULL;
    s32 root_block;
    int size, blocksize;
    u32 chksum;
    int num_bm;
    int i, j;
    s32 key;
    uid_t uid;
    gid_t gid;
    int reserved;
    unsigned long mount_flags;
    int tmp_flags;  /* fix remount prototype... */
    u8 sig[4];
    int ret = -EINVAL;

    save_mount_options(sb, data);

    pr_debug("AFFS: read_super(%s)\n", data ? (const char *)data : "no options");

    sb->s_magic = AFFS_SUPER_MAGIC;
    sb->s_op = &affs_sops;
    sb->s_flags |= MS_NODIRATIME;

    sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    sb->s_fs_info = sbi;
    mutex_init(&sbi->s_bmlock);
    spin_lock_init(&sbi->symlink_lock);

    if (!parse_options(data, &uid, &gid, &i, &reserved, &root_block,
                       &blocksize, &sbi->s_prefix,
                       sbi->s_volume, &mount_flags)) {
        printk(KERN_ERR "AFFS: Error parsing options\n");
        kfree(sbi->s_prefix);
        kfree(sbi);
        return -EINVAL;
    }
    /* N.B. after this point s_prefix must be released */

    sbi->s_flags = mount_flags;
    sbi->s_mode = i;
    sbi->s_uid = uid;
    sbi->s_gid = gid;
    sbi->s_reserved = reserved;

    /* Get the size of the device in 512-byte blocks.
     * If we later see that the partition uses bigger
     * blocks, we will have to change it.
     */

    size = sb->s_bdev->bd_inode->i_size >> 9;
    pr_debug("AFFS: initial blocksize=%d, #blocks=%d\n", 512, size);

    affs_set_blocksize(sb, PAGE_SIZE);
    /* Try to find root block. Its location depends on the block size. */

    i = 512;
    j = 4096;
    if (blocksize > 0) {
        i = j = blocksize;
        size = size / (blocksize / 512);
    }
    for (blocksize = i, key = 0; blocksize <= j; blocksize <<= 1, size >>= 1) {
        sbi->s_root_block = root_block;
        if (root_block < 0)
            sbi->s_root_block = (reserved + size - 1) / 2;
        pr_debug("AFFS: setting blocksize to %d\n", blocksize);
        affs_set_blocksize(sb, blocksize);
        sbi->s_partition_size = size;

        /* The root block location that was calculated above is not
         * correct if the partition size is an odd number of 512-
         * byte blocks, which will be rounded down to a number of
         * 1024-byte blocks, and if there were an even number of
         * reserved blocks. Ideally, all partition checkers should
         * report the real number of blocks of the real blocksize,
         * but since this just cannot be done, we have to try to
         * find the root block anyways. In the above case, it is one
         * block behind the calculated one. So we check this one, too.
         */
        for (num_bm = 0; num_bm < 2; num_bm++) {
            pr_debug("AFFS: Dev %s, trying root=%u, bs=%d, "
                     "size=%d, reserved=%d\n",
                     sb->s_id,
                     sbi->s_root_block + num_bm,
                     blocksize, size, reserved);
            root_bh = affs_bread(sb, sbi->s_root_block + num_bm);
            if (!root_bh)
                continue;
            if (!affs_checksum_block(sb, root_bh) &&
                be32_to_cpu(AFFS_ROOT_HEAD(root_bh)->ptype) == T_SHORT &&
                be32_to_cpu(AFFS_ROOT_TAIL(sb, root_bh)->stype) == ST_ROOT) {
                sbi->s_hashsize = blocksize / 4 - 56;
                sbi->s_root_block += num_bm;
                key = 1;
                goto got_root;
            }
            affs_brelse(root_bh);
            root_bh = NULL;
        }
    }
    if (!silent)
        printk(KERN_ERR "AFFS: No valid root block on device %s\n",
               sb->s_id);
    goto out_error;

    /* N.B. after this point bh must be released */
got_root:
    root_block = sbi->s_root_block;

    /* Find out which kind of FS we have */
    boot_bh = sb_bread(sb, 0);
    if (!boot_bh) {
        printk(KERN_ERR "AFFS: Cannot read boot block\n");
        goto out_error;
    }
    memcpy(sig, boot_bh->b_data, 4);
    brelse(boot_bh);
    chksum = be32_to_cpu(*(__be32 *)sig);

    /* Dircache filesystems are compatible with non-dircache ones
     * when reading. As long as they aren't supported, writing is
     * not recommended.
     */
    if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
         || chksum == MUFS_DCOFS) && !(sb->s_flags & MS_RDONLY)) {
        printk(KERN_NOTICE "AFFS: Dircache FS - mounting %s read only\n",
               sb->s_id);
        sb->s_flags |= MS_RDONLY;
    }
    switch (chksum) {
    case MUFS_FS:
    case MUFS_INTLFFS:
    case MUFS_DCFFS:
        sbi->s_flags |= SF_MUFS;
        /* fall thru */
    case FS_INTLFFS:
    case FS_DCFFS:
        sbi->s_flags |= SF_INTL;
        break;
    case MUFS_FFS:
        sbi->s_flags |= SF_MUFS;
        break;
    case FS_FFS:
        break;
    case MUFS_OFS:
        sbi->s_flags |= SF_MUFS;
        /* fall thru */
    case FS_OFS:
        sbi->s_flags |= SF_OFS;
        sb->s_flags |= MS_NOEXEC;
        break;
    case MUFS_DCOFS:
    case MUFS_INTLOFS:
        sbi->s_flags |= SF_MUFS;
    case FS_DCOFS:
    case FS_INTLOFS:
        sbi->s_flags |= SF_INTL | SF_OFS;
        sb->s_flags |= MS_NOEXEC;
        break;
    default:
        printk(KERN_ERR "AFFS: Unknown filesystem on device %s: %08X\n",
               sb->s_id, chksum);
        goto out_error;
    }

    if (mount_flags & SF_VERBOSE) {
        u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0];
        printk(KERN_NOTICE "AFFS: Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
               len > 31 ? 31 : len,
               AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1,
               sig, sig[3] + '0', blocksize);
    }

    sb->s_flags |= MS_NODEV | MS_NOSUID;

    sbi->s_data_blksize = sb->s_blocksize;
    if (sbi->s_flags & SF_OFS)
        sbi->s_data_blksize -= 24;

    /* Keep super block in cache */
    sbi->s_root_bh = root_bh;
    /* N.B. after this point s_root_bh must be released */

    tmp_flags = sb->s_flags;
    if (affs_init_bitmap(sb, &tmp_flags))
        goto out_error;
    sb->s_flags = tmp_flags;

    /* set up enough so that it can read an inode */

    root_inode = affs_iget(sb, root_block);
    if (IS_ERR(root_inode)) {
        ret = PTR_ERR(root_inode);
        goto out_error_noinode;
    }

    sb->s_root = d_alloc_root(root_inode);
    if (!sb->s_root) {
        printk(KERN_ERR "AFFS: Get root inode failed\n");
        goto out_error;
    }
    sb->s_root->d_op = &affs_dentry_operations;

    pr_debug("AFFS: s_flags=%lX\n", sb->s_flags);
    return 0;

    /*
     * Begin the cascaded cleanup ...
     */
out_error:
    if (root_inode)
        iput(root_inode);
out_error_noinode:
    kfree(sbi->s_bitmap);
    affs_brelse(root_bh);
    kfree(sbi->s_prefix);
    kfree(sbi);
    sb->s_fs_info = NULL;
    return ret;
}
/*
 *	jfs_extendfs()
 *
 * function: extend file system;
 *
 *   |-------------------------------|----------|----------|
 *   file system space               fsck       inline log
 *                                   workspace  space
 *
 * input:
 *	new LVSize: in LV blocks (required)
 *	new LogSize: in LV blocks (optional)
 *	new FSSize: in LV blocks (optional)
 *
 * new configuration:
 * 1. set new LogSize as specified or default from new LVSize;
 * 2. compute new FSCKSize from new LVSize;
 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
 *    assert(new FSSize >= old FSSize),
 *    i.e., file system must not be shrinked;
 */
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
    int rc = 0;
    struct jfs_sb_info *sbi = JFS_SBI(sb);
    struct inode *ipbmap = sbi->ipbmap;
    struct inode *ipbmap2;
    struct inode *ipimap = sbi->ipimap;
    struct jfs_log *log = sbi->log;
    struct bmap *bmp = sbi->bmap;
    s64 newLogAddress, newFSCKAddress;
    int newFSCKSize;
    s64 newMapSize = 0, mapSize;
    s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
    s64 oldLVSize;
    s64 newFSSize;
    s64 VolumeSize;
    int newNpages = 0, nPages, newPage, xlen, t32;
    int tid;
    int log_formatted = 0;
    struct inode *iplist[1];
    struct jfs_superblock *j_sb, *j_sb2;
    uint old_agsize;
    struct buffer_head *bh, *bh2;

    /* If the volume hasn't grown, get out now */

    if (sbi->mntflag & JFS_INLINELOG)
        oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
    else
        oldLVSize = addressPXD(&sbi->fsckpxd) + lengthPXD(&sbi->fsckpxd);

    if (oldLVSize >= newLVSize) {
        printk(KERN_WARNING
               "jfs_extendfs: volume hasn't grown, returning\n");
        goto out;
    }

    VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

    if (VolumeSize) {
        if (newLVSize > VolumeSize) {
            printk(KERN_WARNING "jfs_extendfs: invalid size\n");
            rc = -EINVAL;
            goto out;
        }
    } else {
        /* check the device */
        bh = sb_bread(sb, newLVSize - 1);
        if (!bh) {
            printk(KERN_WARNING "jfs_extendfs: invalid size\n");
            rc = -EINVAL;
            goto out;
        }
        bforget(bh);
    }

    /* Can't extend write-protected drive */

    if (isReadOnly(ipbmap)) {
        printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
        rc = -EROFS;
        goto out;
    }

    /*
     *	reconfigure LV spaces
     *	---------------------
     *
     * validate new size, or, if not specified, determine new size
     */

    /*
     * reconfigure inline log space:
     */
    if ((sbi->mntflag & JFS_INLINELOG)) {
        if (newLogSize == 0) {
            /*
             * no size specified: default to 1/256 of aggregate
             * size; rounded up to a megabyte boundary;
             */
            newLogSize = newLVSize >> 8;
            t32 = (1 << (20 - sbi->l2bsize)) - 1;
            newLogSize = (newLogSize + t32) & ~t32;
            newLogSize =
                min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
        } else {
int uxfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
{
    struct uxfs_inode *nip;
    struct buffer_head *bh;
    struct super_block *sb = dip->i_sb;
    struct uxfs_dirent *dirent;
    struct inode *inode;
    ino_t inum = 0;
    int blk;

    /*
     * Make sure there isn't already an entry. If not,
     * allocate one, a new inode and new incore inode.
     */
    inum = uxfs_find_entry(dip, (char *)dentry->d_name.name);
    if (inum)
        return -EEXIST;

    inode = new_inode(sb);
    if (!inode)
        return -ENOSPC;
    inum = uxfs_ialloc(sb);
    if (!inum) {
        iput(inode);
        return -ENOSPC;
    }
    uxfs_diradd(dip, (char *)dentry->d_name.name, inum);

    inode->i_uid = current_fsuid();
    inode->i_gid = (dip->i_mode & S_ISGID) ? dip->i_gid : current_fsgid();
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
    inode->i_blocks = 1;
    // inode->i_blksize = UXFS_BSIZE;
    inode->i_op = &uxfs_dir_inops;
    inode->i_fop = &uxfs_dir_operations;
    inode->i_mapping->a_ops = &uxfs_aops;
    inode->i_mode = mode | S_IFDIR;
    inode->i_ino = inum;
    inode->i_size = UXFS_BSIZE;
    inode->i_private = uxfs_i(inode); //initialize private, again!
    set_nlink(inode, 2);

    nip = (struct uxfs_inode *)inode->i_private;
    nip->i_mode = mode | S_IFDIR;
    nip->i_nlink = 2;
    nip->i_atime = nip->i_ctime = nip->i_mtime = CURRENT_TIME.tv_sec;
    nip->i_uid = current_fsuid();
    nip->i_gid = (dip->i_mode & S_ISGID) ? dip->i_gid : current_fsgid();
    nip->i_size = 512;
    nip->i_blocks = 1;
    memset(nip->i_addr, 0, UXFS_DIRECT_BLOCKS * sizeof(nip->i_addr[0]));

    blk = uxfs_block_alloc(sb);
    nip->i_addr[0] = blk;
    bh = sb_bread(sb, blk);
    if (!bh) {
        /* Minimal error handling: drop the half-built inode. */
        iput(inode);
        return -EIO;
    }
    memset(bh->b_data, 0, UXFS_BSIZE);
    dirent = (struct uxfs_dirent *)bh->b_data;
    dirent->d_ino = inum;
    strcpy(dirent->d_name, ".");
    dirent++;
    dirent->d_ino = inode->i_ino;
    strcpy(dirent->d_name, "..");

    mark_buffer_dirty(bh);
    brelse(bh);
    insert_inode_hash(inode);
    d_instantiate(dentry, inode);
    mark_inode_dirty(inode);

    /*
     * Increment the link count of the parent directory.
     */
    inode_inc_link_count(dip);
    mark_inode_dirty(dip);
    return 0;
}
struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
{
    struct bfs_inode *di;
    struct inode *inode;
    struct buffer_head *bh;
    int block, off;

    inode = iget_locked(sb, ino);
    if (!inode) /* iget_locked() returns NULL on allocation failure */
        return ERR_PTR(-ENOMEM);
    if (!(inode->i_state & I_NEW))
        return inode;

    if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
        printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
        goto error;
    }

    block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
    bh = sb_bread(inode->i_sb, block);
    if (!bh) {
        printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, ino);
        goto error;
    }

    off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
    di = (struct bfs_inode *)bh->b_data + off;

    inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode);
    if (le32_to_cpu(di->i_vtype) == BFS_VDIR) {
        inode->i_mode |= S_IFDIR;
        inode->i_op = &bfs_dir_inops;
        inode->i_fop = &bfs_dir_operations;
    } else if (le32_to_cpu(di->i_vtype) == BFS_VREG) {
        inode->i_mode |= S_IFREG;
        inode->i_op = &bfs_file_inops;
        inode->i_fop = &bfs_file_operations;
        inode->i_mapping->a_ops = &bfs_aops;
    }

    BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock);
    BFS_I(inode)->i_eblock = le32_to_cpu(di->i_eblock);
    BFS_I(inode)->i_dsk_ino = le16_to_cpu(di->i_ino);
    inode->i_uid = le32_to_cpu(di->i_uid);
    inode->i_gid = le32_to_cpu(di->i_gid);
    inode->i_nlink = le32_to_cpu(di->i_nlink);
    inode->i_size = BFS_FILESIZE(di);
    inode->i_blocks = BFS_FILEBLOCKS(di);
    inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
    inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
    inode->i_ctime.tv_sec = le32_to_cpu(di->i_ctime);
    inode->i_atime.tv_nsec = 0;
    inode->i_mtime.tv_nsec = 0;
    inode->i_ctime.tv_nsec = 0;

    brelse(bh);
    unlock_new_inode(inode);
    return inode;

error:
    iget_failed(inode);
    return ERR_PTR(-EIO);
}
static int bfs_fill_super(struct super_block *s, void *data, int silent)
{
    struct buffer_head *bh, *sbh;
    struct bfs_super_block *bfs_sb;
    struct inode *inode;
    unsigned i, imap_len;
    struct bfs_sb_info *info;
    int ret = -EINVAL;
    unsigned long i_sblock, i_eblock, i_eoff, s_size;

    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info)
        return -ENOMEM;
    mutex_init(&info->bfs_lock);
    s->s_fs_info = info;

    sb_set_blocksize(s, BFS_BSIZE);

    sbh = sb_bread(s, 0);
    if (!sbh)
        goto out;
    bfs_sb = (struct bfs_super_block *)sbh->b_data;
    if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
        if (!silent)
            printf("No BFS filesystem on %s (magic=%08x)\n",
                   s->s_id, le32_to_cpu(bfs_sb->s_magic));
        goto out1;
    }
    if (BFS_UNCLEAN(bfs_sb, s) && !silent)
        printf("%s is unclean, continuing\n", s->s_id);

    s->s_magic = BFS_MAGIC;

    if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
        printf("Superblock is corrupted\n");
        goto out1;
    }

    info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
                     sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
    imap_len = (info->si_lasti / 8) + 1;
    info->si_imap = kzalloc(imap_len, GFP_KERNEL);
    if (!info->si_imap)
        goto out1;
    for (i = 0; i < BFS_ROOT_INO; i++)
        set_bit(i, info->si_imap);

    s->s_op = &bfs_sops;
    inode = bfs_iget(s, BFS_ROOT_INO);
    if (IS_ERR(inode)) {
        ret = PTR_ERR(inode);
        goto out2;
    }
    s->s_root = d_alloc_root(inode);
    if (!s->s_root) {
        iput(inode);
        ret = -ENOMEM;
        goto out2;
    }

    info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
    info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 -
                      le32_to_cpu(bfs_sb->s_start)) >> BFS_BSIZE_BITS;
    info->si_freei = 0;
    info->si_lf_eblk = 0;

    /* can we read the last block? */
    bh = sb_bread(s, info->si_blocks - 1);
    if (!bh) {
        printf("Last block not available: %lu\n", info->si_blocks - 1);
        ret = -EIO;
        goto out3;
    }
    brelse(bh);

    bh = NULL;
    for (i = BFS_ROOT_INO; i <= info->si_lasti; i++) {
        struct bfs_inode *di;
        int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
        int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
        unsigned long eblock;

        if (!off) {
            brelse(bh);
            bh = sb_bread(s, block);
        }

        if (!bh)
            continue;

        di = (struct bfs_inode *)bh->b_data + off;

        /* test if filesystem is not corrupted */
        i_eoff = le32_to_cpu(di->i_eoffset);
        i_sblock = le32_to_cpu(di->i_sblock);
        i_eblock = le32_to_cpu(di->i_eblock);
        s_size = le32_to_cpu(bfs_sb->s_end);

        if (i_sblock > info->si_blocks ||
            i_eblock > info->si_blocks ||
            i_sblock > i_eblock ||
            i_eoff > s_size ||
            i_sblock * BFS_BSIZE > i_eoff) {

            printf("Inode 0x%08x corrupted\n", i);

            brelse(bh);
            ret = -EIO;
            goto out3;
        }

        if (!di->i_ino) {
            info->si_freei++;
            continue;
        }
        set_bit(i, info->si_imap);
        info->si_freeb -= BFS_FILEBLOCKS(di);

        eblock = le32_to_cpu(di->i_eblock);
        if (eblock > info->si_lf_eblk)
            info->si_lf_eblk = eblock;
    }
    brelse(bh);
    brelse(sbh);
    dump_imap("read_super", s);
    return 0;

out3:
    dput(s->s_root);
    s->s_root = NULL;
out2:
    kfree(info->si_imap);
out1:
    brelse(sbh);
out:
    mutex_destroy(&info->bfs_lock);
    kfree(info);
    s->s_fs_info = NULL;
    return ret;
}
static int minix_fill_super(struct super_block *s, void *data, int silent)
{
    struct buffer_head *bh;
    struct buffer_head **map;
    struct minix_super_block *ms;
    struct minix3_super_block *m3s = NULL;
    unsigned long i, block;
    struct inode *root_inode;
    struct minix_sb_info *sbi;
    int ret = -EINVAL;

    sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    s->s_fs_info = sbi;

    BUILD_BUG_ON(32 != sizeof(struct minix_inode));
    BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

    if (!sb_set_blocksize(s, BLOCK_SIZE))
        goto out_bad_hblock;

    if (!(bh = sb_bread(s, 1)))
        goto out_bad_sb;

    ms = (struct minix_super_block *)bh->b_data;
    sbi->s_ms = ms;
    sbi->s_sbh = bh;
    sbi->s_mount_state = ms->s_state;
    sbi->s_ninodes = ms->s_ninodes;
    sbi->s_nzones = ms->s_nzones;
    sbi->s_imap_blocks = ms->s_imap_blocks;
    sbi->s_zmap_blocks = ms->s_zmap_blocks;
    sbi->s_firstdatazone = ms->s_firstdatazone;
    sbi->s_log_zone_size = ms->s_log_zone_size;
    sbi->s_max_size = ms->s_max_size;
    s->s_magic = ms->s_magic;
    if (s->s_magic == MINIX_SUPER_MAGIC) {
        sbi->s_version = MINIX_V1;
        sbi->s_dirsize = 16;
        sbi->s_namelen = 14;
        s->s_max_links = MINIX_LINK_MAX;
    } else if (s->s_magic == MINIX_SUPER_MAGIC2) {
        sbi->s_version = MINIX_V1;
        sbi->s_dirsize = 32;
        sbi->s_namelen = 30;
        s->s_max_links = MINIX_LINK_MAX;
    } else if (s->s_magic == MINIX2_SUPER_MAGIC) {
        sbi->s_version = MINIX_V2;
        sbi->s_nzones = ms->s_zones;
        sbi->s_dirsize = 16;
        sbi->s_namelen = 14;
        s->s_max_links = MINIX2_LINK_MAX;
    } else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
        sbi->s_version = MINIX_V2;
        sbi->s_nzones = ms->s_zones;
        sbi->s_dirsize = 32;
        sbi->s_namelen = 30;
        s->s_max_links = MINIX2_LINK_MAX;
    } else if (*(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
        m3s = (struct minix3_super_block *)bh->b_data;
        s->s_magic = m3s->s_magic;
        sbi->s_imap_blocks = m3s->s_imap_blocks;
        sbi->s_zmap_blocks = m3s->s_zmap_blocks;
        sbi->s_firstdatazone = m3s->s_firstdatazone;
        sbi->s_log_zone_size = m3s->s_log_zone_size;
        sbi->s_max_size = m3s->s_max_size;
        sbi->s_ninodes = m3s->s_ninodes;
        sbi->s_nzones = m3s->s_zones;
        sbi->s_dirsize = 64;
        sbi->s_namelen = 60;
        sbi->s_version = MINIX_V3;
        sbi->s_mount_state = MINIX_VALID_FS;
        sb_set_blocksize(s, m3s->s_blocksize);
        s->s_max_links = MINIX2_LINK_MAX;
    } else
        goto out_no_fs;

    /*
     * Allocate the buffer map to keep the superblock small.
     */
    if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
        goto out_illegal_sb;
    i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
    map = kzalloc(i, GFP_KERNEL);
    if (!map)
        goto out_no_map;
    sbi->s_imap = &map[0];
    sbi->s_zmap = &map[sbi->s_imap_blocks];

    block = 2;
    for (i = 0; i < sbi->s_imap_blocks; i++) {
        if (!(sbi->s_imap[i] = sb_bread(s, block)))
            goto out_no_bitmap;
        block++;
    }
    for (i = 0; i < sbi->s_zmap_blocks; i++) {
        if (!(sbi->s_zmap[i] = sb_bread(s, block)))
            goto out_no_bitmap;
        block++;
    }

    minix_set_bit(0, sbi->s_imap[0]->b_data);
    minix_set_bit(0, sbi->s_zmap[0]->b_data);

    /* Apparently minix can create filesystems that allocate more blocks for
     * the bitmaps than needed. We simply ignore that, but verify it didn't
     * create one with not enough blocks and bail out if so.
     */
    block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
    if (sbi->s_imap_blocks < block) {
        printk("MINIX-fs: file system does not have enough "
               "imap blocks allocated. Refusing to mount\n");
        goto out_no_bitmap;
    }

    block = minix_blocks_needed(
            (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
            s->s_blocksize);
    if (sbi->s_zmap_blocks < block) {
        printk("MINIX-fs: file system does not have enough "
               "zmap blocks allocated. Refusing to mount.\n");
        goto out_no_bitmap;
    }

    /* set up enough so that it can read an inode */
    s->s_op = &minix_sops;
    root_inode = minix_iget(s, MINIX_ROOT_INO);
    if (IS_ERR(root_inode)) {
        ret = PTR_ERR(root_inode);
        goto out_no_root;
    }

    ret = -ENOMEM;
    s->s_root = d_make_root(root_inode);
    if (!s->s_root)
        goto out_no_root;

    if (!(s->s_flags & MS_RDONLY)) {
        if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
            ms->s_state &= ~MINIX_VALID_FS;
        mark_buffer_dirty(bh);
    }
    if (!(sbi->s_mount_state & MINIX_VALID_FS))
        printk("MINIX-fs: mounting unchecked file system, "
               "running fsck is recommended\n");
    else if (sbi->s_mount_state & MINIX_ERROR_FS)
        printk("MINIX-fs: mounting file system with errors, "
               "running fsck is recommended\n");

    return 0;

out_no_root:
    if (!silent)
        printk("MINIX-fs: get root inode failed\n");
    goto out_freemap;

out_no_bitmap:
    printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
    for (i = 0; i < sbi->s_imap_blocks; i++)
        brelse(sbi->s_imap[i]);
    for (i = 0; i < sbi->s_zmap_blocks; i++)
        brelse(sbi->s_zmap[i]);
    kfree(sbi->s_imap);
    goto out_release;

out_no_map:
    ret = -ENOMEM;
    if (!silent)
        printk("MINIX-fs: can't allocate map\n");
    goto out_release;

out_illegal_sb:
    if (!silent)
        printk("MINIX-fs: bad superblock\n");
    goto out_release;

out_no_fs:
    if (!silent)
        printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
               "on device %s.\n", s->s_id);
out_release:
    brelse(bh);
    goto out;

out_bad_hblock:
    printk("MINIX-fs: blocksize too small for device\n");
    goto out;

out_bad_sb:
    printk("MINIX-fs: unable to read superblock\n");
out:
    s->s_fs_info = NULL;
    kfree(sbi);
    return ret;
}
/*
 * add a new entry to a directory
 *
 * @dir_vi: the VFS inode of the directory
 * @inode_no: inode number of the new entry
 * @filename: name of the new entry
 * @length: size of name
 *
 * return: 0 on success, error code otherwise
 */
int wtfs_add_entry(struct inode *dir_vi, uint64_t inode_no,
                   const char *filename, size_t length)
{
    struct super_block *vsb = dir_vi->i_sb;
    struct wtfs_sb_info *sbi = WTFS_SB_INFO(vsb);
    struct wtfs_inode_info *dir_info = WTFS_INODE_INFO(dir_vi);
    struct wtfs_dir_block *blk = NULL;
    struct buffer_head *bh = NULL, *bh2 = NULL;
    uint64_t next = dir_info->first_block, blk_no = 0;
    int i;
    int ret = -EIO;

    /* check name */
    if (length == 0) {
        wtfs_error("no dentry name specified\n");
        ret = -ENOENT;
        goto error;
    }
    if (length >= WTFS_FILENAME_MAX) {
        wtfs_error("dentry name too long %s\n", filename);
        ret = -ENAMETOOLONG;
        goto error;
    }

    /* find an empty entry in existing entries */
    while (1) {
        if ((bh = sb_bread(vsb, next)) == NULL) {
            wtfs_error("unable to read the block %llu\n", next);
            goto error;
        }

        blk = (struct wtfs_dir_block *)bh->b_data;
        for (i = 0; i < WTFS_INODE_COUNT_PER_TABLE; ++i) {
            /* find it */
            if (blk->entries[i].inode_no == 0) {
                blk->entries[i].inode_no = inode_no;
                strncpy(blk->entries[i].filename, filename, length);
                mark_buffer_dirty(bh);
                brelse(bh);
                dir_vi->i_ctime = CURRENT_TIME_SEC;
                dir_vi->i_mtime = CURRENT_TIME_SEC;
                ++dir_info->dir_entry_count;
                mark_inode_dirty(dir_vi);
                return 0;
            }
        }

        next = wtfs64_to_cpu(blk->next);
        /*
         * do not release the last block because we are to set its
         * pointer
         */
        if (next == 0) {
            break;
        }
        brelse(bh);
    }

    /* entries used up, so we have to create a new data block */
    if ((blk_no = wtfs_alloc_block(vsb)) == 0) {
        ret = -ENOSPC;
        goto error;
    }
    bh2 = wtfs_init_linked_block(vsb, blk_no, bh);
    if (IS_ERR(bh2)) {
        ret = PTR_ERR(bh2);
        bh2 = NULL;
        goto error;
    }
    brelse(bh); /* now we can release the previous block */
    blk = (struct wtfs_dir_block *)bh2->b_data;
    blk->entries[0].inode_no = inode_no;
    strncpy(blk->entries[0].filename, filename, length);
    mark_buffer_dirty(bh2);
    brelse(bh2);

    /* update parent directory's information */
    dir_vi->i_ctime = dir_vi->i_mtime = CURRENT_TIME_SEC;
    ++dir_vi->i_blocks;
    i_size_write(dir_vi, i_size_read(dir_vi) + sbi->block_size);
    ++dir_info->dir_entry_count;
    mark_inode_dirty(dir_vi);
    return 0;

error:
    if (bh != NULL) {
        brelse(bh);
    }
    if (bh2 != NULL) {
        brelse(bh2);
    }
    if (blk_no != 0) {
        wtfs_free_block(vsb, blk_no);
    }
    return ret;
}
static int v7_fill_super(struct super_block *sb, void *data, int silent)
{
    struct sysv_sb_info *sbi;
    struct buffer_head *bh, *bh2 = NULL;
    struct v7_super_block *v7sb;
    struct sysv_inode *v7i;

    if (440 != sizeof(struct v7_super_block))
        panic("V7 FS: bad super-block size");
    if (64 != sizeof(struct sysv_inode))
        panic("sysv fs: bad i-node size");

    sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    memset(sbi, 0, sizeof(struct sysv_sb_info));

    sbi->s_sb = sb;
    sbi->s_block_base = 0;
    sbi->s_type = FSTYPE_V7;
    sbi->s_bytesex = BYTESEX_PDP;
    sb->s_fs_info = sbi;

    sb_set_blocksize(sb, 512);

    if ((bh = sb_bread(sb, 1)) == NULL) {
        if (!silent)
            printk("VFS: unable to read V7 FS superblock on "
                   "device %s.\n", sb->s_id);
        goto failed;
    }

    /* plausibility check on superblock */
    v7sb = (struct v7_super_block *)bh->b_data;
    if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE ||
        fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD ||
        fs32_to_cpu(sbi, v7sb->s_time) == 0)
        goto failed;

    /* plausibility check on root inode: it is a directory,
       with a nonzero size that is a multiple of 16 */
    if ((bh2 = sb_bread(sb, 2)) == NULL)
        goto failed;
    v7i = (struct sysv_inode *)(bh2->b_data + 64);
    if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR ||
        (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
        (fs32_to_cpu(sbi, v7i->i_size) & 017) != 0)
        goto failed;
    brelse(bh2);
    bh2 = NULL;

    sbi->s_bh1 = bh;
    sbi->s_bh2 = bh;
    if (complete_read_super(sb, silent, 1))
        return 0;

failed:
    brelse(bh2);
    brelse(bh);
    kfree(sbi);
    return -EINVAL;
}