static long fat_fallocate(struct file *file, int mode,
			  loff_t offset, loff_t len)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	int cluster, nr_cluster, fclus, dclus, free_bytes, nr_bytes;
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	/* Only preallocation with FALLOC_FL_KEEP_SIZE is supported. */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/* The requested range must extend beyond the current allocation. */
	if ((offset + len) <= MSDOS_I(inode)->mmu_private) {
		fat_msg(sb, KERN_ERR, "fat_fallocate():Blocks already allocated");
		return -EINVAL;
	}

	if ((mode & FALLOC_FL_KEEP_SIZE)) {
		if (inode->i_size > 0) {
			/* Find the last cluster to see how much slack it still has. */
			err = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
			if (err < 0) {
				fat_msg(sb, KERN_ERR, "fat_fallocate():fat_get_cluster() error");
				return err;
			}
			free_bytes = ((fclus + 1) << sbi->cluster_bits) - (inode->i_size);
			nr_bytes = (offset + len - inode->i_size) - free_bytes;
		} else
			nr_bytes = (offset + len - inode->i_size);

		/* Round the shortfall up to whole clusters. */
		nr_cluster = (nr_bytes + (sbi->cluster_size - 1)) >> sbi->cluster_bits;

		mutex_lock(&inode->i_mutex);
		/* Allocate the new clusters one at a time and link them into the chain. */
		while (nr_cluster-- > 0) {
			err = fat_alloc_clusters(inode, &cluster, 1);
			if (err) {
				fat_msg(sb, KERN_ERR, "fat_fallocate():fat_alloc_clusters() error");
				goto error;
			}
			err = fat_chain_add(inode, cluster, 1);
			if (err) {
				fat_free_clusters(inode, cluster);
				goto error;
			}
		}

		/* Update mmu_private to cover the newly allocated clusters. */
		err = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
		if (err < 0) {
			fat_msg(sb, KERN_ERR, "fat_fallocate():fat_get_cluster() error");
			goto error;
		}
		MSDOS_I(inode)->mmu_private = (fclus + 1) << sbi->cluster_bits;
	} else {
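/*
 * Illustration only (not part of the kernel sources above): a minimal
 * userspace sketch of how this path is exercised. FALLOC_FL_KEEP_SIZE asks
 * fat_fallocate() to preallocate clusters past EOF without updating the
 * file size. The mount point and file name are hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/fat/preallocated.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reserve 1 MiB of clusters past EOF; i_size stays unchanged. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}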
/*
 * Returns the inode number of the directory entry at offset pos. If bh is
 * non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
 * returned in bh.
 * AV. Most often we do it item-by-item. Makes sense to optimize.
 * AV. OK, there we go: if both bh and de are non-NULL we assume that we just
 * AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
 * AV. It's done in fat_get_entry() (inlined), here the slow case lives.
 * AV. Additionally, when we return -1 (i.e. reached the end of directory)
 * AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
			  struct buffer_head **bh,
			  struct msdos_dir_entry **de)
{
	struct super_block *sb = dir->i_sb;
	sector_t phys, iblock;
	unsigned long mapped_blocks;
	int err, offset;

next:
	if (*bh)
		brelse(*bh);

	*bh = NULL;
	iblock = *pos >> sb->s_blocksize_bits;
	err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
	if (err || !phys)
		return -1;	/* beyond EOF or error */

	fat_dir_readahead(dir, iblock, phys);

	*bh = sb_bread(sb, phys);
	if (*bh == NULL) {
		fat_msg(sb, KERN_DEBUG, "Directory bread(block %llu) failed",
			(llu)phys);
		/* skip this block */
		*pos = (iblock + 1) << sb->s_blocksize_bits;
		goto next;
	}

	offset = *pos & (sb->s_blocksize - 1);
	*pos += sizeof(struct msdos_dir_entry);
	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

	return 0;
}
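/*
 * For reference, the inlined fast path mentioned in the comment above lives
 * in fs/fat/fat.h. The sketch below follows the mainline version and may
 * differ slightly in other trees: as long as bh and de still point into the
 * block we already hold, it just steps to the next directory entry; only
 * when the block is exhausted does it fall back to fat__get_entry() above.
 */
static inline int fat_get_entry(struct inode *dir, loff_t *pos,
				struct buffer_head **bh,
				struct msdos_dir_entry **de)
{
	/* Fast path: the next entry is still inside the current block. */
	if (*bh && *de &&
	    (*de - (struct msdos_dir_entry *)(*bh)->b_data) <
				MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
		*pos += sizeof(struct msdos_dir_entry);
		(*de)++;
		return 0;
	}
	/* Slow path: map and read the next directory block. */
	return fat__get_entry(dir, pos, bh, de);
}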
/*
 * Returns the inode number of the directory entry at offset pos. If bh is
 * non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
 * returned in bh.
 * AV. Most often we do it item-by-item. Makes sense to optimize.
 * AV. OK, there we go: if both bh and de are non-NULL we assume that we just
 * AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
 * AV. It's done in fat_get_entry() (inlined), here the slow case lives.
 * AV. Additionally, when we return -1 (i.e. reached the end of directory)
 * AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
			  struct buffer_head **bh,
			  struct msdos_dir_entry **de)
{
	struct super_block *sb = dir->i_sb;
	sector_t phys, iblock;
	unsigned long mapped_blocks;
	int err, offset;

next:
	if (*bh)
		brelse(*bh);

	*bh = NULL;
	iblock = *pos >> sb->s_blocksize_bits;
	err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
	if (err || !phys)
		return -1;	/* beyond EOF or error */

	fat_dir_readahead(dir, iblock, phys);

	*bh = sb_bread(sb, phys);
	if (*bh == NULL) {
		/* LGE_CHANGE_S : avoid error message
		 * 2012-02-02, [email protected]
		 */
#ifndef CONFIG_MACH_LGE
		fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
			(llu)phys);
#endif
		/* LGE_CHANGE_E : avoid error message */
		/* skip this block */
		*pos = (iblock + 1) << sb->s_blocksize_bits;
		goto next;
	}

	offset = *pos & (sb->s_blocksize - 1);
	*pos += sizeof(struct msdos_dir_entry);
	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

	return 0;
}
/*
 * Returns the inode number of the directory entry at offset pos. If bh is
 * non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
 * returned in bh.
 * AV. Most often we do it item-by-item. Makes sense to optimize.
 * AV. OK, there we go: if both bh and de are non-NULL we assume that we just
 * AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
 * AV. It's done in fat_get_entry() (inlined), here the slow case lives.
 * AV. Additionally, when we return -1 (i.e. reached the end of directory)
 * AV. we make bh NULL.
 */
static int fat__get_entry(struct inode *dir, loff_t *pos,
			  struct buffer_head **bh,
			  struct msdos_dir_entry **de)
{
	struct super_block *sb = dir->i_sb;
	sector_t phys, iblock;
	unsigned long mapped_blocks;
	int err, offset;

next:
	if (*bh)
		brelse(*bh);

	*bh = NULL;
	iblock = *pos >> sb->s_blocksize_bits;
	err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
	if (err || !phys)
		return -1;	/* beyond EOF or error */

	fat_dir_readahead(dir, iblock, phys);

	*bh = sb_bread(sb, phys);
	if (*bh == NULL) {
		/* [email protected]: suppress the error message seen when the
		 * SD card is pulled without unmounting
		 */
#ifdef LGE_REMOVE_ERROR
		fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
			(llu)phys);
#endif
		/* skip this block */
		*pos = (iblock + 1) << sb->s_blocksize_bits;
		goto next;
	}

	offset = *pos & (sb->s_blocksize - 1);
	*pos += sizeof(struct msdos_dir_entry);
	*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);

	return 0;
}
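/*
 * Design note: both vendor variants above simply compile the diagnostic out.
 * An alternative, sketched below on the assumption that the tree provides
 * fat_msg_ratelimit() in fs/fat/fat.h (mainline does), keeps the message but
 * rate-limits it, so a yanked card cannot flood the log while genuine I/O
 * errors still leave a trace.
 */
	*bh = sb_bread(sb, phys);
	if (*bh == NULL) {
		fat_msg_ratelimit(sb, KERN_ERR,
				  "Directory bread(block %llu) failed",
				  (llu)phys);
		/* skip this block */
		*pos = (iblock + 1) << sb->s_blocksize_bits;
		goto next;
	}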