/*
 * jfs_direct_IO - direct I/O entry point for JFS files.
 *
 * Resolves the backing inode from the open file and hands the transfer
 * off to the generic direct-I/O path, supplying jfs_get_block as the
 * block-mapping callback.  Returns whatever generic_direct_IO returns.
 */
static int jfs_direct_IO(int rw, struct file *filp, struct kiobuf *iobuf,
			 unsigned long blocknr, int blocksize)
{
	/* The address_space host is the inode actually backing the pages. */
	return generic_direct_IO(rw, filp->f_dentry->d_inode->i_mapping->host,
				 iobuf, blocknr, blocksize, jfs_get_block);
}
/*
 * blkdev_direct_IO - direct I/O entry point for raw block devices.
 *
 * Pure pass-through: delegates to the generic direct-I/O machinery with
 * blkdev_get_block as the block-mapping callback.  Returns the result of
 * generic_direct_IO unchanged.
 */
static int blkdev_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
			    unsigned long blocknr, int blocksize)
{
	return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize,
				 blkdev_get_block);
}
/* * NOTE: In spite of direc IO, * we use page cache for extend_with_zeorfill */ ret = extend_with_zerofill(inode, (u32) old_size, (u32) offset); if (ret) goto end_log; inode->i_size = offset; set_mmu_private(inode, offset); zerofilled = TRUE; } } ret = generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, rfs_get_block); if (rw == WRITE) { if (ret == -EINVAL) { int err; /* * free some clusters if only unaligned offset & length * of iobuf exists which were allocated at direct IO op */ err = dealloc_clusters(inode, alloc_clus); if (!err) { inode->i_size = old_size; set_mmu_private(inode, old_size); }