Example 1
static ssize_t
HgfsAioRead(struct kiocb *iocb,      // IN:  I/O control block
            const struct iovec *iov, // OUT: Array of I/O buffers
            unsigned long numSegs,   // IN:  Number of buffers
            loff_t offset)           // IN:  Offset at which to read
{
   int result;

   ASSERT(iocb);
   ASSERT(iocb->ki_filp);
   ASSERT(iocb->ki_filp->f_dentry);
   ASSERT(iov);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsAioRead: was called\n"));

   result = HgfsRevalidate(iocb->ki_filp->f_dentry);
   if (result) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAioRead: invalid dentry\n"));
      goto out;
   }

   result = generic_file_aio_read(iocb, iov, numSegs, offset);
  out:
   return result;
}
Example 2
static ssize_t
smb_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	struct file * file = iocb->ki_filp;
	struct dentry * dentry = file->f_path.dentry;
	ssize_t	status;

	VERBOSE("file %s/%s, count=%lu@%lu\n", DENTRY_PATH(dentry),
		(unsigned long) iocb->ki_left, (unsigned long) pos);

	status = smb_revalidate_inode(dentry);
	if (status) {
		PARANOIA("%s/%s validation failed, error=%Zd\n",
			 DENTRY_PATH(dentry), status);
		goto out;
	}

	VERBOSE("before read, size=%ld, flags=%x, atime=%ld\n",
		(long)dentry->d_inode->i_size,
		dentry->d_inode->i_flags, dentry->d_inode->i_atime.tv_sec);

	status = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
	return status;
}
Example 3
ssize_t testfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t ret = 0;	/* ssize_t, not int, so the return value is not truncated */
	printk(KERN_INFO "testfs: testfs_aio_read\n");
	ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

	return ret;
}
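None of the examples shows the registration side. As a sketch of how such a wrapper is typically wired up on a 2.6-era kernel (the myfs_* names are hypothetical): on those kernels do_sync_read() funnels the plain read(2) path through .aio_read, so a single wrapper covers both entry points.

/* Hypothetical wiring for a wrapper like testfs_aio_read above. */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,		/* read(2) is routed into .aio_read */
	.aio_read	= myfs_aio_read,	/* the wrapper shown above */
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= generic_file_open,
};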
Example 4
ssize_t
xixfs_file_aio_read(
		struct kiocb *iocb,
#if LINUX_VERSION_2_6_19_REPLACE_INTERFACE
		const struct iovec *iov,
		unsigned long count,
#else
		char __user *buf, 
		size_t count, 
#endif
		
		loff_t pos
)
{

	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	PXIXFS_LINUX_FCB	pFCB = NULL;
	ssize_t 	ret = 0;

	XIXCORE_ASSERT(inode);
	pFCB = XIXFS_I(inode);
	XIXFS_ASSERT_FCB(pFCB);


	if(XIXCORE_TEST_FLAGS( pFCB->XixcoreFcb.FCBFlags, XIXCORE_FCB_CHANGE_DELETED )){
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("ERROR DELETED FILE \n"));
		//printk(KERN_DEBUG "xixfs_file_aio_read ERROR\n");
		return -EPERM;
	}

	DebugTrace(DEBUG_LEVEL_ERROR, (DEBUG_TARGET_FCB|DEBUG_TARGET_VFSAPIT), 
		("ENTER xixfs_file_aio_read .\n"));	
#if LINUX_VERSION_2_6_19_REPLACE_INTERFACE
	ret = generic_file_aio_read(iocb, iov, count, pos);
#else
	ret =  generic_file_aio_read(iocb, buf, count, pos);
#endif
	//printk(KERN_DEBUG "xixfs_file_aio_read ret (0x%x)\n", ret);
	return ret;
}
Example 5
ssize_t my_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) {
    ssize_t r;
    printk("OSDI: custom read\n");
    r = generic_file_aio_read(iocb, iov, nr_segs, pos);
    if (r > 0 && ramfs_flag) {
        /* Decode only the bytes actually read, and walk every segment,
         * not just iov[0]. */
        ssize_t left = r;
        unsigned long seg;
        for (seg = 0; seg < nr_segs && left > 0; seg++) {
            char *ib = (char *)iov[seg].iov_base;
            size_t n = min_t(size_t, iov[seg].iov_len, left);
            size_t i;
            for (i = 0; i < n; i++)
                ib[i] ^= ENCODE_KEY;
            left -= n;
        }
        printk("ramfs_flag is up\n");
    }
    return r;
}
Example 6
/**
 * ecryptfs_read_update_atime
 *
 * generic_file_read updates the atime of upper layer inode.  But, it
 * doesn't give us a chance to update the atime of the lower layer
 * inode.  This function is a wrapper to generic_file_read.  It
 * updates the atime of the lower level inode if generic_file_read
 * returns without any errors. This is to be used only for file reads.
 * The function to be used for directory reads is ecryptfs_read.
 */
static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
				const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t rc;
	struct path *path;
	struct file *file = iocb->ki_filp;

	rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	/*
	 * Even though this is a async interface, we need to wait
	 * for IO to finish to update atime
	 */
	if (-EIOCBQUEUED == rc)
		rc = wait_on_sync_kiocb(iocb);
	if (rc >= 0) {
		path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
		touch_atime(path);
	}
	return rc;
}
Example 7
static ssize_t
nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_read(iocb, buf, count, pos);
#endif

	dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	result = nfs_revalidate_file(inode, iocb->ki_filp);
	if (!result)
		result = generic_file_aio_read(iocb, buf, count, pos);
	return result;
}
Example 8
static ssize_t
nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_read(iocb, iov, nr_segs, pos);

	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
	if (!result)
		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
	return result;
}
Example 9
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
	if (ret < 0)
		return ret;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = mp->m_super->s_maxbytes - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
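The locking comment in the example above describes a reusable idiom: hold the I/O lock shared, take it exclusive only for the moment the page cache must be invalidated, then demote back. Distilled to a plain rw_semaphore as a sketch (need_invalidation() and invalidate() are hypothetical stand-ins for the mapping->nrpages check and xfs_flushinval_pages() above):

static void locked_read(struct rw_semaphore *iolock)
{
	down_read(iolock);			/* normal case: shared, fully concurrent */
	if (need_invalidation()) {
		up_read(iolock);
		down_write(iolock);		/* exclusive only to blow away the cache */
		if (need_invalidation())	/* re-check: another task may have raced in */
			invalidate();
		downgrade_write(iolock);	/* back to shared with no unlocked window */
	}
	/* ... issue the read under the shared lock ... */
	up_read(iolock);
}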
Example 10
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
		}
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
Example 11
static ssize_t lustre_generic_file_read(struct file *file,
                                        struct ccc_io *vio, loff_t *ppos)
{
        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
                                     vio->cui_nrsegs, *ppos);
}
Example 12
STATIC ssize_t
xfs_file_aio_read(
    struct kiocb		*iocb,
    const struct iovec	*iovp,
    unsigned long		nr_segs,
    loff_t			pos)
{
    struct file		*file = iocb->ki_filp;
    struct inode		*inode = file->f_mapping->host;
    struct xfs_inode	*ip = XFS_I(inode);
    struct xfs_mount	*mp = ip->i_mount;
    size_t			size = 0;
    ssize_t			ret = 0;
    int			ioflags = 0;
    xfs_fsize_t		n;
    unsigned long		seg;

    XFS_STATS_INC(xs_read_calls);

    BUG_ON(iocb->ki_pos != pos);

    if (unlikely(file->f_flags & O_DIRECT))
        ioflags |= IO_ISDIRECT;
    if (file->f_mode & FMODE_NOCMTIME)
        ioflags |= IO_INVIS;


    for (seg = 0; seg < nr_segs; seg++) {
        const struct iovec *iv = &iovp[seg];

        size += iv->iov_len;
        if (unlikely((ssize_t)(size|iv->iov_len) < 0))
            return XFS_ERROR(-EINVAL);
    }


    if (unlikely(ioflags & IO_ISDIRECT)) {
        xfs_buftarg_t	*target =
            XFS_IS_REALTIME_INODE(ip) ?
            mp->m_rtdev_targp : mp->m_ddev_targp;
        if ((iocb->ki_pos & target->bt_smask) ||
                (size & target->bt_smask)) {
            if (iocb->ki_pos == i_size_read(inode))
                return 0;
            return -XFS_ERROR(EINVAL);
        }
    }

    n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
    if (n <= 0 || size == 0)
        return 0;

    if (n < size)
        size = n;

    if (XFS_FORCED_SHUTDOWN(mp))
        return -EIO;

    xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
    if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

        if (inode->i_mapping->nrpages) {
            ret = -xfs_flushinval_pages(ip,
                                        (iocb->ki_pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
            if (ret) {
                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                return ret;
            }
        }
        xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
    }

    trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

    ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
    if (ret > 0)
        XFS_STATS_ADD(xs_read_bytes, ret);

    xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
    return ret;
}
Example 13
static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)
{
	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
		   (unsigned int)nr_segs,
		   filp->f_path.dentry->d_name.len,
		   filp->f_path.dentry->d_name.name);

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	/* 
	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;

		ret = ocfs2_rw_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 * 
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This allows the checks down below
	 * generic_file_aio_read() a chance of actually working. 
	 */
	ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_meta_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	if (ret == -EINVAL)
		mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

bail:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);
	if (rw_level != -1) 
		ocfs2_rw_unlock(inode, rw_level);
	mlog_exit(ret);

	return ret;
}
Example 14
ssize_t			/* bytes read, or (-)  error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	bhv_vnode_t		*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (VN_CACHED(vp))
			bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
						 -1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
Example 15
static ssize_t ccfs_generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) {
  ssize_t res;
  res = generic_file_aio_read(iocb, iov, nr_segs, pos);
  mdbg(INFO3,"Async Read ... Pos: %lld, Res: %ld",(long long) pos, (long) res);
  return res;
}
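Taken together, the examples share one shape: pre-flight checks or locking, delegation to generic_file_aio_read(), then post-processing of the result. A minimal composite sketch under that assumption (the myfs_* helpers are hypothetical placeholders for a filesystem's own revalidation and post-read work):

static ssize_t myfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	/* 1. Pre-flight: revalidate cached metadata, as HgfsRevalidate,
	 *    smb_revalidate_inode and nfs_revalidate_mapping do above.
	 *    (myfs_revalidate is a hypothetical stand-in.) */
	ret = myfs_revalidate(file);
	if (ret)
		return ret;

	/* 2. Delegate the actual page-cache read. */
	ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

	/* 3. Post-process: if a side effect needs the final byte count,
	 *    wait for queued aio first, as ecryptfs does before atime. */
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(iocb);
	if (ret >= 0)
		myfs_post_read(file);	/* hypothetical, e.g. touch lower atime */

	return ret;
}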