/* Example 1 (0) */
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
/*
 * Read from the vnode backing *fp into *uio.
 *
 * Translates the per-call flags (O_F*) and the file pointer's sticky
 * fp->f_flag bits into VOP ioflags, loads the shared f_offset when the
 * caller supplied no explicit offset (O_FOFFSET clear) and the vnode is
 * seekable, then issues VOP_READ() with the vnode share-locked.
 * Returns the VOP_READ() error code.
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	/* The uio must be driven by the current thread. */
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	/*
	 * An explicit per-call O_FBLOCKING suppresses the fp's sticky
	 * FNONBLOCK; otherwise either O_FNONBLOCKING (per-call) or
	 * FNONBLOCK (sticky) selects IO_NDELAY.
	 */
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	/*
	 * Without O_FOFFSET, seekable vnodes read at the file pointer's
	 * shared offset, which is stored back after the transfer below.
	 */
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	/*
	 * NOTE(review): vn_write() brackets VOP_WRITE() with
	 * ccms_lock_get_uio()/ccms_lock_put(), but no equivalent
	 * bracketing is done around VOP_READ() here -- confirm whether
	 * that asymmetry is intentional.
	 */
	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset; /* likely feeds sequential_heuristic() -- confirm */
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
/* Example 2 (0) */
/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
/* Example 3 (0) */
/*
 * MPSAFE
 *
 * Snapshot the file pointer's offset for polling purposes.  On x86_64
 * and on non-SMP builds the field is read directly (presumably because
 * a 64-bit load is a single atomic access there -- confirm); otherwise
 * the read is serialized through the fpf offset get/set helpers.
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t saved_off;

	saved_off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, saved_off);
	return(saved_off);
#endif
}