Example #1
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
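
The header comment above notes that f_offset updates are not guaranteed against multiple readers, which is why the offset is read and written through vn_get_fpf_offset()/vn_set_fpf_offset() rather than touched directly. Those helpers are not part of this excerpt; as a rough illustration only, a minimal sketch assuming a hypothetical per-file spinlock (fp->f_spin) could look like this:

/*
 * Illustrative sketch, not the real kernel helpers: serialize access to
 * fp->f_offset with an assumed per-file spinlock so concurrent readers
 * observe a consistent value.
 */
static off_t
vn_get_fpf_offset(struct file *fp)
{
	off_t off;

	spin_lock(&fp->f_spin);		/* hypothetical lock field */
	off = fp->f_offset;
	spin_unlock(&fp->f_spin);
	return (off);
}

static void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	spin_lock(&fp->f_spin);		/* hypothetical lock field */
	fp->f_offset = offset;
	spin_unlock(&fp->f_spin);
}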
Example #2
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
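
Every routine in these examples folds the result of sequential_heuristic() into ioflag before issuing the transfer. That function is not shown here; the idea is that the start offset of each transfer is compared against fp->f_nextoff, recorded at the end of the previous one, so back-to-back sequential I/O earns a growing read-ahead hint. A sketch under those assumptions (fp->f_nextoff and fp->f_seqcount follow the excerpt; the constants IO_SEQMAX/IO_SEQSHIFT and the exact scaling are assumed):

static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sketch only: reward transfers that pick up exactly where the
	 * previous one left off, reset the streak otherwise.
	 */
	if (uio->uio_offset == fp->f_nextoff) {
		if (fp->f_seqcount < IO_SEQMAX)
			fp->f_seqcount++;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}
	fp->f_seqcount = 0;
	return (0);
}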
Example #3
/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
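
The per-call override blocks repeated in all four routines follow one pattern: an explicit O_F* request on the call wins, otherwise the open-time f_flag bits supply the default. As a hypothetical refactoring (vn_ioflags() does not exist in the excerpt), that pattern could be collected into a single helper:

static int
vn_ioflags(struct file *fp, int flags)
{
	int ioflag = 0;

	/* Non-blocking unless the call explicitly forces blocking. */
	if ((flags & O_FBLOCKING) == 0 &&
	    ((flags & O_FNONBLOCKING) || (fp->f_flag & FNONBLOCK)))
		ioflag |= IO_NDELAY;

	/* Direct I/O unless the call explicitly forces buffering. */
	if ((flags & O_FBUFFERED) == 0 &&
	    ((flags & O_FUNBUFFERED) || (fp->f_flag & O_DIRECT)))
		ioflag |= IO_DIRECT;

	/* Synchronous writes unless the call explicitly forces async. */
	if ((flags & O_FASYNCWRITE) == 0 &&
	    ((flags & O_FSYNCWRITE) || (fp->f_flag & O_FSYNC)))
		ioflag |= IO_SYNC;

	return (ioflag);
}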
Example #4
static int
devfs_fo_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
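
Routines like devfs_fo_read()/devfs_fo_write() are reached from the file layer rather than through the VOP table: read(2) and write(2) dispatch through the struct fileops attached to the open file, and devfs installs its device-optimized handlers there. A minimal sketch of that wiring (the table name and the set of omitted members are assumptions; only the read/write hooks come from the excerpt):

static struct fileops devfs_dev_fileops = {
	.fo_read	= devfs_fo_read,
	.fo_write	= devfs_fo_write,
	/* ioctl, kqfilter, stat, close, ... hooks omitted in this sketch */
};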