/*
 * Run down the backing-object chain and, if the bottom-most object is a
 * vnode-type object, lock the underlying vnode.  A locked vnode or NULL
 * is returned.  A hypothetical usage sketch follows this function.
 */
struct vnode *
vnode_pager_lock(vm_object_t object)
{
	struct vnode *vp = NULL;
	vm_object_t lobject;
	vm_object_t tobject;
	int error;

	if (object == NULL)
		return(NULL);

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	lobject = object;

	while (lobject->type != OBJT_VNODE) {
		if (lobject->flags & OBJ_DEAD)
			break;
		tobject = lobject->backing_object;
		if (tobject == NULL)
			break;
		vm_object_hold_shared(tobject);
		if (tobject == lobject->backing_object) {
			if (lobject != object) {
				vm_object_lock_swap();
				vm_object_drop(lobject);
			}
			lobject = tobject;
		} else {
			vm_object_drop(tobject);
		}
	}
	while (lobject->type == OBJT_VNODE &&
	       (lobject->flags & OBJ_DEAD) == 0) {
		/*
		 * Extract the vp and acquire a shared vnode lock.  If the
		 * object's handle changed while we blocked, unlock and
		 * retry.
		 */
		vp = lobject->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			if (lobject->handle == vp)
				break;
			vput(vp);
		} else {
			kprintf("vnode_pager_lock: vp %p error %d "
				"lockstatus %d, retrying\n",
				vp, error,
				lockstatus(&vp->v_lock, curthread));
			tsleep(object->handle, 0, "vnpgrl", hz);
		}
		vp = NULL;
	}
	if (lobject != object)
		vm_object_drop(lobject);
	return (vp);
}
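
/*
 * Hypothetical usage sketch (not part of the original source): a caller
 * holding the object token resolves the backing vnode, uses it, and then
 * releases the reference and shared lock obtained by vnode_pager_lock()
 * with vput().  do_something_with_vnode() is illustrative only, and the
 * shared object hold is assumed to satisfy the token assertion above.
 */
static void
example_with_backing_vnode(vm_object_t object)
{
	struct vnode *vp;

	vm_object_hold_shared(object);		/* hold the object token */
	vp = vnode_pager_lock(object);
	if (vp != NULL) {
		/* vp is referenced and shared-locked here */
		do_something_with_vnode(vp);	/* hypothetical helper */
		vput(vp);
	}
	vm_object_drop(object);
}
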
/*
 * A VFS can call this function to try to dispose of a read request
 * directly from the VM system, bypassing almost all VFS overhead
 * except for atime updates.
 *
 * If 0 is returned, some or all of the uio was handled.  The caller must
 * check the uio and handle any remainder itself.
 *
 * The caller must fail the operation on any non-zero error return.
 * A hypothetical caller sketch follows this function.
 */
int
vop_helper_read_shortcut(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_object_t obj;
	vm_page_t m;
	int offset;
	int n;
	int error;

	vp = ap->a_vp;
	uio = ap->a_uio;

	/*
	 * We can't short-cut if there is no VM object or this is a special
	 * UIO_NOCOPY read (typically from VOP_STRATEGY()).  We also can't
	 * do this if we cannot extract the filesize from the vnode.
	 */
	if (vm_read_shortcut_enable == 0)
		return(0);
	if (vp->v_object == NULL || uio->uio_segflg == UIO_NOCOPY)
		return(0);
	if (vp->v_filesize == NOOFFSET)
		return(0);
	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Iterate the uio on a page-by-page basis
	 *
	 * XXX can we leave the object held shared during the uiomove()?
	 */
	++vm_read_shortcut_count;
	obj = vp->v_object;
	vm_object_hold_shared(obj);

	error = 0;
	while (uio->uio_resid && error == 0) {
		offset = (int)uio->uio_offset & PAGE_MASK;
		n = PAGE_SIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (vp->v_filesize < uio->uio_offset)
			break;
		if (uio->uio_offset + n > vp->v_filesize)
			n = vp->v_filesize - uio->uio_offset;
		if (n == 0)
			break;	/* hit EOF */

		m = vm_page_lookup_busy_try(obj, OFF_TO_IDX(uio->uio_offset),
					    FALSE, &error);
		if (error || m == NULL) {
			++vm_read_shortcut_failed;
			error = 0;
			break;
		}
		if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
			++vm_read_shortcut_failed;
			vm_page_wakeup(m);
			break;
		}
		lwb = lwbuf_alloc(m, &lwb_cache);

		/*
		 * Use a no-fault uiomove() to avoid deadlocking against
		 * our VM object (which could livelock on the same object
		 * due to shared-vs-exclusive locking), or deadlocking
		 * against our busied page.  It returns EFAULT on any
		 * fault which winds up diving into a vnode.
		 */
		error = uiomove_nofault((char *)lwbuf_kva(lwb) + offset,
					n, uio);

		vm_page_flag_set(m, PG_REFERENCED);
		lwbuf_free(lwb);
		vm_page_wakeup(m);
	}
	vm_object_drop(obj);

	/*
	 * Ignore EFAULT since we used uiomove_nofault(); this causes the
	 * caller to fall back to the normal code path for this case.
	 */
	if (error == EFAULT)
		error = 0;

	return (error);
}
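
/*
 * Hypothetical caller sketch (not part of the original source): a VFS
 * read VOP tries the shortcut first and, per the contract above, falls
 * back to its normal read path for whatever portion of the uio remains.
 * examplefs_read() and examplefs_read_fallback() are illustrative names.
 */
static int
examplefs_read(struct vop_read_args *ap)
{
	int error;

	/* Try to satisfy the read directly from the VM page cache */
	error = vop_helper_read_shortcut(ap);
	if (error)
		return (error);		/* must fail on non-zero error */

	/* The shortcut may have handled only part of the uio, or none */
	if (ap->a_uio->uio_resid == 0)
		return (0);

	/* Fall back to the normal VFS read path for the remainder */
	return (examplefs_read_fallback(ap));	/* hypothetical */
}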