/*
 * Allocate a VM object for a vnode, typically a regular file vnode.
 *
 * Some additional information is required to generate a properly sized
 * object which covers the entire buffer cache buffer straddling the file
 * EOF.  Userland does not see the extra pages because the VM fault code
 * tests against v_filesize.
 */
vm_object_t
vnode_pager_alloc(void *handle, off_t length, vm_prot_t prot, off_t offset,
		  int blksize, int boff)
{
	vm_object_t object;
	struct vnode *vp;
	off_t loffset;
	vm_pindex_t lsize;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	/*
	 * XXX hack - This initialization should be put somewhere else.
	 */
	if (vnode_pbuf_freecnt < 0) {
	    vnode_pbuf_freecnt = nswbuf / 2 + 1;
	}

	/*
	 * Serialize potential vnode/object teardowns and interlocks
	 */
	vp = (struct vnode *)handle;
	lwkt_gettoken(&vp->v_token);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}

	if (VREFCNT(vp) <= 0)
		panic("vnode_pager_alloc: no vnode reference");

	/*
	 * Round the length up to the end of the block (buffer cache buffer)
	 * straddling EOF so the object covers that entire buffer, then
	 * convert the rounded length into a page count for the object size.
	 */
	if (boff < 0)
		boff = (int)(length % blksize);
	if (boff)
		loffset = length + (blksize - boff);
	else
		loffset = length;
	lsize = OFF_TO_IDX(round_page64(loffset));
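
	/*
	 * Worked example (illustration only, assuming 4KB pages and a
	 * 16KB filesystem block size): length = 100000, boff = -1.
	 *
	 *	boff    = 100000 % 16384          = 1696
	 *	loffset = 100000 + (16384 - 1696) = 114688  (7 blocks)
	 *	lsize   = 114688 / 4096           = 28 pages
	 *
	 * The file data itself needs only 25 pages; the 3 extra pages
	 * cover the rest of the buffer straddling EOF and stay hidden
	 * from userland by the v_filesize check in the fault path.
	 */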

	if (object == NULL) {
		/*
		 * Allocate a new object of the appropriate size.
		 */
		object = vm_object_allocate_hold(OBJT_VNODE, lsize);
		object->handle = handle;
		vp->v_object = object;
		vp->v_filesize = length;
		if (vp->v_mount && (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC))
			vm_object_set_flag(object, OBJ_NOMSYNC);
		vref(vp);
	} else {
		vm_object_reference_quick(object);	/* also vref's */
		if (object->size != lsize) {
			kprintf("vnode_pager_alloc: Warning, objsize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)object->size,
				(intmax_t)lsize,
				vp, object);
		}
		if (vp->v_filesize != length) {
			kprintf("vnode_pager_alloc: Warning, filesize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)vp->v_filesize,
				(intmax_t)length,
				vp, object);
		}
	}
	vm_object_drop(object);
	lwkt_reltoken(&vp->v_token);

	return (object);
}
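
/*
 * Usage sketch (not part of the original source): a filesystem attaching a
 * VM object to a freshly opened regular-file vnode might call the pager
 * like this.  The helper name and the 16KB block size are assumptions for
 * illustration; prot and offset are unused by vnode_pager_alloc, and a
 * boff of -1 lets it derive the offset within the last block from length.
 */
static int
example_vinit_vmio(struct vnode *vp, off_t filesize)
{
	vm_object_t object;

	object = vnode_pager_alloc(vp, filesize, 0, 0, 16384, -1);
	if (object == NULL)
		return (EINVAL);
	return (0);
}
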
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
	vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	cdev_t dev;
	vm_object_t object;
	u_short color;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);

	/*
	 * Look up pager, creating as necessary.
	 */
	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate_hold(tp,
						 OFF_TO_IDX(foff + size));
		object->handle = handle;
		object->un_pager.devp.ops = ops;
		object->un_pager.devp.dev = handle;
		TAILQ_INIT(&object->un_pager.devp.devp_pglist);

		/*
		 * handle is only a device for old_dev_pager_ctor.
		 */
		if (ops->cdev_pg_ctor == old_dev_pager_ctor) {
			dev = handle;
			dev->si_object = object;
		}

		TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
		    pager_object_list);

		vm_object_drop(object);
	} else {
		/*
		 * Gain a reference to the object.
		 */
		vm_object_hold(object);
		vm_object_reference_locked(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
		vm_object_drop(object);
	}
	mtx_unlock(&dev_pager_mtx);

	return (object);
}
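
/*
 * Usage sketch (not part of the original source): a character-device
 * driver's mmap path might create or look up its pager object like this.
 * 'mydrv_pager_ops' is a hypothetical ops table whose cdev_pg_ctor is
 * defined elsewhere in the driver; OBJT_DEVICE and a zero file offset are
 * illustrative choices.
 */
extern struct cdev_pager_ops mydrv_pager_ops;	/* assumed defined elsewhere */

static vm_object_t
example_mmap_single(void *handle, vm_ooffset_t size, vm_prot_t prot,
		    struct ucred *cred)
{
	return (cdev_pager_allocate(handle, OBJT_DEVICE, &mydrv_pager_ops,
				    size, prot, 0, cred));
}
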
/*
 * Resizes the aobj associated with the regular file pointed to by vp to
 * the size newsize.  'vp' must point to a vnode that represents a regular
 * file.  'newsize' must not be negative.
 *
 * Pass trivial as 1 when the buffer contents will be overwritten anyway;
 * otherwise pass 0 so the newly exposed range is zero-filled.
 *
 * Returns zero on success or an appropriate error code on failure.
 *
 * Caller must hold the node exclusively locked.
 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
{
	int error;
	vm_pindex_t newpages, oldpages;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	off_t oldsize;

#ifdef INVARIANTS
	KKASSERT(vp->v_type == VREG);
	KKASSERT(newsize >= 0);
#endif

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	/*
	 * Convert the old and new sizes to the number of pages needed to
	 * store them.  It may happen that we do not need to do anything
	 * because the last allocated page can accommodate the change on
	 * its own.
	 */
	oldsize = node->tn_size;
	oldpages = round_page64(oldsize) / PAGE_SIZE;
	KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
	newpages = round_page64(newsize) / PAGE_SIZE;
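
	/*
	 * Worked example (illustration only, assuming 4KB pages):
	 * growing a file from 10000 to 20000 bytes gives
	 *
	 *	oldpages = round_page64(10000) / 4096 = 3
	 *	newpages = round_page64(20000) / 4096 = 5
	 *
	 * so tm_pages_used is charged for 2 additional pages below,
	 * provided that stays within tm_pages_max.
	 */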

	if (newpages > oldpages &&
	   tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
		error = ENOSPC;
		goto out;
	}
	node->tn_reg.tn_aobj_pages = newpages;
	node->tn_size = newsize;

	if (newpages != oldpages)
		atomic_add_long(&tmp->tm_pages_used, (newpages - oldpages));

	/*
	 * When adjusting the vnode filesize and its VM object we must
	 * also adjust our backing VM object (aobj).  The blocksize
	 * used must match the block size we use for the buffer cache.
	 *
	 * The backing VM object may contain VM pages as well as swap
	 * assignments if we previously renamed main object pages into
	 * it during deactivation.
	 */
	if (newsize < oldsize) {
		vm_pindex_t osize;
		vm_pindex_t nsize;
		vm_object_t aobj;

		error = nvtruncbuf(vp, newsize, TMPFS_BLKSIZE, -1, 0);
		aobj = node->tn_reg.tn_aobj;
		if (aobj) {
			osize = aobj->size;
			nsize = vp->v_object->size;
			if (nsize < osize) {
				aobj->size = osize;
				swap_pager_freespace(aobj, nsize,
						     osize - nsize);
				vm_object_page_remove(aobj, nsize, osize,
						      FALSE);
			}
		}
	} else {
		vm_object_t aobj;

		error = nvextendbuf(vp, oldsize, newsize,
				    TMPFS_BLKSIZE, TMPFS_BLKSIZE,
				    -1, -1, trivial);
		aobj = node->tn_reg.tn_aobj;
		if (aobj)
			aobj->size = vp->v_object->size;
	}

out:
	return error;
}
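
/*
 * Usage sketch (not part of the original source): a setattr-style path
 * changing a regular file's size might call the resize helper like this.
 * The wrapper name is hypothetical; trivial is passed as 0 so any newly
 * exposed range is zero-filled, and the caller is assumed to already hold
 * the tmpfs node exclusively locked as required above.
 */
static int
example_tmpfs_chsize(struct vnode *vp, off_t newsize)
{
	if (newsize < 0)
		return (EINVAL);
	return (tmpfs_reg_resize(vp, newsize, 0));
}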