Example #1
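/*
 * xmem_freesp() makes lp->l_start the new size of the file (only the
 * l_len == 0 form of the request is supported), after verifying that no
 * mandatory locks cover the affected range.
 */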
static int
xmem_freesp(struct vnode *vp, struct flock64 *lp, int flag)
{
	register int i;
	register struct xmemnode *xp = VTOXN(vp);
	int error;

	ASSERT(vp->v_type == VREG);
	ASSERT(lp->l_start >= 0);

	if (lp->l_len != 0)
		return (EINVAL);

	rw_enter(&xp->xn_rwlock, RW_WRITER);
	if (xp->xn_size == lp->l_start) {
		rw_exit(&xp->xn_rwlock);
		return (0);
	}

	/*
	 * Check for any mandatory locks on the range
	 */
	if (MANDLOCK(vp, xp->xn_mode)) {
		long save_start;

		save_start = lp->l_start;

		if (xp->xn_size < lp->l_start) {
			/*
			 * "Truncate up" case: need to make sure there
			 * is no lock beyond current end-of-file. To
			 * do so, we need to set l_start to the size
			 * of the file temporarily.
			 */
			lp->l_start = xp->xn_size;
		}
		lp->l_type = F_WRLCK;
		lp->l_sysid = 0;
		lp->l_pid = ttoproc(curthread)->p_pid;
		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
		    lp->l_type != F_UNLCK) {
			rw_exit(&xp->xn_rwlock);
			return (i ? i : EAGAIN);
		}

		lp->l_start = save_start;
	}

	rw_enter(&xp->xn_contents, RW_WRITER);
	error = xmemnode_trunc((struct xmount *)VFSTOXM(vp->v_vfsp),
						xp, lp->l_start);
	rw_exit(&xp->xn_contents);
	rw_exit(&xp->xn_rwlock);
	return (error);
}
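
A minimal user-level sketch of the request that reaches xmem_freesp(): on Solaris-style systems, fcntl(F_FREESP) with l_len == 0 asks the filesystem to free storage from l_start to end of file, making l_start the new size. That is the only form the function above accepts; any other l_len is rejected with EINVAL. The helper name below is illustrative.

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>

/* Make 'start' the new end of file for the open descriptor 'fd'. */
int
set_eof(int fd, off_t start)
{
	struct flock fl;

	fl.l_whence = SEEK_SET;		/* l_start counts from the beginning */
	fl.l_start = start;		/* new end-of-file */
	fl.l_len = 0;			/* any other value gets EINVAL above */

	if (fcntl(fd, F_FREESP, &fl) == -1) {
		perror("F_FREESP");
		return (-1);
	}
	return (0);
}
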
Example #2
int
nbl_svmand(vnode_t *vp, cred_t *cr, int *svp)
{
	struct vattr va;
	int error;

	va.va_mask = AT_MODE;
	error = VOP_GETATTR(vp, &va, 0, cr, NULL);
	if (error != 0)
		return (error);

	*svp = MANDLOCK(vp, va.va_mode);
	return (0);
}
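
nbl_svmand() and the MANDLOCK() checks throughout these examples key off a mode convention: mandatory file/record locking applies to a regular file whose setgid bit is set while group-execute is clear. Below is a hedged user-level sketch of enabling that on an existing file; error handling is minimal and the helper name is made up.

#include <sys/stat.h>
#include <stdio.h>

/* Turn on mandatory locking for 'path': setgid on, group-execute off. */
int
enable_mandatory_locking(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		perror("stat");
		return (-1);
	}
	/* keep the other permission bits, drop S_IXGRP, add S_ISGID */
	if (chmod(path, (sb.st_mode & 07777 & ~S_IXGRP) | S_ISGID) == -1) {
		perror("chmod");
		return (-1);
	}
	return (0);
}
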
Example #3
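/*
 * acl2_access() answers an ACCESS request: each requested ACCESS2_* bit is
 * probed with VOP_ACCESS(), and bits that imply data access are withheld
 * when the file is subject to mandatory locking.
 */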
/* ARGSUSED */
void
acl2_access(ACCESS2args *args, ACCESS2res *resp, struct exportinfo *exi,
    struct svc_req *req, cred_t *cr, bool_t ro)
{
	int error;
	vnode_t *vp;
	vattr_t va;
	int checkwriteperm;

	vp = nfs_fhtovp(&args->fh, exi);
	if (vp == NULL) {
		resp->status = NFSERR_STALE;
		return;
	}

	/*
	 * If the file system is exported read only, it is not appropriate
	 * to check write permissions for regular files and directories.
	 * Special files are interpreted by the client, so the underlying
	 * permissions are sent back to the client for interpretation.
	 */
	if (rdonly(ro, vp) && (vp->v_type == VREG || vp->v_type == VDIR))
		checkwriteperm = 0;
	else
		checkwriteperm = 1;

	/*
	 * We need the mode so that we can correctly determine access
	 * permissions relative to a mandatory lock file.  Access to
	 * mandatory lock files is denied on the server, so it might
	 * as well be reflected to the client during the open.
	 */
	va.va_mask = AT_MODE;
	error = VOP_GETATTR(vp, &va, 0, cr, NULL);
	if (error) {
		VN_RELE(vp);
		resp->status = puterrno(error);
		return;
	}

	resp->resok.access = 0;

	if (args->access & ACCESS2_READ) {
		error = VOP_ACCESS(vp, VREAD, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |= ACCESS2_READ;
	}
	if ((args->access & ACCESS2_LOOKUP) && vp->v_type == VDIR) {
		error = VOP_ACCESS(vp, VEXEC, 0, cr, NULL);
		if (!error)
			resp->resok.access |= ACCESS2_LOOKUP;
	}
	if (checkwriteperm &&
	    (args->access & (ACCESS2_MODIFY|ACCESS2_EXTEND))) {
		error = VOP_ACCESS(vp, VWRITE, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |=
			    (args->access & (ACCESS2_MODIFY|ACCESS2_EXTEND));
	}
	if (checkwriteperm &&
	    (args->access & ACCESS2_DELETE) && (vp->v_type == VDIR)) {
		error = VOP_ACCESS(vp, VWRITE, 0, cr, NULL);
		if (!error)
			resp->resok.access |= ACCESS2_DELETE;
	}
	if (args->access & ACCESS2_EXECUTE) {
		error = VOP_ACCESS(vp, VEXEC, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |= ACCESS2_EXECUTE;
	}

	va.va_mask = AT_ALL;
	error = rfs4_delegated_getattr(vp, &va, 0, cr);

	VN_RELE(vp);

	/* check for overflowed values */
	if (!error) {
		error = vattr_to_nattr(&va, &resp->resok.attr);
	}
	if (error) {
		resp->status = puterrno(error);
		return;
	}

	resp->status = NFS_OK;
}
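
The bit probing above reads as a small decision table: each requested ACCESS2_* flag maps to one VOP_ACCESS() permission, the LOOKUP and DELETE flags only apply to directories, the write-style flags honor checkwriteperm, and flags that imply data access are withheld on mandatory-lock files. The stand-alone restatement below is an illustration, not server code; the may_* arguments stand in for successful VOP_ACCESS() probes and the bit values are stand-ins for the protocol constants.

#include <stdint.h>

#define	ACCESS2_READ	0x01		/* stand-in values, see above */
#define	ACCESS2_LOOKUP	0x02
#define	ACCESS2_MODIFY	0x04
#define	ACCESS2_EXTEND	0x08
#define	ACCESS2_DELETE	0x10
#define	ACCESS2_EXECUTE	0x20

static uint32_t
access_reply(uint32_t req, int is_dir, int checkwriteperm, int mandlock,
    int may_read, int may_write, int may_exec)
{
	uint32_t granted = 0;

	if ((req & ACCESS2_READ) && may_read && !mandlock)
		granted |= ACCESS2_READ;
	if ((req & ACCESS2_LOOKUP) && is_dir && may_exec)
		granted |= ACCESS2_LOOKUP;
	if (checkwriteperm && (req & (ACCESS2_MODIFY | ACCESS2_EXTEND)) &&
	    may_write && !mandlock)
		granted |= req & (ACCESS2_MODIFY | ACCESS2_EXTEND);
	if (checkwriteperm && (req & ACCESS2_DELETE) && is_dir && may_write)
		granted |= ACCESS2_DELETE;
	if ((req & ACCESS2_EXECUTE) && may_exec && !mandlock)
		granted |= ACCESS2_EXECUTE;
	return (granted);
}
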
Example #4
/*
 * rdxmem does the real work of read requests for xmemfs.
 */
static int
rdxmem(
	struct xmount *xm,
	struct xmemnode *xp,
	struct uio *uio,
	struct caller_context *ct)
{
	ulong_t blockoffset;	/* offset within the current block */
	caddr_t base;
	ssize_t bytes;		/* bytes to uiomove */
	struct vnode *vp;
	int error;
	uint_t	blocknumber;
	long oresid = uio->uio_resid;
	size_t	bsize = xm->xm_bsize;
	offset_t	offset;

	vp = XNTOV(xp);

	XMEMPRINTF(1, ("rdxmem: vp %p\n", (void *)vp));

	ASSERT(RW_LOCK_HELD(&xp->xn_contents));

	if (MANDLOCK(vp, xp->xn_mode)) {
		rw_exit(&xp->xn_contents);
		/*
		 * xmem_getattr ends up being called by chklock
		 */
		error = chklock(vp, FREAD,
			uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct);
		rw_enter(&xp->xn_contents, RW_READER);
		if (error != 0) {
			XMEMPRINTF(1,
			    ("rdxmem: vp %p error %x\n", (void *)vp, error));
			return (error);
		}
	}
	ASSERT(xp->xn_type == VREG);

	if ((offset = uio->uio_loffset) >= MAXOFF_T) {
		XMEMPRINTF(1, ("rdxmem: vp %p bad offset %llx\n",
		    (void *)vp, uio->uio_loffset));
		return (0);
	}
	if (offset < 0)
		return (EINVAL);

	if (uio->uio_resid == 0) {
		XMEMPRINTF(1, ("rdxmem: vp %p resid 0\n", (void *)vp));
		return (0);
	}

	blocknumber = offset >> xm->xm_bshift;
	do {
		offset_t diff, pagestart, pageend;
		uint_t	pageinblock;

		blockoffset = offset & (bsize - 1);
		/*
		 * A maximum of xm->xm_bsize bytes of data is transferred
		 * each pass through this loop
		 */
		bytes = MIN(bsize - blockoffset, uio->uio_resid);

		diff = xp->xn_size - offset;

		if (diff <= 0) {
			error = 0;
			goto out;
		}
		if (diff < bytes)
			bytes = diff;

		if (!xp->xn_ppa[blocknumber]) {
			if ((error = xmem_fillpages(xp, vp, offset,
			    bytes, 1)) != 0) {
				return (error);
			}
		}
		/*
		 * We have to drop the contents lock to prevent the VM
		 * system from trying to reacquire it in xmem_getpage()
		 * should the uiomove cause a pagefault.
		 */
		rw_exit(&xp->xn_contents);

#ifdef LOCKNEST
		xmem_getpage();
#endif

		/* 2/10 panic in hat_memload_array - len & MMU_OFFSET */

		pagestart = offset & ~(offset_t)(PAGESIZE - 1);
		pageend = (offset + bytes - 1) & ~(offset_t)(PAGESIZE - 1);
		if (xm->xm_ppb == 1)
			base = segxmem_getmap(xm->xm_map, vp,
			    pagestart, pageend - pagestart + PAGESIZE,
			    (page_t **)&xp->xn_ppa[blocknumber], S_READ);
		else {
			pageinblock = btop(blockoffset);
			base = segxmem_getmap(xm->xm_map, vp,
			    pagestart, pageend - pagestart + PAGESIZE,
			    &xp->xn_ppa[blocknumber][pageinblock], S_READ);

		}
		error = uiomove(base + (blockoffset & (PAGESIZE - 1)),
			bytes, UIO_READ, uio);

		segxmem_release(xm->xm_map, base,
			pageend - pagestart + PAGESIZE);
		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&xp->xn_contents, RW_READER);

		offset = uio->uio_loffset;
		blocknumber++;
	} while (error == 0 && uio->uio_resid > 0);

out:
	gethrestime(&xp->xn_atime);

	/*
	 * If we've already done a partial read, terminate
	 * the read but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;

	return (error);
}
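
The per-iteration arithmetic in rdxmem() (block number, offset within the block, page within the block, and the page-aligned mapping start) is easy to check in isolation. A self-contained sketch with illustrative constants follows; xmemfs takes the block size and shift from the mount, and btop() is the kernel's bytes-to-pages conversion, modelled here as a plain division.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGESIZE	4096ULL
#define	SKETCH_BSHIFT	13			/* 8K blocks: 2 pages per block */
#define	SKETCH_BSIZE	(1ULL << SKETCH_BSHIFT)

int
main(void)
{
	uint64_t offset = 20000;		/* arbitrary file offset */

	uint64_t blocknumber = offset >> SKETCH_BSHIFT;
	uint64_t blockoffset = offset & (SKETCH_BSIZE - 1);
	uint64_t pageinblock = blockoffset / SKETCH_PAGESIZE;	/* btop() */
	uint64_t pagestart = offset & ~(SKETCH_PAGESIZE - 1);

	assert(blocknumber == 2 && blockoffset == 3616);
	assert(pageinblock == 0 && pagestart == 16384);

	printf("block %llu, offset in block %llu, page in block %llu, "
	    "map from %llu\n",
	    (unsigned long long)blocknumber, (unsigned long long)blockoffset,
	    (unsigned long long)pageinblock, (unsigned long long)pagestart);
	return (0);
}
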
Example #5
/*
 * wrxmem does the real work of write requests for xmemfs.
 */
static int
wrxmem(struct xmount *xm, struct xmemnode *xp, struct uio *uio,
	struct cred *cr, struct caller_context *ct)
{
	uint_t		blockoffset;	/* offset in the block */
	uint_t		blkwr;		/* offset in blocks into xmem file */
	uint_t		blkcnt;
	caddr_t		base;
	ssize_t		bytes;		/* bytes to uiomove */
	struct vnode	*vp;
	int		error = 0;
	size_t		bsize = xm->xm_bsize;
	rlim64_t	limit = uio->uio_llimit;
	long		oresid = uio->uio_resid;
	timestruc_t 	now;
	offset_t	offset;

	/*
	 * xp->xn_size is incremented before the uiomove
	 * is done on a write.  If the move fails (bad user
	 * address) reset xp->xn_size.
	 * The better way would be to increment xp->xn_size
	 * only if the uiomove succeeds.
	 */
	long		xn_size_changed = 0;
	offset_t	old_xn_size;

	vp = XNTOV(xp);
	ASSERT(vp->v_type == VREG);

	XMEMPRINTF(1, ("wrxmem: vp %p resid %lx off %llx\n",
	    (void *)vp, uio->uio_resid, uio->uio_loffset));

	ASSERT(RW_WRITE_HELD(&xp->xn_contents));
	ASSERT(RW_WRITE_HELD(&xp->xn_rwlock));

	if (MANDLOCK(vp, xp->xn_mode)) {
		rw_exit(&xp->xn_contents);
		/*
		 * xmem_getattr ends up being called by chklock
		 */
		error = chklock(vp, FWRITE,
			uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct);

		rw_enter(&xp->xn_contents, RW_WRITER);
		if (error != 0) {
			XMEMPRINTF(8, ("wrxmem: vp %p error %x\n",
			    (void *)vp, error));
			return (error);
		}
	}

	if ((offset = uio->uio_loffset) < 0)
		return (EINVAL);

	if (offset >= limit) {
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
		    p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
		return (EFBIG);
	}

	if (uio->uio_resid == 0) {
		XMEMPRINTF(8, ("wrxmem: vp %p resid %lx\n",
			(void *)vp, uio->uio_resid));
		return (0);
	}

	/*
	 * Get the highest blocknumber and allocate page array if needed.
	 * Note that if xm_bsize != PAGESIZE, each ppa[] is a pointer to
	 * a page array rather than just a page.
	 */
	blkcnt = howmany((offset + uio->uio_resid), bsize);
	blkwr = offset >> xm->xm_bshift;	/* write begins here */

	XMEMPRINTF(1, ("wrxmem: vp %p blkcnt %x blkwr %x xn_ppasz %lx\n",
	    (void *)vp, blkcnt, blkwr, xp->xn_ppasz));

	/* file size increase */
	if (xp->xn_ppasz < blkcnt) {

		page_t		***ppa;
		int		ppasz;
		uint_t		blksinfile = howmany(xp->xn_size, bsize);

		/*
		 * Check that enough blocks are available for the given offset.
		 */
		if (blkcnt - blksinfile > xm->xm_max - xm->xm_mem)
			return (ENOSPC);

		/*
		 * to prevent reallocating every time the file grows by a
		 * single block, double the size of the array.
		 */
		if (blkcnt < xp->xn_ppasz * 2)
			ppasz = xp->xn_ppasz * 2;
		else
			ppasz = blkcnt;


		ppa = kmem_zalloc(ppasz * sizeof (page_t **), KM_SLEEP);

		ASSERT(ppa);

		if (xp->xn_ppasz) {
			bcopy(xp->xn_ppa, ppa, blksinfile * sizeof (*ppa));
			kmem_free(xp->xn_ppa, xp->xn_ppasz * sizeof (*ppa));
		}
		xp->xn_ppa = ppa;
		xp->xn_ppasz = ppasz;

		/*
	 * Fill in the 'hole' if the write offset is beyond the current
	 * file size.  This helps create large files quickly; an
	 * application can lseek to a large offset and perform a single
	 * write operation to create the large file.
		 */

		if (blksinfile < blkwr) {

			old_xn_size = xp->xn_size;
			xp->xn_size = (offset_t)blkwr * bsize;

			XMEMPRINTF(4, ("wrxmem: fill vp %p blks %x to %x\n",
			    (void *)vp, blksinfile, blkcnt - 1));
			error = xmem_fillpages(xp, vp,
				(offset_t)blksinfile * bsize,
				(offset_t)(blkcnt - blksinfile) * bsize, 1);
			if (error) {
				/* truncate file back to original size */
				(void) xmemnode_trunc(xm, xp, old_xn_size);
				return (error);
			}
			/*
			 * Keep the original size for now so that, if the
			 * write at blkwr fails, the filled hole can be
			 * truncated away.
			 */
			xp->xn_size = old_xn_size;
		}
	}
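	/*
	 * User-level view of the hole-fill path above (illustrative only):
	 * an application seeks far past end of file and issues one write,
	 * and the single wrxmem() call fills the intervening hole.
	 *
	 *	fd = open("bigfile", O_CREAT | O_WRONLY, 0644);
	 *	(void) lseek(fd, (off_t)1024 * 1024 * 1024, SEEK_SET);
	 *	(void) write(fd, "x", 1);
	 */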

	do {
		offset_t	pagestart, pageend;
		page_t		**ppp;

		blockoffset = (uint_t)offset & (bsize - 1);
		/*
		 * A maximum of xm->xm_bsize bytes of data is transferred
		 * each pass through this loop
		 */
		bytes = MIN(bsize - blockoffset, uio->uio_resid);

		ASSERT(bytes);

		if (offset + bytes >= limit) {
			if (offset >= limit) {
				error = EFBIG;
				goto out;
			}
			bytes = limit - offset;
		}


		if (!xp->xn_ppa[blkwr]) {
			/* zero fill new pages - simplify partial updates */
			error = xmem_fillpages(xp, vp, offset, bytes, 1);
			if (error)
				return (error);
		}

		/* grow the file to the new length */
		if (offset + bytes > xp->xn_size) {
			xn_size_changed = 1;
			old_xn_size = xp->xn_size;
			xp->xn_size = offset + bytes;
		}

#ifdef LOCKNEST
		xmem_getpage();
#endif

		/* xn_ppa[] is a page_t * if ppb == 1 */
		if (xm->xm_ppb == 1)
			ppp = (page_t **)&xp->xn_ppa[blkwr];
		else
			ppp = &xp->xn_ppa[blkwr][btop(blockoffset)];

		pagestart = offset & ~(offset_t)(PAGESIZE - 1);
		/*
		 * Subtract 1 in case (offset + bytes) is a multiple of
		 * PAGESIZE, so that pageend refers to the last page
		 * actually touched.
		 */
		pageend = (offset + bytes - 1) & ~(offset_t)(PAGESIZE - 1);

		base = segxmem_getmap(xm->xm_map, vp,
			pagestart, pageend - pagestart + PAGESIZE,
			ppp, S_WRITE);

		rw_exit(&xp->xn_contents);

		error = uiomove(base + (offset - pagestart), bytes,
							UIO_WRITE, uio);
		segxmem_release(xm->xm_map, base,
				pageend - pagestart + PAGESIZE);

		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&xp->xn_contents, RW_WRITER);
		/*
		 * If the uiomove failed, fix up xn_size.
		 */
		if (error) {
			if (xn_size_changed) {
				/*
				 * The uiomove failed, and we
				 * allocated blocks, so get rid
				 * of them.
				 */
				(void) xmemnode_trunc(xm, xp, old_xn_size);
			}
		} else {
			if ((xp->xn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
			    (xp->xn_mode & (S_ISUID | S_ISGID)) &&
			    secpolicy_vnode_setid_retain(cr,
			    (xp->xn_mode & S_ISUID) != 0 &&
			    xp->xn_uid == 0) != 0) {

				/*
				 * Clear Set-UID & Set-GID bits on
				 * successful write if not privileged
				 * and at least one of the execute bits
				 * is set.  If we always clear Set-GID,
				 * mandatory file and record locking is
				 * unusable.
				 */
				xp->xn_mode &= ~(S_ISUID | S_ISGID);
			}
			gethrestime(&now);
			xp->xn_mtime = now;
			xp->xn_ctime = now;
		}
		offset = uio->uio_loffset;	/* uiomove sets uio_loffset */
		blkwr++;
	} while (error == 0 && uio->uio_resid > 0 && bytes != 0);

out:
	/*
	 * If we've already done a partial-write, terminate
	 * the write but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;
	return (error);
}
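
The bookkeeping wrxmem() describes in its opening comment, growing xn_size before the uiomove and undoing the growth if the copy faults (the real code also releases the newly allocated blocks through xmemnode_trunc()), can be shown in miniature. Everything below is illustrative: memcpy stands in for uiomove and the fault is simulated by a flag.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_node {
	uint64_t size;				/* plays the role of xn_size */
};

static int
sketch_write(struct sketch_node *np, char *filebuf, uint64_t offset,
    const void *src, size_t bytes, int simulate_fault)
{
	uint64_t old_size = np->size;
	int size_changed = 0;

	if (offset + bytes > np->size) {	/* grow before the copy */
		size_changed = 1;
		np->size = offset + bytes;
	}
	if (simulate_fault) {			/* a bad user address in uiomove */
		if (size_changed)
			np->size = old_size;	/* roll the size back */
		return (EFAULT);
	}
	memcpy(filebuf + offset, src, bytes);	/* the copy succeeded */
	return (0);
}

int
main(void)
{
	char buf[64];
	struct sketch_node n;

	(void) memset(buf, 0, sizeof (buf));
	n.size = 8;

	(void) sketch_write(&n, buf, 6, "data", 4, 1);	/* copy "faults" */
	printf("after failed write: size %llu\n", (unsigned long long)n.size);
	(void) sketch_write(&n, buf, 6, "data", 4, 0);	/* copy succeeds */
	printf("after good write:   size %llu\n", (unsigned long long)n.size);
	return (0);
}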