Code Example #1
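This example is the NetBSD LFS ulfs_chown() routine. It changes the owner and group of a locked inode, authorizes the change through kauth_authorize_vnode()/genfs_can_chown(), and, when quotas are compiled in, moves the block and inode charges from the old IDs to the new ones, rolling the whole change back if the new owner is over quota.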
/*
 * Perform chown operation on inode ip;
 * inode must be locked prior to call.
 */
static int
ulfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
    struct lwp *l)
{
	struct inode	*ip;
	int		error = 0;
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	uid_t		ouid;
	gid_t		ogid;
	int64_t		change;
#endif
	ip = VTOI(vp);
	error = 0;

	if (uid == (uid_t)VNOVAL)
		uid = ip->i_uid;
	if (gid == (gid_t)VNOVAL)
		gid = ip->i_gid;

	error = kauth_authorize_vnode(cred, KAUTH_VNODE_CHANGE_OWNERSHIP, vp,
	    NULL, genfs_can_chown(cred, ip->i_uid, ip->i_gid, uid, gid));
	if (error)
		return (error);

	fstrans_start(vp->v_mount, FSTRANS_SHARED);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	ogid = ip->i_gid;
	ouid = ip->i_uid;
	change = DIP(ip, blocks);
	(void) lfs_chkdq(ip, -change, cred, 0);
	(void) lfs_chkiq(ip, -1, cred, 0);
#endif
	ip->i_gid = gid;
	DIP_ASSIGN(ip, gid, gid);
	ip->i_uid = uid;
	DIP_ASSIGN(ip, uid, uid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, change, cred, 0)) == 0) {
		if ((error = lfs_chkiq(ip, 1, cred, 0)) == 0)
			goto good;
		else
			(void) lfs_chkdq(ip, -change, cred, FORCE);
	}
	ip->i_gid = ogid;
	DIP_ASSIGN(ip, gid, ogid);
	ip->i_uid = ouid;
	DIP_ASSIGN(ip, uid, ouid);
	(void) lfs_chkdq(ip, change, cred, FORCE);
	(void) lfs_chkiq(ip, 1, cred, FORCE);
	fstrans_done(vp->v_mount);
	return (error);
 good:
#endif /* LFS_QUOTA || LFS_QUOTA2 */
	ip->i_flag |= IN_CHANGE;
	fstrans_done(vp->v_mount);
	return (0);
}
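
The quota handling above follows a charge/rollback shape: uncharge the old owner, install the new IDs, try to charge the new owner, and if that charge is refused, restore the old IDs and re-charge them with FORCE. A minimal userland sketch of that shape, using a hypothetical quota_charge() in place of lfs_chkdq()/lfs_chkiq() (the names and limits here are illustrative, not kernel APIs), might look like this:

#include <errno.h>
#include <stdio.h>

#define NUSERS		4
#define QUOTA_LIMIT	100	/* assumed per-user block limit */

static long charged[NUSERS];	/* blocks currently charged to each user */

/* Hypothetical stand-in for lfs_chkdq(): refuse charges over the limit. */
static int
quota_charge(int owner, long blocks)
{
	if (blocks > 0 && charged[owner] + blocks > QUOTA_LIMIT)
		return EDQUOT;
	charged[owner] += blocks;
	return 0;
}

struct file_acct {
	int	owner;		/* current owner id */
	long	blocks;		/* blocks charged to that owner */
};

/* The charge/rollback shape of ulfs_chown, without the kernel plumbing. */
static int
change_owner(struct file_acct *fa, int new_owner)
{
	int old_owner = fa->owner;
	long change = fa->blocks;
	int error;

	(void)quota_charge(old_owner, -change);	/* like lfs_chkdq(ip, -change, ...) */
	fa->owner = new_owner;			/* like the DIP_ASSIGN() updates */

	error = quota_charge(new_owner, change);	/* like lfs_chkdq(ip, change, ...) */
	if (error == 0)
		return 0;			/* the "goto good" path */

	/* Undo: put the old owner back and re-charge it unconditionally. */
	fa->owner = old_owner;
	charged[old_owner] += change;		/* like the FORCE re-charge */
	return error;
}

int
main(void)
{
	struct file_acct fa = { .owner = 0, .blocks = 60 };

	charged[0] = 60;	/* the old owner is already charged */
	charged[1] = 80;	/* the new owner is close to the limit */

	printf("chown to 1 -> %d\n", change_owner(&fa, 1));	/* fails with EDQUOT */
	printf("chown to 2 -> %d\n", change_owner(&fa, 2));	/* succeeds */
	return 0;
}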
Code Example #2
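ulfs_check_possible() implements the file-system-specific half of the access check. Write attempts on directories, symlinks, and regular files are refused on read-only mounts (and, with quotas enabled, when lfs_chkdq() refuses them); sockets, fifos, and block or character devices are exempt because their data does not live on the file system. Snapshots are never accessible, and immutable files are never writable.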
static int
ulfs_check_possible(struct vnode *vp, struct inode *ip, mode_t mode,
    kauth_cred_t cred)
{
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	int error;
#endif

	/*
	 * Disallow write attempts on read-only file systems,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			fstrans_start(vp->v_mount, FSTRANS_SHARED);
			error = lfs_chkdq(ip, 0, cred, 0);
			fstrans_done(vp->v_mount);
			if (error != 0)
				return error;
#endif
			break;
		case VBAD:
		case VBLK:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VNON:
		default:
			break;
		}
	}

	/* If it is a snapshot, nobody gets access to it. */
	if ((ip->i_flags & SF_SNAPSHOT))
		return (EPERM);
	/* If immutable bit set, nobody gets to write it. */
	if ((mode & VWRITE) && (ip->i_flags & IMMUTABLE))
		return (EPERM);

	return 0;
}
Code Example #3
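lfs_fragextend() enlarges the last, partial block of a file from osize to nsize bytes. It takes lfs_fraglock so blocks are not enlarged while a segment is being written, charges the extra fragments against quota and the superblock free-block counters, and resizes the buffer with allocbuf(). If the block is dirty and lfs_avail is too low, it releases the buffer and the lock, waits for the cleaner in lfs_availwait(), and retries from the top.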
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp,
    kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */

	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}
Code Example #4
File: lfs_inode.c  Project: marklee77/frankenlibc
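lfs_truncate() is the LFS truncation routine. The first half lengthens the file by allocating the block containing the new last byte; the second half shortens it: the partial block past the new EOF is zeroed, whole indirect and direct blocks are released through lfs_indirtrunc() and lfs_blkfree(), and block, quota, and segment-usage accounting are corrected at the end.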
int
lfs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
	daddr_t lastblock;
	struct inode *oip = VTOI(ovp);
	daddr_t bn, lbn, lastiblock[ULFS_NIADDR], indir_lbn[ULFS_NIADDR];
	/* XXX ondisk32 */
	int32_t newblks[ULFS_NDADDR + ULFS_NIADDR];
	struct lfs *fs;
	struct buf *bp;
	int offset, size, level;
	daddr_t count, rcount;
	daddr_t blocksreleased = 0, real_released = 0;
	int i, nblocks;
	int aflags, error, allerror = 0;
	off_t osize;
	long lastseg;
	size_t bc;
	int obufsize, odb;
	int usepc;

	if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
	    ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
		KASSERT(oip->i_size == 0);
		return 0;
	}

	if (length < 0)
		return (EINVAL);

	/*
	 * Just return without updating the modification times.
	 */
	if (oip->i_size == length) {
		/* still do a uvm_vnp_setsize() as writesize may be larger */
		uvm_vnp_setsize(ovp, length);
		return (0);
	}

	fs = oip->i_lfs;

	if (ovp->v_type == VLNK &&
	    (oip->i_size < fs->um_maxsymlinklen ||
	     (fs->um_maxsymlinklen == 0 &&
	      oip->i_ffs1_blocks == 0))) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("lfs_truncate: partial truncate of symlink");
#endif
		memset((char *)SHORTLINK(oip), 0, (u_int)oip->i_size);
		oip->i_size = oip->i_ffs1_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (lfs_update(ovp, NULL, NULL, 0));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (lfs_update(ovp, NULL, NULL, 0));
	}
	lfs_imtime(fs);
	osize = oip->i_size;
	usepc = (ovp->v_type == VREG && ovp != fs->lfs_ivnode);

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > fs->um_maxfilesize)
			return (EFBIG);
		aflags = B_CLRBUF;
		if (ioflag & IO_SYNC)
			aflags |= B_SYNC;
		if (usepc) {
			if (lfs_lblkno(fs, osize) < ULFS_NDADDR &&
			    lfs_lblkno(fs, osize) != lfs_lblkno(fs, length) &&
			    lfs_blkroundup(fs, osize) != osize) {
				off_t eob;

				eob = lfs_blkroundup(fs, osize);
				uvm_vnp_setwritesize(ovp, eob);
				error = ulfs_balloc_range(ovp, osize,
				    eob - osize, cred, aflags);
				if (error) {
					(void) lfs_truncate(ovp, osize,
						    ioflag & IO_SYNC, cred);
					return error;
				}
				if (ioflag & IO_SYNC) {
					mutex_enter(ovp->v_interlock);
					VOP_PUTPAGES(ovp,
					    trunc_page(osize & lfs_sb_getbmask(fs)),
					    round_page(eob),
					    PGO_CLEANIT | PGO_SYNCIO);
				}
			}
			uvm_vnp_setwritesize(ovp, length);
			error = ulfs_balloc_range(ovp, length - 1, 1, cred,
						 aflags);
			if (error) {
				(void) lfs_truncate(ovp, osize,
						    ioflag & IO_SYNC, cred);
				return error;
			}
			uvm_vnp_setsize(ovp, length);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			KASSERT(ovp->v_size == oip->i_size);
			oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
			return (lfs_update(ovp, NULL, NULL, 0));
		} else {
			error = lfs_reserve(fs, ovp, NULL,
			    lfs_btofsb(fs, (ULFS_NIADDR + 2) << lfs_sb_getbshift(fs)));
			if (error)
				return (error);
			error = lfs_balloc(ovp, length - 1, 1, cred,
					   aflags, &bp);
			lfs_reserve(fs, ovp, NULL,
			    -lfs_btofsb(fs, (ULFS_NIADDR + 2) << lfs_sb_getbshift(fs)));
			if (error)
				return (error);
			oip->i_ffs1_size = oip->i_size = length;
			uvm_vnp_setsize(ovp, length);
			(void) VOP_BWRITE(bp->b_vp, bp);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
			return (lfs_update(ovp, NULL, NULL, 0));
		}
	}

	if ((error = lfs_reserve(fs, ovp, NULL,
	    lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)))) != 0)
		return (error);

	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = lfs_blkoff(fs, length);
	lastseg = -1;
	bc = 0;

	if (ovp != fs->lfs_ivnode)
		lfs_seglock(fs, SEGM_PROT);
	if (offset == 0) {
		oip->i_size = oip->i_ffs1_size = length;
	} else if (!usepc) {
		lbn = lfs_lblkno(fs, length);
		aflags = B_CLRBUF;
		if (ioflag & IO_SYNC)
			aflags |= B_SYNC;
		error = lfs_balloc(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			lfs_reserve(fs, ovp, NULL,
			    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
			goto errout;
		}
		obufsize = bp->b_bufsize;
		odb = lfs_btofsb(fs, bp->b_bcount);
		oip->i_size = oip->i_ffs1_size = length;
		size = lfs_blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			memset((char *)bp->b_data + offset, 0,
			       (u_int)(size - offset));
		allocbuf(bp, size, 1);
		if ((bp->b_flags & B_LOCKED) != 0 && bp->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes -= obufsize - bp->b_bufsize;
			mutex_exit(&lfs_lock);
		}
		if (bp->b_oflags & BO_DELWRI) {
			lfs_sb_addavail(fs, odb - lfs_btofsb(fs, size));
			/* XXX shouldn't this wake up on lfs_availsleep? */
		}
		(void) VOP_BWRITE(bp->b_vp, bp);
	} else { /* vp->v_type == VREG && length < osize && offset != 0 */
		/*
		 * When truncating a regular file down to a non-block-aligned
		 * size, we must zero the part of last block which is past
		 * the new EOF.  We must synchronously flush the zeroed pages
		 * to disk since the new pages will be invalidated as soon
		 * as we inform the VM system of the new, smaller size.
		 * We must do this before acquiring the GLOCK, since fetching
		 * the pages will acquire the GLOCK internally.
		 * So there is a window where another thread could see a whole
		 * zeroed page past EOF, but that's life.
		 */
		daddr_t xlbn;
		voff_t eoz;

		aflags = ioflag & IO_SYNC ? B_SYNC : 0;
		error = ulfs_balloc_range(ovp, length - 1, 1, cred, aflags);
		if (error) {
			lfs_reserve(fs, ovp, NULL,
				    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
			goto errout;
		}
		xlbn = lfs_lblkno(fs, length);
		size = lfs_blksize(fs, oip, xlbn);
		eoz = MIN(lfs_lblktosize(fs, xlbn) + size, osize);
		ubc_zerorange(&ovp->v_uobj, length, eoz - length,
		    UBC_UNMAP_FLAG(ovp));
		if (round_page(eoz) > round_page(length)) {
			mutex_enter(ovp->v_interlock);
			error = VOP_PUTPAGES(ovp, round_page(length),
			    round_page(eoz),
			    PGO_CLEANIT | PGO_DEACTIVATE |
			    ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
			if (error) {
				lfs_reserve(fs, ovp, NULL,
					    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
				goto errout;
			}
		}
	}

	genfs_node_wrlock(ovp);

	oip->i_size = oip->i_ffs1_size = length;
	uvm_vnp_setsize(ovp, length);

	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	/* Avoid sign overflow - XXX assumes that off_t is a quad_t. */
	if (length > QUAD_MAX - lfs_sb_getbsize(fs))
		lastblock = lfs_lblkno(fs, QUAD_MAX - lfs_sb_getbsize(fs));
	else
		lastblock = lfs_lblkno(fs, length + lfs_sb_getbsize(fs) - 1) - 1;
	lastiblock[SINGLE] = lastblock - ULFS_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - LFS_NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - LFS_NINDIR(fs) * LFS_NINDIR(fs);
	nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));
	/*
	 * Record changed file and block pointers before we start
	 * freeing blocks.  lastiblock values are also normalized to -1
	 * for calls to lfs_indirtrunc below.
	 */
	memcpy((void *)newblks, (void *)&oip->i_ffs1_db[0], sizeof newblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			newblks[ULFS_NDADDR+level] = 0;
			lastiblock[level] = -1;
		}
	for (i = ULFS_NDADDR - 1; i > lastblock; i--)
		newblks[i] = 0;

	oip->i_size = oip->i_ffs1_size = osize;
	error = lfs_vtruncbuf(ovp, lastblock + 1, false, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -ULFS_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - LFS_NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - LFS_NINDIR(fs) * LFS_NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ffs1_ib[level];
		if (bn != 0) {
			error = lfs_indirtrunc(oip, indir_lbn[level],
					       bn, lastiblock[level],
					       level, &count, &rcount,
					       &lastseg, &bc);
			if (error)
				allerror = error;
			real_released += rcount;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				if (oip->i_ffs1_ib[level] > 0)
					real_released += nblocks;
				blocksreleased += nblocks;
				oip->i_ffs1_ib[level] = 0;
				lfs_blkfree(fs, oip, bn, lfs_sb_getbsize(fs),
					    &lastseg, &bc);
				lfs_deregister_block(ovp, bn);
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = ULFS_NDADDR - 1; i > lastblock; i--) {
		long bsize, obsize;

		bn = oip->i_ffs1_db[i];
		if (bn == 0)
			continue;
		bsize = lfs_blksize(fs, oip, i);
		if (oip->i_ffs1_db[i] > 0) {
			/* Check for fragment size changes */
			obsize = oip->i_lfs_fragsize[i];
			real_released += lfs_btofsb(fs, obsize);
			oip->i_lfs_fragsize[i] = 0;
		} else
			obsize = 0;
		blocksreleased += lfs_btofsb(fs, bsize);
		oip->i_ffs1_db[i] = 0;
		lfs_blkfree(fs, oip, bn, obsize, &lastseg, &bc);
		lfs_deregister_block(ovp, bn);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_ffs1_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;
#if 0
		long olddspace;
#endif

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = lfs_blksize(fs, oip, lastblock);
#if 0
		olddspace = oip->i_lfs_fragsize[lastblock];
#endif

		oip->i_size = oip->i_ffs1_size = length;
		newspace = lfs_blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			blocksreleased += lfs_btofsb(fs, oldspace - newspace);
		}
#if 0
		if (bn > 0 && olddspace - newspace > 0) {
			/* No segment accounting here, just vnode */
			real_released += lfs_btofsb(fs, olddspace - newspace);
		}
#endif
	}

done:
	/* Finish segment accounting corrections */
	lfs_update_seguse(fs, oip, lastseg, bc);
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if ((newblks[ULFS_NDADDR + level] == 0) !=
		    ((oip->i_ffs1_ib[level]) == 0)) {
			panic("lfs itrunc1");
		}
	for (i = 0; i < ULFS_NDADDR; i++)
		if ((newblks[i] == 0) != (oip->i_ffs1_db[i] == 0)) {
			panic("lfs itrunc2");
		}
	if (length == 0 &&
	    (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
		panic("lfs itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = oip->i_ffs1_size = length;
	oip->i_lfs_effnblks -= blocksreleased;
	oip->i_ffs1_blocks -= real_released;
	mutex_enter(&lfs_lock);
	lfs_sb_addbfree(fs, blocksreleased);
	mutex_exit(&lfs_lock);
#ifdef DIAGNOSTIC
	if (oip->i_size == 0 &&
	    (oip->i_ffs1_blocks != 0 || oip->i_lfs_effnblks != 0)) {
		printf("lfs_truncate: truncate to 0 but %d blks/%jd effblks\n",
		       oip->i_ffs1_blocks, (intmax_t)oip->i_lfs_effnblks);
		panic("lfs_truncate: persistent blocks");
	}
#endif

	/*
	 * If we truncated to zero, take us off the paging queue.
	 */
	mutex_enter(&lfs_lock);
	if (oip->i_size == 0 && oip->i_flags & IN_PAGING) {
		oip->i_flags &= ~IN_PAGING;
		TAILQ_REMOVE(&fs->lfs_pchainhd, oip, i_lfs_pchain);
	}
	mutex_exit(&lfs_lock);

	oip->i_flag |= IN_CHANGE;
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	(void) lfs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	lfs_reserve(fs, ovp, NULL,
	    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
	genfs_node_unlock(ovp);
  errout:
	oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
	if (ovp != fs->lfs_ivnode)
		lfs_segunlock(fs);
	return (allerror ? allerror : error);
}
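
The lastblock/lastiblock arithmetic above determines how much of the block-pointer tree survives the truncation: lastblock is the last logical block to keep, and each lastiblock[level] is the highest surviving index inside that indirect tree (negative when the whole tree is freed). A small standalone sketch of the same arithmetic, with assumed stand-ins for the block size, ULFS_NDADDR, and LFS_NINDIR(fs), might look like this:

#include <stdio.h>
#include <sys/types.h>

#define BSIZE	8192		/* assumed file system block size */
#define NDADDR	12		/* illustrative stand-in for ULFS_NDADDR */
#define NINDIR	(BSIZE / 8)	/* illustrative stand-in for LFS_NINDIR(fs) */

enum { SINGLE = 0, DOUBLE = 1, TRIPLE = 2 };

int
main(void)
{
	off_t length = 5 * (off_t)BSIZE + 100;	/* example new file size */
	long long lastblock, lastiblock[3];

	/* Last logical block to keep; -1 when truncating to zero. */
	lastblock = ((long long)length + BSIZE - 1) / BSIZE - 1;

	/* Highest surviving index inside each indirect tree. */
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR;
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - (long long)NINDIR * NINDIR;

	printf("lastblock %lld, single %lld, double %lld, triple %lld\n",
	    lastblock, lastiblock[SINGLE], lastiblock[DOUBLE],
	    lastiblock[TRIPLE]);
	/* A negative lastiblock[level] means that whole indirect tree goes away. */
	return 0;
}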