Example #1
/*
 * Rewrite an existing directory entry to point at the inode
 * supplied.  The parameters describing the directory entry are
 * set up by a call to namei.
 */
int
ufs_dirrewrite(struct inode *dp, struct inode *oip, ufsino_t newinum,
    int newtype, int isrmdir)
{
	struct buf *bp;
	struct direct *ep;
	struct vnode *vdp = ITOV(dp);
	int error;

	error = UFS_BUFATOFF(dp, (off_t)dp->i_offset, (char **)&ep, &bp);
	if (error)
		return (error);
	ep->d_ino = newinum;
	if (vdp->v_mount->mnt_maxsymlinklen > 0)
 		ep->d_type = newtype;
 	oip->i_effnlink--;
 	if (DOINGSOFTDEP(vdp)) {
		softdep_change_linkcnt(oip, 0);
 		softdep_setup_directory_change(bp, dp, oip, newinum, isrmdir);
 		bdwrite(bp);
 	} else {
		DIP_ADD(oip, nlink, -1);
		oip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(oip, MNT_WAIT);
		if (DOINGASYNC(vdp)) {
			bdwrite(bp);
			error = 0;
		} else {
			error = VOP_BWRITE(bp);
		}
 	}
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	return (error);
}
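
The bdwrite()/VOP_BWRITE() split above is the pattern that recurs throughout these examples: a delayed write merely queues the dirty buffer and returns, while a synchronous write performs the I/O now and can report an error. Below is a minimal user-space sketch of that convention; the types, flag value, and *_sketch names are all made up for illustration, not the kernel API.

#include <stdio.h>

#define B_DELWRI 0x1	/* invented flag: delayed write pending */

struct buf {
	int b_flags;
	int b_error;	/* simulated device status, 0 = ok */
};

/* Synchronous write: do the I/O now and report the result. */
static int bwrite_sketch(struct buf *bp)
{
	bp->b_flags &= ~B_DELWRI;
	/* ... start the I/O and sleep until it completes ... */
	return bp->b_error;
}

/* Delayed write: only mark the buffer; a later flush does the I/O. */
static void bdwrite_sketch(struct buf *bp)
{
	bp->b_flags |= B_DELWRI;
	/* errors can only surface when the buffer is finally flushed */
}

int main(void)
{
	struct buf b = { 0, 0 };

	bdwrite_sketch(&b);	/* cheap; caller cannot observe an error */
	printf("delayed: flags=%#x\n", b.b_flags);
	printf("sync: error=%d\n", bwrite_sketch(&b));
	return 0;
}

This is why the delayed-write arms in the examples return 0 unconditionally: only whoever eventually flushes the buffer can see a failure.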
Example #2
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if (fs->e2fs_ronly)
		return (0);
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
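
The comment above describes a two-stage protocol: the timestamp flags (IN_ACCESS, IN_UPDATE, IN_CHANGE) are folded into IN_MODIFIED by the itimes step, and only IN_MODIFIED forces the disk write. A small sketch of that folding, with invented flag values (the real ones live in the inode headers):

#include <stdio.h>

#define IN_ACCESS   0x01	/* made-up values for illustration */
#define IN_UPDATE   0x02
#define IN_CHANGE   0x04
#define IN_MODIFIED 0x08

/* Sketch of the itimes step: any timestamp flag marks the inode modified. */
static int itimes_sketch(int flags)
{
	if (flags & (IN_ACCESS | IN_UPDATE | IN_CHANGE)) {
		/* ... copy the current time into the in-core inode ... */
		flags |= IN_MODIFIED;
		flags &= ~(IN_ACCESS | IN_UPDATE | IN_CHANGE);
	}
	return flags;
}

int main(void)
{
	int flags = itimes_sketch(IN_CHANGE);

	/* only IN_MODIFIED survives, which is what ext2_update() tests */
	printf("flags=%#x modified=%d\n", flags, !!(flags & IN_MODIFIED));
	return 0;
}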
Example #3
File: devio.c Project: 8l/FUZIX
bufptr freebuf(void)
{
	bufptr bp;
	bufptr oldest;
	int16_t oldtime;

	/* Try to find a non-busy buffer and write out the data if it is dirty */
	oldest = NULL;
	oldtime = 0;
	for (bp = bufpool; bp < bufpool_end; ++bp) {
		if (bufclock - bp->bf_time >= oldtime && bp->bf_busy == BF_FREE) {
			oldest = bp;
			oldtime = bufclock - bp->bf_time;
		}
	}
	if (!oldest)
		panic(PANIC_NOFREEB);

	if (oldest->bf_dirty) {
		if (bdwrite(oldest) == -1)
			udata.u_error = EIO;
		oldest->bf_dirty = false;
	}
	return oldest;
}
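
The selection loop above is a linear LRU scan: a buffer's age is bufclock - bp->bf_time, and the >= comparison makes ties go to the buffer later in the pool. A self-contained sketch of the same scan over a plain array; the names are illustrative, and unsigned arithmetic is used so ages stay sensible across counter wrap (the original uses int16_t):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct mbuf {
	uint16_t time;		/* stamp taken from a global tick counter */
	int busy;		/* nonzero = cannot be recycled */
};

static uint16_t clock_now = 100;

/* Pick the least recently used non-busy entry, as freebuf() does. */
static struct mbuf *pick_oldest(struct mbuf *pool, size_t n)
{
	struct mbuf *oldest = NULL;
	uint16_t oldtime = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		uint16_t age = clock_now - pool[i].time;

		if (age >= oldtime && !pool[i].busy) {
			oldest = &pool[i];
			oldtime = age;
		}
	}
	return oldest;
}

int main(void)
{
	struct mbuf pool[3] = { { 90, 0 }, { 10, 1 }, { 40, 0 } };
	struct mbuf *bp = pick_oldest(pool, 3);

	printf("picked time=%u\n", bp ? bp->time : 0);	/* 40: oldest free */
	return 0;
}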
Example #4
File: devio.c Project: 8l/FUZIX
static void bdput(bufptr bp)
{
	bdwrite(bp);
	if (bp->bf_busy == BF_FREE)
		bp->bf_dirty = false;
	d_flush(bp->bf_dev);
}
Example #5
bufptr freebuf(void)
{
	register bufptr bp;
	register bufptr oldest;
	register int oldtime;

	/* Try to find a non-busy buffer and write out the data if it is dirty */
	oldest = NULL;
	oldtime = 0;
	for (bp = bufpool; bp < bufpool + NBUFS; ++bp) {
		if (bufclock - bp->bf_time >= oldtime && !bp->bf_busy) {
			oldest = bp;
			oldtime = bufclock - bp->bf_time;
		}
	}
	ifnot(oldest)
	    panic("no free buffers");

	if (oldest->bf_dirty) {
		if (bdwrite(oldest) == -1)
			udata.u_error = EIO;
		oldest->bf_dirty = 0;
	}
	return (oldest);
}
Example #6
int
deupdat(struct denode *dep, int waitfor)
{
	struct buf *bp;
	struct direntry *dirp;
	int error;
	struct timespec ts;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (dep->de_Attributes & ATTR_DIRECTORY)
		return (0);
	if (dep->de_refcnt <= 0)
		return (0);
	error = readde(dep, &bp, &dirp);
	if (error)
		return (error);
	DE_EXTERNALIZE(dirp, dep);
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
Example #7
bufptr freebuf(void)
{
	regptr bufptr bp;
	bufptr oldest;
	int16_t oldtime;

	/* Try to find a non-busy buffer and write out the data if it is dirty */
	oldest = NULL;
	oldtime = 0;
	for (bp = bufpool; bp < bufpool_end; ++bp) {
		if (bufclock - bp->bf_time >= oldtime && !bisbusy(bp)) {
			oldest = bp;
			oldtime = bufclock - bp->bf_time;
		}
	}
	if (!oldest)
		panic(PANIC_NOFREEB);

	block(oldest);
	if (oldest->bf_dirty) {
		bdwrite(oldest);
		oldest->bf_dirty = false;
	}
	return oldest;
}
Example #8
/*
 * This function increments the inode version number
 *
 * This may be used one day by the NFS server
 */
static void
inc_inode_version(struct inode *inode, struct ext2_group_desc *gdp, int mode)
{
	unsigned long inode_block;
	struct buf *bh;
	struct ext2_inode *raw_inode;

	inode_block = gdp->bg_inode_table + (((inode->i_number - 1) %
			EXT2_INODES_PER_GROUP(inode->i_sb)) /
			EXT2_INODES_PER_BLOCK(inode->i_sb));
	bh = bread (inode->i_sb->s_dev, dbtob(inode_block), inode->i_sb->s_blocksize);
	if (!bh) {
		kprintf ("inc_inode_version Cannot load inode table block - "
			    "inode=%lu, inode_block=%lu\n",
			    inode->i_number, inode_block);
		inode->u.ext2_i.i_version = 1;
		return;
	}
	raw_inode = ((struct ext2_inode *) bh->b_data) +
			(((inode->i_number - 1) %
			EXT2_INODES_PER_GROUP(inode->i_sb)) %
			EXT2_INODES_PER_BLOCK(inode->i_sb));
	raw_inode->i_version++;
	inode->u.ext2_i.i_version = raw_inode->i_version;
	bdwrite (bh);
}
Example #9
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *	  inode in the specified cylinder group.
 */
static daddr_t
ext2fs_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	char *ibp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	ipref--; /* to avoid a lot of (ipref -1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_i_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs.e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs.e2fs_ipg - ipref, NBBY);
	loc = skpc(0xff, len, &ibp[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &ibp[0]);
		if (loc == 0) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
				cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = ibp[i] ^ 0xff;
	if (map == 0) {
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("ext2fs_nodealloccg: block not in map");
	}
	ipref = i * NBBY + ffs(map) - 1;
gotit:
	setbit(ibp, ipref);
	fs->e2fs.e2fs_ficount--;
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
	}
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_ipg + ipref + 1);
}
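
The policy in the comment boils down to byte-bitmap arithmetic: skpc() skips fully allocated 0xff bytes, and ffs() on the complemented byte locates the first zero bit. A user-space sketch of the same search, with setbit/isclr reimplemented (they normally come from sys/param.h) and without the wrap-around retry that the real function performs:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NBBY 8
#define setbit(a, i)	((a)[(i) / NBBY] |= 1 << ((i) % NBBY))
#define isclr(a, i)	(((a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)

/* Find and claim the first free bit at or after pref; -1 if none. */
static int alloc_bit(unsigned char *map, int nbits, int pref)
{
	int i;

	if (isclr(map, pref)) {			/* policy 1: take pref */
		setbit(map, pref);
		return pref;
	}
	for (i = pref / NBBY; i < (nbits + NBBY - 1) / NBBY; i++) {
		if (map[i] != 0xff) {		/* policy 2: next free bit */
			int bit = i * NBBY + ffs(map[i] ^ 0xff) - 1;

			setbit(map, bit);
			return bit;
		}
	}
	return -1;
}

int main(void)
{
	unsigned char map[2] = { 0xff, 0x07 };	/* bits 0-10 in use */

	printf("got bit %d\n", alloc_bit(map, 16, 3));	/* prints 11 */
	return 0;
}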
Example #10
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ffs_update(struct vnode *vp, int waitfor)
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);

	/*
	 * The vnode type is usually set to VBAD if an unrecoverable I/O
	 * error has occurred (such as when reading the inode).  Clear the
	 * modified bits but do not write anything out in this case.
	 */
	if (vp->v_type == VBAD)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, 
		      fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
		      (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}
Example #11
/*
 * Change the number of unreferenced inodes.
 */
static int
ufs_gjournal_modref(struct vnode *vp, int count)
{
    struct cg *cgp;
    struct buf *bp;
    ufs2_daddr_t cgbno;
    int error, cg;
    struct cdev *dev;
    struct inode *ip;
    struct ufsmount *ump;
    struct fs *fs;
    struct vnode *devvp;
    ino_t ino;

    ip = VTOI(vp);
    ump = VFSTOUFS(vp->v_mount);
    fs = ump->um_fs;
    devvp = ump->um_devvp;
    ino = ip->i_number;

    cg = ino_to_cg(fs, ino);
    if (devvp->v_type == VREG) {
        /* devvp is a snapshot */
        dev = VFSTOUFS(devvp->v_mount)->um_devvp->v_rdev;
        cgbno = fragstoblks(fs, cgtod(fs, cg));
    } else if (devvp->v_type == VCHR) {
        /* devvp is a normal disk device */
        dev = devvp->v_rdev;
        cgbno = fsbtodb(fs, cgtod(fs, cg));
    } else {
        bp = NULL;
        return (EIO);
    }
    if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
        panic("ufs_gjournal_modref: range: dev = %s, ino = %lu, fs = %s",
              devtoname(dev), (u_long)ino, fs->fs_fsmnt);
    if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
        brelse(bp);
        return (error);
    }
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp)) {
        brelse(bp);
        return (0);
    }
    bp->b_xflags |= BX_BKGRDWRITE;
    cgp->cg_unrefs += count;
    UFS_LOCK(ump);
    fs->fs_unrefs += count;
    fs->fs_fmod = 1;
    ACTIVECLEAR(fs, cg);
    UFS_UNLOCK(ump);
    bdwrite(bp);
    return (0);
}
Example #12
/*
 * Write back a buffer, doing the locking ourselves. This is called when
 * we do a sync or when we get a media change and need to write back
 * data.
 *
 * FIXME: for the simple case I don't think we can ever get called within
 * an active I/O, so the block/bunlock should be fine - but not strictly
 * needed. In async mode they are needed.
 */
static void bdput(regptr bufptr bp)
{
	block_s(bp);
	if (bp->bf_dirty) {
		bdwrite(bp);
		bp->bf_dirty = false;
		bunlock_s(bp);
		d_flush(bp->bf_dev);
	} else
		bunlock_s(bp);
}
Example #13
void bufsync(void)
{
	register bufptr bp;

	for (bp = bufpool; bp < bufpool + NBUFS; ++bp) {
		if (bp->bf_dev != -1 && bp->bf_dirty) {
			bdwrite(bp);
			if (!bp->bf_busy)
				bp->bf_dirty = 0;
		}
	}
}
Example #14
int bfree(bufptr bp, int dirty)
{
/*printf("Releasing block %d (%d)\n", bp->bf_blk, dirty);*/
	bp->bf_dirty |= dirty;
	bp->bf_busy = 0;

	if (dirty == 2) {	/* Extra dirty */
		if (bdwrite(bp) == -1)
			udata.u_error = EIO;
		bp->bf_dirty = 0;
		return (-1);
	}
	return (0);
}
Example #15
File: devio.c Project: 8l/FUZIX
int bfree(bufptr bp, uint8_t dirty)
{				/* dirty: 0=clean, 1=dirty (write back), 2=dirty+immediate write */
	if (dirty)
		bp->bf_dirty = true;
	
	if(bp->bf_busy == BF_BUSY) /* do not free BF_SUPERBLOCK */
		bp->bf_busy = BF_FREE;

	if (dirty > 1) {	/* immediate writeback */
		if (bdwrite(bp) == -1)
			udata.u_error = EIO;
		bp->bf_dirty = false;
		return -1;
	}
	return 0;
}
Example #16
/*
 * Free an inode.
 *
 */
int
ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
{
    struct m_ext2fs *fs;
    struct inode *pip;
    struct buf *bp;
    struct ext2mount *ump;
    int error, cg;
    char *ibp;

    pip = VTOI(pvp);
    fs = pip->i_e2fs;
    ump = pip->i_ump;
    if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
        panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
              pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);

    cg = ino_to_cg(fs, ino);
    error = bread(pip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        return (0);
    }
    ibp = (char *)bp->b_data;
    ino = (ino - 1) % fs->e2fs->e2fs_ipg;
    if (isclr(ibp, ino)) {
        printf("ino = %llu, fs = %s\n",
               (unsigned long long)ino, fs->e2fs_fsmnt);
        if (fs->e2fs_ronly == 0)
            panic("ext2_vfree: freeing free inode");
    }
    clrbit(ibp, ino);
    EXT2_LOCK(ump);
    fs->e2fs->e2fs_ficount++;
    fs->e2fs_gd[cg].ext2bgd_nifree++;
    if ((mode & IFMT) == IFDIR) {
        fs->e2fs_gd[cg].ext2bgd_ndirs--;
        fs->e2fs_total_dir--;
    }
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
    return (0);
}
Example #17
/*
 * Update the access, modified, and inode change times as specified by the
 * IACCESS, IUPDATE, and ICHANGE flags respectively. The IMODIFIED flag is
 * used to specify that the inode needs to be updated but that the times have
 * already been set. The access and modified times are taken from the second
 * and third parameters; the inode change time is always taken from the current
 * time. If waitfor is set, then wait for the disk write of the inode to
 * complete.
 */
int
ext2fs_update(struct inode *ip, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int error;
	caddr_t cp;

	if (ITOV(ip)->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	EXT2FS_ITIMES(ip);
	if ((ip->i_flag & IN_MODIFIED) == 0)
		return (0);
	ip->i_flag &= ~IN_MODIFIED;
	fs = ip->i_e2fs;
	error = bread(ip->i_devvp,
			  fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
			  (int)fs->e2fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	ip->i_flag &= ~(IN_MODIFIED);
	cp = (caddr_t)bp->b_data +
	    (ino_to_fsbo(fs, ip->i_number) * EXT2_DINODE_SIZE(fs));

	/*
	 * See note about 16-bit UID/GID limitation in ext2fs_vget(). Now
	 * that we are about to write the inode, construct the split UID and
	 * GID fields out of the two 32-bit fields we kept in memory.
	 */
	ip->i_e2fs_uid_low = (u_int16_t)ip->i_e2fs_uid;
	ip->i_e2fs_gid_low = (u_int16_t)ip->i_e2fs_gid;
	ip->i_e2fs_uid_high = ip->i_e2fs_uid >> 16;
	ip->i_e2fs_gid_high = ip->i_e2fs_gid >> 16;

	e2fs_isave(fs, ip->i_e2din, (struct ext2fs_dinode *)cp);
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
Example #18
int
deupdat(struct denode *dep, int waitfor)
{
	struct direntry dir;
	struct timespec ts;
	struct buf *bp;
	struct direntry *dirp;
	int error;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY) {
		dep->de_flag &= ~(DE_UPDATE | DE_CREATE | DE_ACCESS |
		    DE_MODIFIED);
		return (0);
	}
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0 && waitfor == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (DETOV(dep)->v_vflag & VV_ROOT)
		return (EINVAL);
	if (dep->de_refcnt <= 0)
		return (0);
	error = readde(dep, &bp, &dirp);
	if (error)
		return (error);
	DE_EXTERNALIZE(&dir, dep);
	if (bcmp(dirp, &dir, sizeof(dir)) == 0) {
		if (waitfor == 0 || (bp->b_flags & B_DELWRI) == 0) {
			brelse(bp);
			return (0);
		}
	} else
		*dirp = dir;
	if ((DETOV(dep)->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0)
		bp->b_flags |= B_CLUSTEROK;
	if (waitfor)
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe())
		bawrite(bp);
	else
		bdwrite(bp);
	return (error);
}
Example #19
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ext2fs_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	struct m_ext2fs *fs;
	char *ibp;
	struct inode *pip;
	struct buf *bp;
	int error, cg;

	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	if ((u_int)ino > fs->e2fs.e2fs_icount || (u_int)ino < EXT2_FIRSTINO)
		panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
		    (unsigned long long)pip->i_dev, (unsigned long long)ino,
		    fs->e2fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp,
		fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return (0);
	}
	ibp = (char *)bp->b_data;
	ino = (ino - 1) % fs->e2fs.e2fs_ipg;
	if (isclr(ibp, ino)) {
		printf("dev = 0x%llx, ino = %llu, fs = %s\n",
		    (unsigned long long)pip->i_dev,
		    (unsigned long long)ino, fs->e2fs_fsmnt);
		if (fs->e2fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(ibp, ino);
	fs->e2fs.e2fs_ficount++;
	fs->e2fs_gd[cg].ext2bgd_nifree++;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs--;
	}
	fs->e2fs_fmod = 1;
	bdwrite(bp);
	return (0);
}
Example #20
/*
 * Free a block or fragment.
 *
 */
void
ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    int cg, error;
    char *bbp;

    fs = ip->i_e2fs;
    ump = ip->i_ump;
    cg = dtog(fs, bno);
    if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
        printf("bad block %lld, ino %llu\n", (long long)bno,
               (unsigned long long)ip->i_number);
        ext2_fserr(fs, ip->i_uid, "bad block");
        return;
    }
    error = bread(ip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        return;
    }
    bbp = (char *)bp->b_data;
    bno = dtogd(fs, bno);
    if (isclr(bbp, bno)) {
        printf("block = %lld, fs = %s\n",
               (long long)bno, fs->e2fs_fsmnt);
        panic("ext2_blkfree: freeing free block");
    }
    clrbit(bbp, bno);
    EXT2_LOCK(ump);
    ext2_clusteracct(fs, bbp, cg, bno, 1);
    fs->e2fs->e2fs_fbcount++;
    fs->e2fs_gd[cg].ext2bgd_nbfree++;
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
}
Example #21
/*
 *	Release an entry in the buffer cache. Passed a locked buffer and
 *	a dirty status
 *
 *	0: Caller did not dirty buffer (but may be dirty already)
 *	1: Caller did dirty buffer
 *	2: Caller dirtied buffer and wants it written back now
 *
 *	If a writeback now is requested and an error occurs then u_error will
 *	be set and -1 returned.
 */
int bfree(regptr bufptr bp, uint8_t dirty)
{				/* dirty: 0=clean, 1=dirty (write back), 2=dirty+immediate write */
	int ret = 0;
	if (dirty)
		bp->bf_dirty = true;
	
	if (dirty > 1) {	/* immediate writeback */
		if (bdwrite(bp) != BLKSIZE) {
			udata.u_error = EIO;
			ret = -1;
		}
		bp->bf_dirty = false;
	}
	/* Time stamp the buffer on free up. It doesn't matter if we stamp it
	   on read or free as while locked it can't go away. However if we
	   stamp it on free we can in future make smarter decisions such as
	   recycling full dirty buffers faster than partial ones */
	bp->bf_time = ++bufclock;
	bunlock(bp);
	return ret;
}
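
The 0/1/2 dirty convention documented above gives callers three release modes. A sketch of the convention against a mock buffer type; the mock_* names are hypothetical stand-ins, not FUZIX functions:

#include <stdio.h>
#include <stdint.h>

struct mockbuf {
	int dirty;
	int locked;
};

static int write_now_fails;	/* flip to simulate a device error */

static int mock_bdwrite(struct mockbuf *bp)
{
	(void)bp;			/* a real bdwrite would queue bp */
	return write_now_fails ? -1 : 512;
}

/* dirty: 0 = clean release, 1 = mark dirty, 2 = mark dirty + write now */
static int mock_bfree(struct mockbuf *bp, uint8_t dirty)
{
	int ret = 0;

	if (dirty)
		bp->dirty = 1;
	if (dirty > 1) {
		if (mock_bdwrite(bp) < 0)
			ret = -1;	/* only mode 2 reports I/O errors */
		bp->dirty = 0;
	}
	bp->locked = 0;
	return ret;
}

int main(void)
{
	struct mockbuf b = { 0, 1 };
	int ret;

	ret = mock_bfree(&b, 1);
	printf("dirty=1: ret=%d dirty=%d\n", ret, b.dirty);
	b.locked = 1;
	ret = mock_bfree(&b, 2);
	printf("dirty=2: ret=%d dirty=%d\n", ret, b.dirty);
	return 0;
}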
Example #22
/*
 * Free a block.
 *
 * The specified block is placed back in the
 * free map.
 */
void
ext2fs_blkfree(struct inode *ip, daddr_t bno)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_e2fs;
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->e2fs.e2fs_bcount) {
		printf("bad block %lld, ino %llu\n", (long long)bno,
		    (unsigned long long)ip->i_number);
		ext2fs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
		fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("dev = 0x%llx, block = %lld, fs = %s\n",
		    (unsigned long long)ip->i_dev, (long long)bno,
		    fs->e2fs_fsmnt);
		panic("blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	fs->e2fs.e2fs_fbcount++;
	fs->e2fs_gd[cg].ext2bgd_nbfree++;

	fs->e2fs_fmod = 1;
	bdwrite(bp);
}
Example #23
int
xtaf_deupdat(struct denode *dep, int waitfor)
{
	int error;
	struct buf *bp;
	struct direntry *ep;
	struct timespec ts;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (dep->de_Attributes & ATTR_DIRECTORY)
		return (0);
	/*
	 * NOTE: The check for de_refcnt > 0 below ensures the denode being
	 * examined does not represent an unlinked but still open file.
	 * These files are not to be accessible even when the directory
	 * entry that represented the file happens to be reused while the
	 * deleted file is still open.
	 */
	if (dep->de_refcnt <= 0)
		return (0);
	error = xtaf_readde(dep, &bp, &ep);
	if (error)
		return (error);
	DE_EXTERNALIZE(ep, dep);
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
Example #24
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
    struct ext2_sb_info *fs;
    struct buf *bp;
    struct inode *ip;
    int error;

    ext2_itimes(vp);
    ip = VTOI(vp);
    if ((ip->i_flag & IN_MODIFIED) == 0)
        return (0);
    ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
    if (vp->v_mount->mnt_flag & MNT_RDONLY)
        return (0);
    fs = ip->i_e2fs;
    error = bread(ip->i_devvp,
                  fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
                  (int)fs->s_blocksize, &bp);
    if (error) {
        brelse(bp);
        return (error);
    }
    ext2_di2ei(&ip->i_din, (struct ext2_inode *)((char *)bp->b_data +
                EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
    /*
    	if (waitfor && (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
    		return (bwrite(bp));
    	else {
    */
    bdwrite(bp);
    return (0);
    /*
    	}
    */
}
Example #25
static int
tmpfs_write (struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int trivial = 0;
	int kflags = 0;
	int seqcount;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);
	seqcount = ap->a_ioflag >> 16;

	TMPFS_NODE_LOCK(node);

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	  VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
		error = EFBIG;
		goto done;
	}

	/*
	 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
	 */
	if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error)
			goto done;
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			error = EFBIG;
			goto done;
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

	while (uio->uio_resid > 0) {
		/*
		 * Don't completely blow out running buffer I/O
		 * when being hit from the pageout daemon.
		 */
		if (uio->uio_segflg == UIO_NOCOPY &&
		    (ap->a_ioflag & IO_RECURSE) == 0) {
			bwillwrite(TMPFS_BLKSIZE);
		}

		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & TMPFS_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		len = TMPFS_BLKSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len,
						 trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread(vp, base_offset, TMPFS_BLKSIZE, &bp);
		if (error) {	/* don't let uiomovebp clobber a read error */
			brelse(bp);
			break;
		}
		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size) {
			node->tn_size = uio->uio_offset;
			kflags |= NOTE_EXTEND;
		}
		kflags |= NOTE_WRITE;

		/*
		 * Always try to flush the page in the UIO_NOCOPY case.  This
		 * can come from the pageout daemon or during vnode eviction.
		 * It is not necessarily going to be marked IO_ASYNC/IO_SYNC.
		 *
		 * For the normal case we buwrite(), dirtying the underlying
		 * VM pages instead of dirtying the buffer and releasing the
		 * buffer as a clean buffer.  This allows tmpfs to use
		 * essentially all available memory to cache file data.
		 * If we used bdwrite() the buffer cache would wind up
		 * flushing the data to swap too quickly.
		 *
		 * But because tmpfs can seriously load the VM system we
		 * fall-back to using bdwrite() when free memory starts
		 * to get low.  This shifts the load away from the VM system
		 * and makes tmpfs act more like a normal filesystem with
		 * regards to disk activity.
		 *
		 * tmpfs pretty much fiddles directly with the VM
		 * system, don't let it exhaust it or we won't play
		 * nice with other processes.  Only do this if the
		 * VOP is coming from a normal read/write.  The VM system
		 * handles the case for UIO_NOCOPY.
		 */
		bp->b_flags |= B_CLUSTEROK;
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Flush from the pageout daemon, deal with
			 * potentially very heavy tmpfs write activity
			 * causing long stalls in the pageout daemon
			 * before pages get to free/cache.
			 *
			 * (a) Under severe pressure setting B_DIRECT will
			 *     cause a buffer release to try to free the
			 *     underlying pages.
			 *
			 * (b) Under modest memory pressure the B_RELBUF
			 *     alone is sufficient to get the pages moved
			 *     to the cache.  We could also force this by
			 *     setting B_NOTMETA but that might have other
			 *     unintended side-effects (e.g. setting
			 *     PG_NOTMETA on the VM page).
			 *
			 * Hopefully this will unblock the VM system more
			 * quickly under extreme tmpfs write load.
			 */
			if (vm_page_count_min(vm_page_free_hysteresis))
				bp->b_flags |= B_DIRECT;
			bp->b_flags |= B_AGE | B_RELBUF;
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			cluster_awrite(bp);
		} else if (vm_page_count_target()) {
			/*
			 * Normal (userland) write but we are low on memory,
			 * run the buffer through the buffer cache.
			 */
			bp->b_act_count = 0;	/* buffer->deactivate pgs */
			bdwrite(bp);
		} else {
			/*
			 * Otherwise run the buffer directly through to the
			 * backing VM store.
			 */
			buwrite(bp);
			/*vm_wait_nominal();*/
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (error) {
		if (extended) {
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	/*
	 * Currently we don't set the mtime on files modified via mmap()
	 * because we can't tell the difference between those modifications
	 * and an attempt by the pageout daemon to flush tmpfs pages to
	 * swap.
	 *
	 * This is because in order to defer flushes as long as possible
	 * buwrite() works by marking the underlying VM pages dirty in
	 * order to be able to dispose of the buffer cache buffer without
	 * flushing it.
	 */
	if (uio->uio_segflg != UIO_NOCOPY)
		node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED;
	if (extended)
		node->tn_status |= TMPFS_NODE_CHANGED;

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
done:
	TMPFS_NODE_UNLOCK(node);
	if (kflags)
		tmpfs_knote(vp, kflags);

	return(error);
}
Example #26
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif /* DIAGNOSTIC */
		bzero((char *)&oip->i_shortlink, (uint)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = ufs_getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
#ifdef QUOTA
			(void) ufs_chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, 0, 0);
			nvnode_pager_setsize(ovp, 0, fs->fs_bsize, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 *
	 * nvextendbuf() only breads the old buffer.  The blocksize
	 * of the new buffer must be specified so it knows how large
	 * to make the VM object.
	 */
	if (osize < length) {
		nvextendbuf(vp, osize, length,
			    blkoffsize(fs, oip, osize),	/* oblksize */
			    blkoffresize(fs, length),	/* nblksize */
			    blkoff(fs, osize),
			    blkoff(fs, length),
			    0);

		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		/* BALLOC will reallocate the fragment at the old EOF */
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}

	/*
	 * Shorten the size of the file.
	 *
	 * NOTE: The block size specified in nvtruncbuf() is the blocksize
	 *	 of the buffer containing length prior to any reallocation
	 *	 of the block.
	 */
	allerror = nvtruncbuf(ovp, length, blkoffsize(fs, oip, length),
			      blkoff(fs, length), 0);
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);

		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 *
		 * nvtruncbuf() may have re-dirtied the underlying block
		 * as part of its truncation zeroing code.  To avoid a
		 * 'locking against myself' panic in the second fsync we
		 * can simply undirty the bp since the redirtying was
		 * related to areas of the buffer that we are going to
		 * throw away anyway, and we will b*write() the remainder
		 * anyway down below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize) {
			bundirty(bp);
			error = VOP_FSYNC(ovp, MNT_WAIT, 0);
			if (error) {
				bdwrite(bp);
				return (error);
			}
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
#if 0
		/* remove - nvtruncbuf deals with this */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (uint)(size - offset));
#endif
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ffs_update(ovp, 1);
	if (error && allerror == 0)
		allerror = error;
	
	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	if (error && allerror == 0)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 && !RB_EMPTY(&ovp->v_rbdirty_tree))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) ufs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
Example #27
// The write operation for the ffs filesystem
int
ffs_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	daddr_t lbn;
	off_t osize;
	int blkoffset, error, extended, flags, ioflag, size, xfersize;
	ssize_t resid, overrun;

	extended = 0;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	/*
	 * If writing 0 bytes, succeed and do not change
	 * update time or file offset (standards compliance)
	 */
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = DIP(ip, size);
		if ((DIP(ip, flags) & APPEND) && uio->uio_offset != DIP(ip, size))
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		if ((ioflag & IO_SYNC) == 0)
			panic("ffs_write: nonsync dir write");
		break;
	default:
		panic("ffs_write: type");
	}

	fs = ip->i_fs;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	resid = uio->uio_resid;
	osize = DIP(ip, size);
	flags = ioflag & IO_SYNC ? B_SYNC : 0;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		if ((error = UFS_BUF_ALLOC(ip, uio->uio_offset, xfersize,
			 ap->a_cred, flags, &bp)) != 0)
			break;
		if (uio->uio_offset + xfersize > DIP(ip, size)) {
			DIP_ASSIGN(ip, size, uio->uio_offset + xfersize);
			uvm_vnp_setsize(vp, DIP(ip, size));
			extended = 1;
		}
		(void)uvm_vnp_uncache(vp);

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomovei(bp->b_data + blkoffset, xfersize, uio);

		if (error != 0)
			memset(bp->b_data + blkoffset, 0, xfersize);

#if 0
		if (ioflag & IO_NOCACHE)
			bp->b_flags |= B_NOCACHE;
#endif
		if (ioflag & IO_SYNC)
			(void)bwrite(bp);
		else if (xfersize + blkoffset == fs->fs_bsize) {
			if (doclusterwrite)
				cluster_write(bp, &ip->i_ci, DIP(ip, size));
			else
				bawrite(bp);
		} else
			bdwrite(bp);

		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		DIP_ASSIGN(ip, mode, DIP(ip, mode) & ~(ISUID | ISGID));
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)UFS_TRUNCATE(ip, osize,
			    ioflag & IO_SYNC, ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = UFS_UPDATE(ip, 1);
	}
	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;
	return (error);
}
Example #28
/*
 * Vnode op for writing.
 */
static int
ext2_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct m_ext2fs *fs;
	struct buf *bp;
	daddr_t lbn;
	off_t osize;
	int blkoffset, error, flags, ioflag, resid, size, seqcount, xfersize;

	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;

	seqcount = ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("%s: mode", "ext2_write");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		/* XXX differs from ffs -- this is called from ext2_mkdir(). */
		if ((ioflag & IO_SYNC) == 0)
		panic("ext2_write: nonsync dir write");
		break;
	default:
		panic("ext2_write: type %p %d (%jd,%jd)", (void *)vp,
		    vp->v_type, (intmax_t)uio->uio_offset,
		    (intmax_t)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ext2_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ext2_write: uio->uio_offset < 0"));
	fs = ip->i_e2fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->e2fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->e2fs_fsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->e2fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = ext2_balloc(ip, lbn, blkoffset + xfersize,
		    ap->a_cred, &bp, flags);
		if (error != 0)
			break;

		/*
		 * If the buffer is not valid and we did not clear garbage
		 * out above, we have to do so here even though the write
		 * covers the entire buffer in order to avoid a mmap()/write
		 * race where another process may see the garbage prior to
		 * the uiomove() for a write replacing it.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->e2fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;
		if (uio->uio_offset + xfersize > ip->i_size)
			ip->i_size = uio->uio_offset + xfersize;
		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if (ioflag & (IO_VMIO|IO_DIRECT)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->e2fs_fsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount, 0);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			ip->i_mode &= ~(ISUID | ISGID);
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ext2_truncate(vp, osize,
			    ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	}
	if (uio->uio_resid != resid) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (ioflag & IO_SYNC)
			error = ext2_update(vp, 1);
	}
	return (error);
}
Example #29
/*
 * Remove a directory entry after a call to namei, using
 * the parameters which it left in nameidata. The entry
 * dp->i_offset contains the offset into the directory of the
 * entry to be eliminated.  The dp->i_count field contains the
 * size of the previous record in the directory.  If this
 * is 0, the first entry is being deleted, so we need only
 * zero the inode number to mark the entry as free.  If the
 * entry is not the first in the directory, we must reclaim
 * the space of the now empty record by adding the record size
 * to the size of the previous entry.
 */
int
ufs_dirremove(struct vnode *dvp, struct inode *ip, int flags, int isrmdir)
{
	struct inode *dp;
	struct direct *ep;
	struct buf *bp;
	int error;

	UFS_WAPBL_JLOCK_ASSERT(dvp->v_mount);

	dp = VTOI(dvp);

	if ((error = UFS_BUFATOFF(dp,
	    (off_t)(dp->i_offset - dp->i_count), (char **)&ep, &bp)) != 0)
		return (error);
#ifdef UFS_DIRHASH
	/*
	 * Remove the dirhash entry. This is complicated by the fact
	 * that `ep' is the previous entry when dp->i_count != 0.
	 */
	if (dp->i_dirhash != NULL)
		ufsdirhash_remove(dp, (dp->i_count == 0) ? ep :
		    (struct direct *)((char *)ep + ep->d_reclen), dp->i_offset);
#endif

	if (dp->i_count == 0) {
		/*
		 * First entry in block: set d_ino to zero.
		 */
		ep->d_ino = 0;
	} else {
 		/*
 		 * Collapse new free space into previous entry.
 		 */
 		ep->d_reclen += dp->i_reclen;
	}
#ifdef UFS_DIRHASH
	if (dp->i_dirhash != NULL)
		ufsdirhash_checkblock(dp, (char *)ep -
		    ((dp->i_offset - dp->i_count) & (DIRBLKSIZ - 1)),
		    dp->i_offset & ~(DIRBLKSIZ - 1));
#endif
 	if (DOINGSOFTDEP(dvp)) {
		if (ip) {
			ip->i_effnlink--;
			softdep_change_linkcnt(ip, 0);
			softdep_setup_remove(bp, dp, ip, isrmdir);
		}
		if (softdep_slowdown(dvp)) {
			error = bwrite(bp);
		} else {
			bdwrite(bp);
			error = 0;
		}
 	} else {
		if (ip) {
			ip->i_effnlink--;
			DIP_ADD(ip, nlink, -1);
			ip->i_flag |= IN_CHANGE;
			UFS_WAPBL_UPDATE(ip, 0);
		}
		if (DOINGASYNC(dvp) && dp->i_count != 0) {
			bdwrite(bp);
			error = 0;
		} else
			error = bwrite(bp);
	}
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	UFS_WAPBL_UPDATE(dp, 0);
	return (error);
}
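
The removal rule described in the comment is pure record-length arithmetic: the first entry of a directory block is freed by zeroing d_ino, and any other entry is freed by folding its d_reclen into the record before it. A pared-down user-space sketch with a mock entry header; the field names mirror struct direct, everything else is simplified:

#include <stdio.h>
#include <stdint.h>

struct dent {
	uint32_t d_ino;		/* 0 = slot unused */
	uint16_t d_reclen;	/* total size of this slot */
};

/*
 * Remove the entry at 'offset' in a directory block.  'prevlen' is the
 * size of the preceding record, 0 for the first entry of the block;
 * this is what dp->i_count conveys in ufs_dirremove().
 */
static void dirremove_sketch(char *blk, int offset, int prevlen)
{
	if (prevlen == 0) {
		/* first entry: just mark it unused */
		((struct dent *)(blk + offset))->d_ino = 0;
	} else {
		/* collapse the freed space into the previous record */
		struct dent *prev = (struct dent *)(blk + offset - prevlen);
		struct dent *ep = (struct dent *)(blk + offset);

		prev->d_reclen += ep->d_reclen;
	}
}

int main(void)
{
	uint32_t store[16] = { 0 };	/* uint32_t backing keeps it aligned */
	char *blk = (char *)store;
	struct dent *a = (struct dent *)blk;
	struct dent *b = (struct dent *)(blk + 16);

	a->d_ino = 2; a->d_reclen = 16;
	b->d_ino = 3; b->d_reclen = 48;

	dirremove_sketch(blk, 16, 16);	/* drop the second entry */
	printf("prev reclen now %u\n", a->d_reclen);	/* prints 64 */
	return 0;
}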
Example #30
/*
 * Write a directory entry after a call to namei, using the parameters
 * that it left in nameidata. The argument dirp is the new directory
 * entry contents. Dvp is a pointer to the directory to be written,
 * which was left locked by namei. Remaining parameters (dp->i_offset,
 * dp->i_count) indicate how the space for the new entry is to be obtained.
 * Non-null bp indicates that a directory is being created (for the
 * soft dependency code).
 */
int
ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
    struct componentname *cnp, struct buf *newdirbp)
{
  	struct ucred *cr;
  	struct proc *p;
  	int newentrysize;
  	struct inode *dp;
  	struct buf *bp;
  	u_int dsize;
  	struct direct *ep, *nep;
	int error, ret, blkoff, loc, spacefree, flags;
  	char *dirbuf;

	UFS_WAPBL_JLOCK_ASSERT(dvp->v_mount);

 	error = 0;
 	cr = cnp->cn_cred;
 	p = cnp->cn_proc;
  	dp = VTOI(dvp);
  	newentrysize = DIRSIZ(FSFMT(dvp), dirp);

	if (dp->i_count == 0) {
		/*
		 * If dp->i_count is 0, then namei could find no
		 * space in the directory. Here, dp->i_offset will
		 * be on a directory block boundary and we will write the
  		 * new entry into a fresh block.
  		 */
  		if (dp->i_offset & (DIRBLKSIZ - 1))
			panic("ufs_direnter: newblk");
		flags = B_CLRBUF;
		if (!DOINGSOFTDEP(dvp))
			flags |= B_SYNC;
		if ((error = UFS_BUF_ALLOC(dp, (off_t)dp->i_offset, DIRBLKSIZ,
		    cr, flags, &bp)) != 0) {
			if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
				bdwrite(newdirbp);
			return (error);
		}
		DIP_ASSIGN(dp, size, dp->i_offset + DIRBLKSIZ);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(dvp, DIP(dp, size));
  		dirp->d_reclen = DIRBLKSIZ;
		blkoff = dp->i_offset &
		    (VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_iosize - 1);
		memcpy(bp->b_data + blkoff, dirp, newentrysize);

#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL) {
			ufsdirhash_newblk(dp, dp->i_offset);
			ufsdirhash_add(dp, dirp, dp->i_offset);
			ufsdirhash_checkblock(dp, (char *)bp->b_data + blkoff,
			dp->i_offset);
		}
#endif

		if (DOINGSOFTDEP(dvp)) {
			/*
			 * Ensure that the entire newly allocated block is a
			 * valid directory so that future growth within the
			 * block does not have to ensure that the block is
			 * written before the inode.
			 */
			blkoff += DIRBLKSIZ;
			while (blkoff < bp->b_bcount) {
				((struct direct *)
				   (bp->b_data + blkoff))->d_reclen = DIRBLKSIZ;
				blkoff += DIRBLKSIZ;
			}
			if (softdep_setup_directory_add(bp, dp, dp->i_offset,
			    dirp->d_ino, newdirbp, 1) == 0) {
				bdwrite(bp);
				return (UFS_UPDATE(dp, 0));
			}
			/* We have just allocated a directory block in an
			 * indirect block. Rather than tracking when it gets
			 * claimed by the inode, we simply do a VOP_FSYNC
			 * now to ensure that it is there (in case the user
			 * does a future fsync). Note that we have to unlock
			 * the inode for the entry that we just entered, as
			 * the VOP_FSYNC may need to lock other inodes which
			 * can lead to deadlock if we also hold a lock on
			 * the newly entered node.
			 */
			if ((error = VOP_BWRITE(bp)))
				return (error);
			if (tvp != NULL)
				VOP_UNLOCK(tvp, 0);
			error = VOP_FSYNC(dvp, p->p_ucred, MNT_WAIT);
			if (tvp != NULL)
				vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
			return (error);
		}
		error = VOP_BWRITE(bp);
 		ret = UFS_UPDATE(dp, !DOINGSOFTDEP(dvp));
 		if (error == 0)
 			return (ret);
  		return (error);
  	}
  
  	/*
	 * If dp->i_count is non-zero, then namei found space for the new
	 * entry in the range dp->i_offset to dp->i_offset + dp->i_count
	 * in the directory. To use this space, we may have to compact
	 * the entries located there, by copying them together towards the
	 * beginning of the block, leaving the free space in one usable
	 * chunk at the end.
  	 */
  
  	/*
	 * Increase size of directory if entry eats into new space.
	 * This should never push the size past a new multiple of
	 * DIRBLKSIZE.
	 *
	 * N.B. - THIS IS AN ARTIFACT OF 4.2 AND SHOULD NEVER HAPPEN.
	 */
	if (dp->i_offset + dp->i_count > DIP(dp, size)) {
		DIP_ASSIGN(dp, size, dp->i_offset + dp->i_count);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
		UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	}
	/*
	 * Get the block containing the space for the new directory entry.
	 */
 	if ((error = UFS_BUFATOFF(dp, (off_t)dp->i_offset, &dirbuf, &bp)) 
	    != 0) {
 		if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
 			bdwrite(newdirbp);
  		return (error);
 	}
	/*
	 * Find space for the new entry. In the simple case, the entry at
	 * offset base will have the space. If it does not, then namei
	 * arranged that compacting the region dp->i_offset to
	 * dp->i_offset + dp->i_count would yield the space.
	 */
	ep = (struct direct *)dirbuf;
	dsize = ep->d_ino ? DIRSIZ(FSFMT(dvp), ep) : 0;
	spacefree = ep->d_reclen - dsize;
	for (loc = ep->d_reclen; loc < dp->i_count; ) {
		nep = (struct direct *)(dirbuf + loc);

		/* Trim the existing slot (NB: dsize may be zero). */
		ep->d_reclen = dsize;
		ep = (struct direct *)((char *)ep + dsize);

		/* Read nep->d_reclen now as the memmove() may clobber it. */
		loc += nep->d_reclen;
		if (nep->d_ino == 0) {
			/*
			 * A mid-block unused entry. Such entries are
			 * never created by the kernel, but fsck_ffs
			 * can create them (and it doesn't fix them).
			 *
			 * Add up the free space, and initialise the
			 * relocated entry since we don't memmove it.
			 */
			spacefree += nep->d_reclen;
			ep->d_ino = 0;
			dsize = 0;
			continue;
		}
		dsize = DIRSIZ(FSFMT(dvp), nep);
		spacefree += nep->d_reclen - dsize;
#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ufsdirhash_move(dp, nep,
			    dp->i_offset + ((char *)nep - dirbuf),
			    dp->i_offset + ((char *)ep - dirbuf));
#endif
 		if (DOINGSOFTDEP(dvp))
 			softdep_change_directoryentry_offset(dp, dirbuf,
 			    (caddr_t)nep, (caddr_t)ep, dsize); 
 		else
 			memmove(ep, nep, dsize);
	}
	/*
	 * Here, `ep' points to a directory entry containing `dsize' in-use
	 * bytes followed by `spacefree' unused bytes. If ep->d_ino == 0,
	 * then the entry is completely unused (dsize == 0). The value
	 * of ep->d_reclen is always indeterminate.
	 *
	 * Update the pointer fields in the previous entry (if any),
	 * copy in the new entry, and write out the block.
	 */
	if (ep->d_ino == 0) {
		if (spacefree + dsize < newentrysize)
			panic("ufs_direnter: compact1");
		dirp->d_reclen = spacefree + dsize;
	} else {
		if (spacefree < newentrysize)
			panic("ufs_direnter: compact2");
		dirp->d_reclen = spacefree;
		ep->d_reclen = dsize;
		ep = (struct direct *)((char *)ep + dsize);
	}

#ifdef UFS_DIRHASH
	if (dp->i_dirhash != NULL && (ep->d_ino == 0 ||
	    dirp->d_reclen == spacefree))
		ufsdirhash_add(dp, dirp, dp->i_offset + ((char *)ep - dirbuf));
#endif
	memcpy(ep, dirp, newentrysize);
#ifdef UFS_DIRHASH
	if (dp->i_dirhash != NULL)
		ufsdirhash_checkblock(dp, dirbuf -
		    (dp->i_offset & (DIRBLKSIZ - 1)),
		    dp->i_offset & ~(DIRBLKSIZ - 1));
#endif

  	if (DOINGSOFTDEP(dvp)) {
  		(void)softdep_setup_directory_add(bp, dp,
  		    dp->i_offset + (caddr_t)ep - dirbuf,
		    dirp->d_ino, newdirbp, 0);
  		bdwrite(bp);
  	} else {
  		error = VOP_BWRITE(bp);
  	}
	dp->i_flag |= IN_CHANGE | IN_UPDATE;

 	/*
 	 * If all went well, and the directory can be shortened, proceed
 	 * with the truncation. Note that we have to unlock the inode for
 	 * the entry that we just entered, as the truncation may need to
 	 * lock other inodes which can lead to deadlock if we also hold a
 	 * lock on the newly entered node.
 	 */

	if (error == 0 && dp->i_endoff && dp->i_endoff < DIP(dp, size)) {
		if (tvp != NULL)
			VOP_UNLOCK(tvp, 0);
#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ufsdirhash_dirtrunc(dp, dp->i_endoff);
#endif


		error = UFS_TRUNCATE(dp, (off_t)dp->i_endoff, IO_SYNC, cr);

		if (tvp != NULL)
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
	}
	UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	return (error);
}
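
The compaction step described above can be exercised in isolation: live records slide toward the start of the region and the freed bytes collect in one chunk whose size ends up in the reclen of the last record. A simplified user-space sketch, with d_used standing in for DIRSIZ() and mid-block holes handled as in the loop above:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct dent {
	uint32_t d_ino;		/* 0 = unused */
	uint16_t d_reclen;	/* total slot size */
	uint16_t d_used;	/* bytes actually needed (DIRSIZ analogue) */
};

/* Compact [0, count) of blk; return the free space collected at the end. */
static int compact(char *blk, int count)
{
	struct dent *ep = (struct dent *)blk;
	int dsize = ep->d_ino ? ep->d_used : 0;
	int spacefree = ep->d_reclen - dsize;
	int loc;

	for (loc = ep->d_reclen; loc < count; ) {
		struct dent *nep = (struct dent *)(blk + loc);

		ep->d_reclen = dsize;		/* trim the current slot */
		ep = (struct dent *)((char *)ep + dsize);
		loc += nep->d_reclen;		/* read before the memmove */
		if (nep->d_ino == 0) {		/* mid-block hole */
			spacefree += nep->d_reclen;
			ep->d_ino = 0;
			dsize = 0;
			continue;
		}
		dsize = nep->d_used;
		spacefree += nep->d_reclen - dsize;
		memmove(ep, nep, dsize);
	}
	ep->d_reclen = dsize + spacefree;	/* one usable chunk at the end */
	return spacefree;
}

int main(void)
{
	uint32_t store[16] = { 0 };	/* aligned backing for the block */
	char *blk = (char *)store;
	struct dent *e;

	e = (struct dent *)blk;
	e->d_ino = 2; e->d_reclen = 24; e->d_used = 12;
	e = (struct dent *)(blk + 24);
	e->d_ino = 0; e->d_reclen = 20;
	e = (struct dent *)(blk + 44);
	e->d_ino = 5; e->d_reclen = 20; e->d_used = 16;

	printf("free space collected: %d\n", compact(blk, 64));	/* 36 */
	return 0;
}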