Example no. 1
/* Update segment and avail usage information when removing a block. */
static int
lfs_blkfree(struct lfs *fs, struct inode *ip, daddr_t daddr,
	    size_t bsize, long *lastseg, size_t *num)
{
	long seg;
	int error = 0;

	ASSERT_SEGLOCK(fs);
	bsize = lfs_fragroundup(fs, bsize);
	if (daddr > 0) {
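		/* If this block lies in a different segment than the previous one, flush the accumulated count; otherwise keep accumulating. */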
		if (*lastseg != (seg = lfs_dtosn(fs, daddr))) {
			error = lfs_update_seguse(fs, ip, *lastseg, *num);
			*num = bsize;
			*lastseg = seg;
		} else
			*num += bsize;
	}

	return error;
}
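The lastseg/num pair lets a caller batch byte counts per segment instead of touching the segment-usage table once for every freed block. A minimal sketch of the intended calling pattern, assuming the segment lock is already held; nblks, blkaddrs and bsize are illustrative placeholders, only the helper names come from the code in this dump:

	long lastseg = -1;	/* no segment accumulated yet */
	size_t bc = 0;		/* bytes accumulated for lastseg */
	int i;

	for (i = 0; i < nblks; i++)	/* blkaddrs[]: the blocks being freed */
		lfs_blkfree(fs, ip, blkaddrs[i], bsize, &lastseg, &bc);

	/* Flush the last run, then fold the queued deltas into the SEGUSE entries. */
	lfs_update_seguse(fs, ip, lastseg, bc);
	lfs_finalize_ino_seguse(fs, ip);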
Example no. 2
/* Finish the accounting updates for a segment. */
static int
lfs_update_seguse(struct lfs *fs, struct inode *ip, long lastseg, size_t num)
{
	struct segdelta *sd;

	ASSERT_SEGLOCK(fs);
	if (lastseg < 0 || num == 0)
		return 0;

	LIST_FOREACH(sd, &ip->i_lfs_segdhd, list)
		if (sd->segnum == lastseg)
			break;
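	/* No delta recorded for this segment yet: allocate and link a fresh one. */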
	if (sd == NULL) {
		sd = malloc(sizeof(*sd), M_SEGMENT, M_WAITOK);
		sd->segnum = lastseg;
		sd->num = 0;
		LIST_INSERT_HEAD(&ip->i_lfs_segdhd, sd, list);
	}
	sd->num += num;

	return 0;
}
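Only three members of struct segdelta are touched above; its layout is presumably along these lines (a sketch inferred from the usage here, not the actual declaration):

struct segdelta {
	long			segnum;	/* segment this delta applies to */
	size_t			num;	/* bytes to subtract from su_nbytes */
	LIST_ENTRY(segdelta)	list;	/* linkage on i_lfs_segdhd / lfs_segdhd */
};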
Example no. 3
static void
lfs_finalize_seguse(struct lfs *fs, void *v)
{
	SEGUSE *sup;
	struct buf *bp;
	struct segdelta *sd;
	LIST_HEAD(, segdelta) *hd = v;

	ASSERT_SEGLOCK(fs);
	while ((sd = LIST_FIRST(hd)) != NULL) {
		LIST_REMOVE(sd, list);
		LFS_SEGENTRY(sup, fs, sd->segnum, bp);
		if (sd->num > sup->su_nbytes) {
			printf("lfs_finalize_seguse: segment %ld short by %ld\n",
				sd->segnum, (long)(sd->num - sup->su_nbytes));
			panic("lfs_finalize_seguse: negative bytes");
			sup->su_nbytes = sd->num;
		}
		sup->su_nbytes -= sd->num;
		LFS_WRITESEGENTRY(sup, fs, sd->segnum, bp);
		free(sd, M_SEGMENT);
	}
}
Example no. 4
/*
 * Allocate a particular inode with a particular version number, freeing
 * any previous versions of this inode that may have gone before.
 * Used by the roll-forward code.
 *
 * XXX this function does not have appropriate locking to be used on a live fs;
 * XXX but something similar could probably be used for an "undelete" call.
 *
 * Called with the Ifile inode locked.
 */
int
lfs_rf_valloc(struct lfs *fs, ino_t ino, int vers, struct lwp *l,
	      struct vnode **vpp)
{
	IFILE *ifp;
	struct buf *bp, *cbp;
	struct vnode *vp;
	struct inode *ip;
	ino_t tino, oldnext;
	int error;
	CLEANERINFO *cip;

	ASSERT_SEGLOCK(fs); /* XXX it doesn't, really */

	/*
	 * First, just try a vget. If the version number is the one we want,
	 * we don't have to do anything else.  If the version number is wrong,
	 * take appropriate action.
	 */
	error = VFS_VGET(fs->lfs_ivnode->v_mount, ino, &vp);
	if (error == 0) {
		DLOG((DLOG_RF, "lfs_rf_valloc[1]: ino %d vp %p\n", ino, vp));

		*vpp = vp;
		ip = VTOI(vp);
		if (ip->i_gen == vers)
			return 0;
		else if (ip->i_gen < vers) {
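			/* An older generation of this inode exists: empty it and carry it forward with the wanted version number. */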
			lfs_truncate(vp, (off_t)0, 0, NOCRED);
			ip->i_gen = ip->i_ffs1_gen = vers;
			LFS_SET_UINO(ip, IN_CHANGE | IN_UPDATE);
			return 0;
		} else {
			DLOG((DLOG_RF, "ino %d: sought version %d, got %d\n",
			       ino, vers, ip->i_ffs1_gen));
			vput(vp);
			*vpp = NULLVP;
			return EEXIST;
		}
	}

	/*
	 * The inode is not in use.  Find it on the free list.
	 */
	/* If the Ifile is too short to contain this inum, extend it */
	while (VTOI(fs->lfs_ivnode)->i_size <= (ino / fs->lfs_ifpb +
		fs->lfs_cleansz + fs->lfs_segtabsz) << fs->lfs_bshift) {
		lfs_extend_ifile(fs, NOCRED);
	}

	LFS_IENTRY(ifp, fs, ino, bp);
	oldnext = ifp->if_nextfree;
	ifp->if_version = vers;
	brelse(bp, 0);

	/* Unlink ino from the free list: it is either the head or pointed to by a later entry. */
	LFS_GET_HEADFREE(fs, cip, cbp, &tino);
	if (tino == ino) {
		LFS_PUT_HEADFREE(fs, cip, cbp, oldnext);
	} else {
		/* Walk from the head to find the entry that points at ino. */
		while (1) {
			LFS_IENTRY(ifp, fs, tino, bp);
			if (ifp->if_nextfree == ino ||
			    ifp->if_nextfree == LFS_UNUSED_INUM)
				break;
			tino = ifp->if_nextfree;
			brelse(bp, 0);
		}
		if (ifp->if_nextfree == LFS_UNUSED_INUM) {
			brelse(bp, 0);
			return ENOENT;
		}
		ifp->if_nextfree = oldnext;
		LFS_BWRITE_LOG(bp);
	}

	error = lfs_ialloc(fs, fs->lfs_ivnode, ino, vers, &vp);
	if (error == 0) {
		/*
		 * Make it VREG so we can put blocks on it.  We will change
		 * this later if it turns out to be some other kind of file.
		 */
		ip = VTOI(vp);
		ip->i_mode = ip->i_ffs1_mode = IFREG;
		ip->i_nlink = ip->i_ffs1_nlink = 1;
		ufs_vinit(vp->v_mount, lfs_specop_p, lfs_fifoop_p, &vp);
		ip = VTOI(vp);

		DLOG((DLOG_RF, "lfs_rf_valloc: ino %d vp %p\n", ino, vp));

		/* The dirop-nature of this vnode is past */
		lfs_unmark_vnode(vp);
		(void)lfs_vunref(vp);
		vp->v_uflag &= ~VU_DIROP;
		mutex_enter(&lfs_lock);
		--lfs_dirvcount;
		--fs->lfs_dirvcount;
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
		wakeup(&lfs_dirvcount);
		wakeup(&fs->lfs_dirvcount);
		mutex_exit(&lfs_lock);
	}
	*vpp = vp;
	return error;
}
Example no. 5
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
lfs_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
	       daddr_t lastbn, int level, daddr_t *countp,
	       daddr_t *rcountp, long *lastsegp, size_t *bcp)
{
	int i;
	struct buf *bp;
	struct lfs *fs = ip->i_lfs;
	int32_t *bap;	/* XXX ondisk32 */
	struct vnode *vp;
	daddr_t nb, nlbn, last;
	int32_t *copy = NULL;	/* XXX ondisk32 */
	daddr_t blkcount, rblkcount, factor;
	int nblocks;
	daddr_t blocksreleased = 0, real_released = 0;
	int error = 0, allerror = 0;

	ASSERT_SEGLOCK(fs);
	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
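	/* Each slot at this level covers factor data blocks: 1 for SINGLE, LFS_NINDIR(fs) for DOUBLE, and so on. */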
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= LFS_NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, lfs_sb_getbsize(fs), 0, 0);
	if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, lfs_sb_getbsize(fs)), lbn);
	} else {
		trace(TR_BREADMISS, pack(vp, lfs_sb_getbsize(fs)), lbn);
		curlwp->l_ru.ru_inblock++; /* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("lfs_indirtrunc: bad buffer size");
		bp->b_blkno = LFS_FSBTODB(fs, dbn);
		VOP_STRATEGY(vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp, 0);
		*countp = *rcountp = 0;
		return (error);
	}

	bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
	if (lastbn >= 0) {
		copy = lfs_malloc(fs, lfs_sb_getbsize(fs), LFS_NB_IBLOCK);
		memcpy((void *)copy, (void *)bap, lfs_sb_getbsize(fs));
		memset((void *)&bap[last + 1], 0,
		/* XXX ondisk32 */
		  (u_int)(LFS_NINDIR(fs) - (last + 1)) * sizeof (int32_t));
		error = VOP_BWRITE(bp->b_vp, bp);
		if (error)
			allerror = error;
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = LFS_NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = lfs_indirtrunc(ip, nlbn, nb,
					       (daddr_t)-1, level - 1,
					       &blkcount, &rblkcount,
					       lastsegp, bcp);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
			real_released += rblkcount;
		}
		lfs_blkfree(fs, ip, nb, lfs_sb_getbsize(fs), lastsegp, bcp);
		if (bap[i] > 0)
			real_released += nblocks;
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = lfs_indirtrunc(ip, nlbn, nb,
					       last, level - 1, &blkcount,
					       &rblkcount, lastsegp, bcp);
			if (error)
				allerror = error;
			real_released += rblkcount;
			blocksreleased += blkcount;
		}
	}

	if (copy != NULL) {
		lfs_free(fs, copy, LFS_NB_IBLOCK);
	} else {
		mutex_enter(&bufcache_lock);
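		/* This delayed-write buffer will now never reach disk: credit its space back to the available count before invalidating it. */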
		if (bp->b_oflags & BO_DELWRI) {
			LFS_UNLOCK_BUF(bp);
			lfs_sb_addavail(fs, lfs_btofsb(fs, bp->b_bcount));
			wakeup(&fs->lfs_availsleep);
		}
		brelsel(bp, BC_INVAL);
		mutex_exit(&bufcache_lock);
	}

	*countp = blocksreleased;
	*rcountp = real_released;
	return (allerror);
}
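A worked illustration of the index arithmetic above (the numbers are assumed, not taken from the source): suppose LFS_NINDIR(fs) is 2048 and lfs_indirtrunc is called on a double indirect block (level == DOUBLE) with lastbn == 5000. Then factor == 2048 and last == 5000 / 2048 == 2, so the first loop releases slots 2047 down to 3 in full, and the second pass recurses into slot 2 with 5000 % 2048 == 904 as the new cut-off.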
Example no. 6
/* Finish the accounting updates queued against the filesystem as a whole. */
void
lfs_finalize_fs_seguse(struct lfs *fs)
{
	ASSERT_SEGLOCK(fs);
	lfs_finalize_seguse(fs, &fs->lfs_segdhd);
}
Example no. 7
/* Finish the accounting updates queued against a single inode. */
void
lfs_finalize_ino_seguse(struct lfs *fs, struct inode *ip)
{
	ASSERT_SEGLOCK(fs);
	lfs_finalize_seguse(fs, &ip->i_lfs_segdhd);
}