Example #1
/*
 * Debug check: verify that each buffer gathered in sp->bpp sits at the
 * disk address we expect, i.e. that the blocks follow one another
 * contiguously from the first buffer's b_blkno.
 */
void
lfs_check_bpp(struct lfs *fs, struct segment *sp, char *file, int line)
{
	daddr_t blkno;
	struct buf **bpp;
	struct vnode *devvp;

	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
	blkno = (*(sp->bpp))->b_blkno;
	for (bpp = sp->bpp; bpp < sp->cbpp; bpp++) {
		if ((*bpp)->b_blkno != blkno) {
			if ((*bpp)->b_vp == devvp) {
				printf("Oops, would misplace raw block "
				       "0x%" PRIx64 " at 0x%" PRIx64 "\n",
				       (*bpp)->b_blkno,
				       blkno);
			} else {
				printf("%s:%d: misplace ino %llu lbn %" PRId64
				       " at 0x%" PRIx64 " instead of "
				       "0x%" PRIx64 "\n",
				       file, line,
				       (unsigned long long)
				       VTOI((*bpp)->b_vp)->i_number,
				       (*bpp)->b_lblkno,
				       blkno,
				       (*bpp)->b_blkno);
			}
		}
		blkno += fsbtodb(fs, btofsb(fs, (*bpp)->b_bcount));
	}
}
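A hedged sketch of a possible call site, assuming the segment writer invokes the check under DEBUG just before the gathered buffers are handed to the device; the __FILE__/__LINE__ arguments are inferred from the function's (file, line) parameters and are not taken from the example above.

#ifdef DEBUG
	/* Report, with the caller's source location, any buffer in
	 * sp->bpp that would not land at its expected disk address. */
	lfs_check_bpp(fs, sp, __FILE__, __LINE__);
#endif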
Example #2
/*
 * allocate an unused inode
 */
ino_t
allocino(ino_t request, int type)
{
	ino_t ino;
	struct ufs1_dinode *dp;
	time_t t;
	struct uvnode *vp;
	struct ubuf *bp;

	if (request == 0)
		request = ROOTINO;
	else if (statemap[request] != USTATE)
		return (0);
	for (ino = request; ino < maxino; ino++)
		if (statemap[ino] == USTATE)
			break;
	if (ino == maxino)
		extend_ifile(fs);

	switch (type & IFMT) {
	case IFDIR:
		statemap[ino] = DSTATE;
		break;
	case IFREG:
	case IFLNK:
		statemap[ino] = FSTATE;
		break;
	default:
		return (0);
	}
	vp = lfs_valloc(fs, ino);
	if (vp == NULL)
		return (0);
	dp = (VTOI(vp)->i_din.ffs1_din);
	bp = getblk(vp, 0, fs->lfs_fsize);
	VOP_BWRITE(bp);
	dp->di_mode = type;
	(void) time(&t);
	dp->di_atime = t;
	dp->di_mtime = dp->di_ctime = dp->di_atime;
	dp->di_size = fs->lfs_fsize;
	dp->di_blocks = btofsb(fs, fs->lfs_fsize);
	n_files++;
	inodirty(VTOI(vp));
	typemap[ino] = IFTODT(type);
	return (ino);
}
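A minimal sketch of a caller, assuming an allocdir()-style helper in fsck that wants a fresh directory inode; the mode bits and the pfatal() message are illustrative assumptions, not part of the example above.

	ino_t ino;

	/* Find a free inode at or after `request' and type it as a
	 * directory; allocino() returns 0 if none is available or the
	 * requested type is unsupported. */
	ino = allocino(request, IFDIR | 0755);
	if (ino == 0)
		pfatal("COULD NOT ALLOCATE DIRECTORY INODE");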
Example #3
/*
 * Load the appropriate indirect block, and change the appropriate pointer.
 * Mark the block dirty.  Do segment and avail accounting.
 */
static int
update_meta(struct lfs *fs, ino_t ino, int vers, daddr_t lbn,
	    daddr_t ndaddr, size_t size, struct lwp *l)
{
	int error;
	struct vnode *vp;
	struct inode *ip;
#ifdef DEBUG
	daddr_t odaddr;
	struct indir a[NIADDR];
	int num;
	int i;
#endif /* DEBUG */
	struct buf *bp;
	SEGUSE *sup;

	KASSERT(lbn >= 0);	/* no indirect blocks */

	if ((error = lfs_rf_valloc(fs, ino, vers, l, &vp)) != 0) {
		DLOG((DLOG_RF, "update_meta: ino %d: lfs_rf_valloc"
		      " returned %d\n", ino, error));
		return error;
	}

	if ((error = lfs_balloc(vp, (lbn << fs->lfs_bshift), size,
				NOCRED, 0, &bp)) != 0) {
		vput(vp);
		return (error);
	}
	/* No need to write, the block is already on disk */
	if (bp->b_oflags & BO_DELWRI) {
		LFS_UNLOCK_BUF(bp);
		fs->lfs_avail += btofsb(fs, bp->b_bcount);
	}
	brelse(bp, BC_INVAL);

	/*
	 * Extend the file, if it is not large enough already.
	 * XXX this is not exactly right, we don't know how much of the
	 * XXX last block is actually used.  We hope that an inode will
	 * XXX appear later to give the correct size.
	 */
	ip = VTOI(vp);
	if (ip->i_size <= (lbn << fs->lfs_bshift)) {
		u_int64_t newsize;

		if (lbn < NDADDR)
			newsize = ip->i_ffs1_size = (lbn << fs->lfs_bshift) +
				(size - fs->lfs_fsize) + 1;
		else
			newsize = ip->i_ffs1_size = (lbn << fs->lfs_bshift) + 1;

		if (ip->i_size < newsize) {
			ip->i_size = newsize;
			/*
			 * tell vm our new size for the case the inode won't
			 * appear later.
			 */
			uvm_vnp_setsize(vp, newsize);
		}
	}

	lfs_update_single(fs, NULL, vp, lbn, ndaddr, size);

	LFS_SEGENTRY(sup, fs, dtosn(fs, ndaddr), bp);
	sup->su_nbytes += size;
	LFS_WRITESEGENTRY(sup, fs, dtosn(fs, ndaddr), bp);

	/* differences here should be due to UNWRITTEN indirect blocks. */
	KASSERT((lblkno(fs, ip->i_size) > NDADDR &&
	    ip->i_lfs_effnblks == ip->i_ffs1_blocks) ||
	    ip->i_lfs_effnblks >= ip->i_ffs1_blocks);

#ifdef DEBUG
	/* Now look again to make sure it worked */
	ufs_bmaparray(vp, lbn, &odaddr, &a[0], &num, NULL, NULL);
	for (i = num; i > 0; i--) {
		if (!a[i].in_exists)
			panic("update_meta: absent %d lv indirect block", i);
	}
	if (dbtofsb(fs, odaddr) != ndaddr)
		DLOG((DLOG_RF, "update_meta: failed setting ino %d lbn %"
		      PRId64 " to %" PRId64 "\n", ino, lbn, ndaddr));
#endif /* DEBUG */
	vput(vp);
	return 0;
}
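A rough sketch of how roll-forward code might drive update_meta() for each data block listed in a segment summary's FINFO entry; the surrounding variables and the daddr bookkeeping are assumptions for illustration only.

	/* For every logical block named in the FINFO, point the inode
	 * (or its indirect blocks) at the block's new disk address. */
	for (i = 0; i < fip->fi_nblocks; i++) {
		size = (i == fip->fi_nblocks - 1) ?
		    fip->fi_lastlength : fs->lfs_bsize;
		error = update_meta(fs, fip->fi_ino, fip->fi_version,
		    fip->fi_blocks[i], daddr, size, l);
		if (error)
			break;
		daddr += btofsb(fs, size);
	}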
Example #4
void
pass5(void)
{
	SEGUSE *su;
	struct ubuf *bp;
	int i;
	unsigned long bb;	/* total number of used blocks (lower bound) */
	unsigned long ubb;	/* upper bound number of used blocks */
	unsigned long avail;	/* blocks available for writing */
	unsigned long dmeta;	/* blocks in segsums and inodes */
	int nclean;		/* clean segments */
	size_t labelskew;
	int diddirty;

	/*
	 * Check segment holdings against actual holdings.  Check for
	 * "clean" segments that contain live data.  If we are only
	 * rolling forward, we can't check the segment holdings, but
	 * we can still check the cleanerinfo data.
	 */
	nclean = 0;
	avail = 0;
	bb = ubb = 0;
	dmeta = 0;
	for (i = 0; i < fs->lfs_nseg; i++) {
		diddirty = 0;
		LFS_SEGENTRY(su, fs, i, bp);
		if (!preen && !(su->su_flags & SEGUSE_DIRTY) &&
		    seg_table[i].su_nbytes > 0) {
			pwarn("CLEAN SEGMENT %d CONTAINS %d BYTES\n",
			    i, seg_table[i].su_nbytes);
			if (reply("MARK SEGMENT DIRTY")) {
				su->su_flags |= SEGUSE_DIRTY;
				++diddirty;
			}
		}
		if (!preen && su->su_nbytes != seg_table[i].su_nbytes) {
			pwarn("SEGMENT %d CLAIMS %d BYTES BUT HAS %d",
			    i, su->su_nbytes, seg_table[i].su_nbytes);
			if ((int32_t)su->su_nbytes >
			    (int32_t)seg_table[i].su_nbytes)
				pwarn(" (HIGH BY %d)\n", su->su_nbytes -
				    seg_table[i].su_nbytes);
			else
				pwarn(" (LOW BY %d)\n", -su->su_nbytes +
				    seg_table[i].su_nbytes);
			if (reply("FIX")) {
				su->su_nbytes = seg_table[i].su_nbytes;
				++diddirty;
			}
		}
		if (su->su_flags & SEGUSE_DIRTY) {
			bb += btofsb(fs, su->su_nbytes +
			    su->su_nsums * fs->lfs_sumsize);
			ubb += btofsb(fs, su->su_nbytes +
			    su->su_nsums * fs->lfs_sumsize +
			    su->su_ninos * fs->lfs_ibsize);
			dmeta += btofsb(fs,
			    fs->lfs_sumsize * su->su_nsums);
			dmeta += btofsb(fs,
			    fs->lfs_ibsize * su->su_ninos);
		} else {
			nclean++;
			avail += segtod(fs, 1);
			if (su->su_flags & SEGUSE_SUPERBLOCK)
				avail -= btofsb(fs, LFS_SBPAD);
			if (i == 0 && fs->lfs_version > 1 &&
			    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
				avail -= btofsb(fs, LFS_LABELPAD) -
				    fs->lfs_start;
		}
		if (diddirty)
			VOP_BWRITE(bp);
		else
			brelse(bp, 0);
	}

	/* Also may be available bytes in current seg */
	i = dtosn(fs, fs->lfs_offset);
	avail += sntod(fs, i + 1) - fs->lfs_offset;
	/* But do not count minfreesegs */
	avail -= segtod(fs, (fs->lfs_minfreeseg -
		(fs->lfs_minfreeseg / 2)));
	/* Note we may have bytes to write yet */
	avail -= btofsb(fs, locked_queue_bytes);

	if (idaddr)
		pwarn("NOTE: when using -i, expect discrepancies in dmeta,"
		      " avail, nclean, bfree\n");
	if (dmeta != fs->lfs_dmeta) {
		pwarn("DMETA GIVEN AS %d, SHOULD BE %ld\n", fs->lfs_dmeta,
		    dmeta);
		if (preen || reply("FIX")) {
			fs->lfs_dmeta = dmeta;
			sbdirty();
		}
	}
	if (avail != fs->lfs_avail) {
		pwarn("AVAIL GIVEN AS %d, SHOULD BE %ld\n", fs->lfs_avail,
		    avail);
		if (preen || reply("FIX")) {
			fs->lfs_avail = avail;
			sbdirty();
		}
	}
	if (nclean != fs->lfs_nclean) {
		pwarn("NCLEAN GIVEN AS %d, SHOULD BE %d\n", fs->lfs_nclean,
		    nclean);
		if (preen || reply("FIX")) {
			fs->lfs_nclean = nclean;
			sbdirty();
		}
	}

	labelskew = 0;
	if (fs->lfs_version > 1 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		labelskew = btofsb(fs, LFS_LABELPAD);
	if (fs->lfs_bfree > fs->lfs_dsize - bb - labelskew ||
	    fs->lfs_bfree < fs->lfs_dsize - ubb - labelskew) {
		pwarn("BFREE GIVEN AS %d, SHOULD BE BETWEEN %ld AND %ld\n",
		    fs->lfs_bfree, (fs->lfs_dsize - ubb - labelskew),
		    fs->lfs_dsize - bb - labelskew);
		if (preen || reply("FIX")) {
			fs->lfs_bfree =
				((fs->lfs_dsize - labelskew - ubb) +
				 fs->lfs_dsize - labelskew - bb) / 2;
			sbdirty();
		}
	}
}
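For orientation, a hypothetical excerpt of the fsck driver showing where pass5() fits; the exact sequencing and the per-pass comments are assumptions, not taken from the examples above.

	pass1();	/* check blocks and sizes */
	pass2();	/* check pathnames */
	pass3();	/* check connectivity */
	pass4();	/* check reference counts */
	pass5();	/* check segment usage and superblock accounting */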