Example no. 1
static int virtualblocks(struct fs *super, union dinode *dp)
{
	off_t nblk, sz;

	sz = DIP(super, dp, di_size);
#ifdef	COMPAT
	if (lblkno(super,sz) >= NDADDR) {
		nblk = blkroundup(super,sz);
		if (sz == nblk)
			nblk += super->fs_bsize;
	}

	/* XXX nblk computed above is unused; COMPAT mode reports size in KB */
	return sz / 1024;

#else	/* COMPAT */

	if (lblkno(super,sz) >= NDADDR) {
		nblk = blkroundup(super,sz);
		sz = lblkno(super,nblk);
		sz = (sz - NDADDR + NINDIR(super) - 1) / NINDIR(super);
		while (sz > 0) {
			nblk += sz * super->fs_bsize;
			/* sz - 1 rounded up */
			sz = (sz - 1 + NINDIR(super) - 1) / NINDIR(super);
		}
	} else
		nblk = fragroundup(super,sz);

	return nblk / 512;
#endif	/* COMPAT */
}
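Both variants of virtualblocks() lean on the classic BSD UFS geometry macros. For reference, here is a minimal sketch of those macros, roughly as defined in <sys/param.h> and <ufs/ffs/fs.h> (the authoritative definitions live in those headers):

/* Ceiling division: how many y-sized units cover x. */
#define howmany(x, y)		(((x) + ((y) - 1)) / (y))
/* Byte offset -> logical block number. */
#define lblkno(fs, loc)		((loc) >> (fs)->fs_bshift)
/* Round a byte count up to a full block / up to a fragment. */
#define blkroundup(fs, size)	(((size) + (fs)->fs_qbmask) & (fs)->fs_bmask)
#define fragroundup(fs, size)	(((size) + (fs)->fs_qfmask) & (fs)->fs_fmask)
/* Number of block pointers held by one indirect block. */
#define NINDIR(fs)		((fs)->fs_nindir)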
Example no. 2
static int
virtualblocks(struct fs *super, union dinode *dp)
{
	off_t nblk, sz;

	sz = DIP(super, dp, di_size);
#ifdef	COMPAT
	if (lblkno(super, sz) >= NDADDR) {
		nblk = blkroundup(super, sz);
		if (sz == nblk)
			nblk += super->fs_bsize;
	}

	return sz / 1024;
#else	/* COMPAT */

	if (lblkno(super, sz) >= NDADDR) {
		nblk = blkroundup(super, sz);
		sz = lblkno(super, nblk);
		sz = howmany(sz - NDADDR, NINDIR(super));
		while (sz > 0) {
			nblk += sz * super->fs_bsize;
			/* One block on this level is in the inode itself */
			sz = howmany(sz - 1, NINDIR(super));
		}
	} else
		nblk = fragroundup(super, sz);

	return nblk / DEV_BSIZE;
#endif	/* COMPAT */
}
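To see what the non-COMPAT branch computes, here is a hand-worked pass through the loop (the numbers are illustrative, assuming a UFS1 layout with fs_bsize = 8192, NDADDR = 12 and NINDIR = 2048):

/*
 * di_size = 100 MB = 104857600 bytes (already block-aligned):
 *
 *	nblk = blkroundup(super, sz)     = 104857600
 *	sz   = lblkno(super, nblk)       = 12800 data blocks
 *	sz   = howmany(12800 - 12, 2048) = 7 single-indirect blocks
 *	pass 1: nblk += 7 * 8192; sz = howmany(6, 2048) = 1 double indirect
 *	pass 2: nblk += 1 * 8192; sz = howmany(0, 2048) = 0, loop ends
 *
 * Eight metadata blocks are charged on top of the rounded-up data
 * size, and the total is returned in 512-byte (DEV_BSIZE) sectors.
 */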
Example no. 3
void
setinodebuf(ufs1_ino_t inum)
{

	if (inum % sblock.fs_ipg != 0)
		errx(EEXIT, "bad inode number %u to setinodebuf", inum);
	startinum = 0;
	nextino = inum;
	lastinum = inum;
	readcnt = 0;
	if (inodebuf != NULL)
		return;
	inobufsize = blkroundup(&sblock, INOBUFSIZE);
	fullcnt = inobufsize / sizeof(struct ufs1_dinode);
	readpercg = sblock.fs_ipg / fullcnt;
	partialcnt = sblock.fs_ipg % fullcnt;
	partialsize = partialcnt * sizeof(struct ufs1_dinode);
	if (partialcnt != 0) {
		readpercg++;
	} else {
		partialcnt = fullcnt;
		partialsize = inobufsize;
	}
	if ((inodebuf = (struct ufs1_dinode *)malloc((unsigned)inobufsize)) == NULL)
		errx(EEXIT, "cannot allocate space for inode buffer");
}
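The sizing arithmetic splits each cylinder group's inodes into a series of fixed-size reads. A hand-worked example (the values are illustrative; a struct ufs1_dinode is 128 bytes):

/*
 * Assuming fs_bsize = 16384, INOBUFSIZE = 56 KB and fs_ipg = 11392:
 *
 *	inobufsize  = blkroundup(&sblock, 57344) = 65536 (4 full blocks)
 *	fullcnt     = 65536 / 128                = 512 inodes per read
 *	readpercg   = 11392 / 512                = 22 full reads
 *	partialcnt  = 11392 % 512                = 128, so readpercg -> 23
 *	partialsize = 128 * 128                  = 16384 bytes, last read
 */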
Example no. 4
/*
 * Prepare to scan a set of inodes.
 */
void
setinodebuf(ino_t inum)
{

	if (inum % sblock.fs_ipg != 0)
		errx(1, "bad inode number %ju to setinodebuf", (uintmax_t)inum);
	lastvalidinum = inum + sblock.fs_ipg - 1;
	nextino = inum;
	lastinum = inum;
	readcnt = 0;
	if (inodebuf != NULL)
		return;
	inobufsize = blkroundup(&sblock, INOBUFSIZE);
	fullcnt = inobufsize / ((sblock.fs_magic == FS_UFS1_MAGIC) ?
	    sizeof(struct ufs1_dinode) : sizeof(struct ufs2_dinode));
	readpercg = sblock.fs_ipg / fullcnt;
	partialcnt = sblock.fs_ipg % fullcnt;
	partialsize = partialcnt * ((sblock.fs_magic == FS_UFS1_MAGIC) ?
	    sizeof(struct ufs1_dinode) : sizeof(struct ufs2_dinode));
	if (partialcnt != 0) {
		readpercg++;
	} else {
		partialcnt = fullcnt;
		partialsize = inobufsize;
	}
	if ((inodebuf = malloc((unsigned)inobufsize)) == NULL)
		errx(1, "cannot allocate space for inode buffer");
}
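The only functional change from the UFS1-only version is that the per-inode size is selected by the superblock magic: a struct ufs1_dinode is 128 bytes, a struct ufs2_dinode is 256, so fullcnt halves on UFS2 for the same buffer. A sketch of the scan pattern these helpers support (scan_cg_inodes() is a hypothetical driver; getnextinode() and freeinodebuf() are assumed from the surrounding fsck sources):

/* Hypothetical caller: scan every inode of one cylinder group. */
static void
scan_cg_inodes(u_int cg)
{
	ino_t inum, first = (ino_t)cg * sblock.fs_ipg;

	setinodebuf(first);	/* must be cg-aligned, see check above */
	for (inum = first; inum < first + sblock.fs_ipg; inum++) {
		union dinode *dp = getnextinode(inum);
		/* ... examine *dp ... */
	}
	freeinodebuf();		/* fsck defers this until the last group */
}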
Example no. 5
void
ffs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	daddr_t olbn, nlbn;

	olbn = lblkno(fs, ip->i_size);
	nlbn = lblkno(fs, size);
	if (nlbn < NDADDR && olbn <= nlbn) {
		*eobp = fragroundup(fs, size);
	} else {
		*eobp = blkroundup(fs, size);
	}
}
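ffs_gop_size() answers "how far beyond EOF is storage really allocated?". In the direct range FFS allocates the file's last block in fragments, so a size that stays within the current logical block only rounds up to a fragment; anything touching the indirect range always occupies full blocks. A quick numeric contrast (illustrative geometry):

/*
 * Assuming fs_bsize = 8192 and fs_fsize = 1024, for size = 5000:
 *
 *	fragroundup(fs, 5000) = 5120	(next 1 KB fragment boundary)
 *	blkroundup(fs, 5000)  = 8192	(next full-block boundary)
 *
 * The fragment answer applies only while the block lies below NDADDR
 * and the file is not growing into a new logical block.
 */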
Example no. 6
/*
 * Prepare to scan a set of inodes.
 */
void
resetinodebuf(void)
{

	nextino = 0;
	lastinum = 0;
	readcnt = 0;
	inobufsize = blkroundup(&sblock, INOBUFSIZE);
	fullcnt = inobufsize / sizeof(struct ufs1_dinode);
	readpercg = sblock.fs_ipg / fullcnt;
	partialcnt = sblock.fs_ipg % fullcnt;
	partialsize = partialcnt * sizeof(struct ufs1_dinode);
	if (partialcnt != 0) {
		readpercg++;
	} else {
		partialcnt = fullcnt;
		partialsize = inobufsize;
	}
	if (inodebuf == NULL &&
	   (inodebuf = malloc((u_int)inobufsize)) == NULL)
		errx(1, "malloc failed");
	while (nextino < ROOTINO)
		getnextinode(nextino);
}
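The closing loop discards the leading reserved inodes so that the caller's first getnextinode() returns the root: in UFS inode 0 is never used and inode 1 was historically the bad-block inode. The constant, as defined in the BSD <ufs/ufs/dinode.h>:

#define ROOTINO	((ino_t)2)	/* i-number of the root directory */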
Example no. 7
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
	daddr_t lastblock;
	struct inode *oip = VTOI(ovp);
	daddr_t bn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t blks[NDADDR + NIADDR];
	struct fs *fs;
	int offset, pgoffset, level;
	int64_t count, blocksreleased = 0;
	int i, aflag, nblocks;
	int error, allerror = 0;
	off_t osize;
	int sync;
	struct ufsmount *ump = oip->i_ump;

	if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
	    ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
		KASSERT(oip->i_size == 0);
		return 0;
	}

	if (length < 0)
		return (EINVAL);

	if (ovp->v_type == VLNK &&
	    (oip->i_size < ump->um_maxsymlinklen ||
	     (ump->um_maxsymlinklen == 0 && DIP(oip, blocks) == 0))) {
		KDASSERT(length == 0);
		memset(SHORTLINK(oip), 0, (size_t)oip->i_size);
		oip->i_size = 0;
		DIP_ASSIGN(oip, size, 0);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, NULL, NULL, 0));
	}
	if (oip->i_size == length) {
		/* still do a uvm_vnp_setsize() as writesize may be larger */
		uvm_vnp_setsize(ovp, length);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, NULL, NULL, 0));
	}
	fs = oip->i_fs;
	if (length > ump->um_maxfilesize)
		return (EFBIG);

	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);

	osize = oip->i_size;
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */

	if (osize < length) {
		if (lblkno(fs, osize) < NDADDR &&
		    lblkno(fs, osize) != lblkno(fs, length) &&
		    blkroundup(fs, osize) != osize) {
			off_t eob;

			eob = blkroundup(fs, osize);
			uvm_vnp_setwritesize(ovp, eob);
			error = ufs_balloc_range(ovp, osize, eob - osize,
			    cred, aflag);
			if (error) {
				(void) ffs_truncate(ovp, osize,
				    ioflag & IO_SYNC, cred);
				return error;
			}
			if (ioflag & IO_SYNC) {
				mutex_enter(ovp->v_interlock);
				VOP_PUTPAGES(ovp,
				    trunc_page(osize & fs->fs_bmask),
				    round_page(eob), PGO_CLEANIT | PGO_SYNCIO |
				    PGO_JOURNALLOCKED);
			}
		}
		uvm_vnp_setwritesize(ovp, length);
		error = ufs_balloc_range(ovp, length - 1, 1, cred, aflag);
		if (error) {
			(void) ffs_truncate(ovp, osize, ioflag & IO_SYNC, cred);
			return (error);
		}
		uvm_vnp_setsize(ovp, length);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		KASSERT(ovp->v_size == oip->i_size);
		return (ffs_update(ovp, NULL, NULL, 0));
	}

	/*
	 * When truncating a regular file down to a non-block-aligned size,
	 * we must zero the part of last block which is past the new EOF.
	 * We must synchronously flush the zeroed pages to disk
	 * since the new pages will be invalidated as soon as we
	 * inform the VM system of the new, smaller size.
	 * We must do this before acquiring the GLOCK, since fetching
	 * the pages will acquire the GLOCK internally.
	 * So there is a window where another thread could see a whole
	 * zeroed page past EOF, but that's life.
	 */

	offset = blkoff(fs, length);
	pgoffset = length & PAGE_MASK;
	if (ovp->v_type == VREG && (pgoffset != 0 || offset != 0) &&
	    osize > length) {
		daddr_t lbn;
		voff_t eoz;
		int size;

		if (offset != 0) {
			error = ufs_balloc_range(ovp, length - 1, 1, cred,
			    aflag);
			if (error)
				return error;
		}
		lbn = lblkno(fs, length);
		size = blksize(fs, oip, lbn);
		eoz = MIN(MAX(lblktosize(fs, lbn) + size, round_page(pgoffset)),
		    osize);
		ubc_zerorange(&ovp->v_uobj, length, eoz - length,
		    UBC_UNMAP_FLAG(ovp));
		if (round_page(eoz) > round_page(length)) {
			mutex_enter(ovp->v_interlock);
			error = VOP_PUTPAGES(ovp, round_page(length),
			    round_page(eoz),
			    PGO_CLEANIT | PGO_DEACTIVATE | PGO_JOURNALLOCKED |
			    ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
			if (error)
				return error;
		}
	}

	genfs_node_wrlock(ovp);
	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	uvm_vnp_setsize(ovp, length);
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before freeing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	sync = 0;
	for (level = TRIPLE; level >= SINGLE; level--) {
		blks[NDADDR + level] = DIP(oip, ib[level]);
		if (lastiblock[level] < 0 && blks[NDADDR + level] != 0) {
			sync = 1;
			DIP_ASSIGN(oip, ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		blks[i] = DIP(oip, db[i]);
		if (i > lastblock && blks[i] != 0) {
			sync = 1;
			DIP_ASSIGN(oip, db[i], 0);
		}
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (sync) {
		error = ffs_update(ovp, NULL, NULL, UPDATE_WAIT);
		if (error && !allerror)
			allerror = error;
	}

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		bn = DIP(oip, db[i]);
		DIP_ASSIGN(oip, db[i], blks[i]);
		blks[i] = bn;
	}
	for (i = 0; i < NIADDR; i++) {
		bn = DIP(oip, ib[i]);
		DIP_ASSIGN(oip, ib[i], blks[NDADDR + i]);
		blks[NDADDR + i] = bn;
	}

	oip->i_size = osize;
	DIP_ASSIGN(oip, size, osize);
	error = vtruncbuf(ovp, lastblock + 1, 0, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_ib[level],UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_ib[level],UFS_FSNEEDSWAP(fs));
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_ASSIGN(oip, ib[level], 0);
				if (oip->i_ump->um_mountp->mnt_wapbl) {
					UFS_WAPBL_REGISTER_DEALLOCATION(
					    oip->i_ump->um_mountp,
					    fsbtodb(fs, bn), fs->fs_bsize);
				} else
					ffs_blkfree(fs, oip->i_devvp, bn,
					    fs->fs_bsize, oip->i_number);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_db[i], UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_db[i], UFS_FSNEEDSWAP(fs));
		if (bn == 0)
			continue;
		DIP_ASSIGN(oip, db[i], 0);
		bsize = blksize(fs, oip, i);
		if ((oip->i_ump->um_mountp->mnt_wapbl) &&
		    (ovp->v_type != VREG)) {
			UFS_WAPBL_REGISTER_DEALLOCATION(oip->i_ump->um_mountp,
			    fsbtodb(fs, bn), bsize);
		} else
			ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	if (oip->i_ump->um_fstype == UFS1)
		bn = ufs_rw32(oip->i_ffs1_db[lastblock], UFS_FSNEEDSWAP(fs));
	else
		bn = ufs_rw64(oip->i_ffs2_db[lastblock], UFS_FSNEEDSWAP(fs));
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP_ASSIGN(oip, size, length);
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be freed is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			if ((oip->i_ump->um_mountp->mnt_wapbl) &&
			    (ovp->v_type != VREG)) {
				UFS_WAPBL_REGISTER_DEALLOCATION(
				    oip->i_ump->um_mountp, fsbtodb(fs, bn),
				    oldspace - newspace);
			} else
				ffs_blkfree(fs, oip->i_devvp, bn,
				    oldspace - newspace, oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}

done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (blks[NDADDR + level] != DIP(oip, ib[level]))
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (blks[i] != DIP(oip, db[i]))
			panic("itrunc2");
	if (length == 0 &&
	    (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	DIP_ADD(oip, blocks, -blocksreleased);
	genfs_node_unlock(ovp);
	oip->i_flag |= IN_CHANGE;
	UFS_WAPBL_UPDATE(ovp, NULL, NULL, 0);
#if defined(QUOTA) || defined(QUOTA2)
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_size);
	return (allerror);
}
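The heart of the truncation is the index arithmetic performed right after genfs_node_wrlock(). A hand-worked case (illustrative geometry: fs_bsize = 8192, NDADDR = 12, NINDIR = 2048, truncating down to length = 100 KB):

/*
 * lastblock = lblkno(fs, 102400 + 8191) - 1 = 13 - 1 = 12
 * lastiblock[SINGLE] = 12 - 12           = 0        (partially kept)
 * lastiblock[DOUBLE] = 0 - 2048          = -2048    (freed entirely)
 * lastiblock[TRIPLE] = -2048 - 2048*2048 = negative (freed entirely)
 *
 * A non-negative lastiblock[level] means that level of the indirect
 * chain is still partially in use; a negative one means the whole
 * chain and its ib[level] pointer can be released.  Truncating to 0
 * yields lastblock = -1, so every direct and indirect block goes.
 */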
Example no. 8
/*
 * Enable logging
 */
int
lqfs_enable(struct vnode *vp, struct fiolog *flp, cred_t *cr)
{
	int		error;
	inode_t		*ip = VTOI(vp);
	qfsvfs_t	*qfsvfsp = ip->i_qfsvfs;
	fs_lqfs_common_t	*fs = VFS_FS_PTR(qfsvfsp);
	ml_unit_t	*ul;
#ifdef LQFS_TODO_LOCKFS
	int		reclaim = 0;
	struct lockfs	lf;
	struct ulockfs	*ulp;
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */
	vfs_t		*vfsp = qfsvfsp->vfs_vfs;
	uint64_t	tmp_nbytes_actual;
	char fsclean;
	sam_sblk_t	*sblk = qfsvfsp->mi.m_sbp;

	/*
	 * File system is not capable of logging.
	 */
	if (!LQFS_CAPABLE(qfsvfsp)) {
		flp->error = FIOLOG_ENOTSUP;
		error = 0;
		goto out;
	}
	if (!SAM_MAGIC_V2A_OR_HIGHER(&sblk->info.sb)) {
		cmn_err(CE_WARN, "SAM-QFS: %s: Not enabling logging, "
		    " file system is not version 2A.", qfsvfsp->mt.fi_name);
		cmn_err(CE_WARN, "\tUpgrade file system with samfsck -u "
		    "first.");
		flp->error = FIOLOG_ENOTSUP;
		error = 0;
		goto out;
	}

	if (LQFS_GET_LOGBNO(fs)) {
		error = lqfs_log_validate(qfsvfsp, flp, cr);
	}

	/*
	 * Check if logging is already enabled
	 */
	if (LQFS_GET_LOGP(qfsvfsp)) {
		flp->error = FIOLOG_ETRANS;
		/* for root ensure logging option is set */
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		error = 0;
		goto out;
	}

	/*
	 * Come back here to recheck if we had to disable the log.
	 */
recheck:
	error = 0;
	flp->error = FIOLOG_ENONE;

	/*
	 * Adjust requested log size
	 */
	flp->nbytes_actual = flp->nbytes_requested;
	if (flp->nbytes_actual == 0) {
		tmp_nbytes_actual =
		    (((uint64_t)FS_SIZE(fs)) / ldl_divisor) << FS_FSHIFT(fs);
		flp->nbytes_actual = (uint_t)MIN(tmp_nbytes_actual, INT_MAX);
	}
	flp->nbytes_actual = MAX(flp->nbytes_actual, ldl_minlogsize);
	flp->nbytes_actual = MIN(flp->nbytes_actual, ldl_maxlogsize);
	flp->nbytes_actual = blkroundup(fs, flp->nbytes_actual);

	/*
	 * logging is enabled and the log is the right size; done
	 */
	ul = LQFS_GET_LOGP(qfsvfsp);

	if (ul && LQFS_GET_LOGBNO(fs) &&
	    (flp->nbytes_actual == ul->un_requestsize)) {
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		error = 0;
		goto out;
	}

	/*
	 * Readonly file system
	 */
	if (FS_RDONLY(fs)) {
		flp->error = FIOLOG_EROFS;
		error = 0;
		goto out;
	}

#ifdef LQFS_TODO_LOCKFS
	/*
	 * File system must be write locked to enable logging
	 */
	error = qfs_fiolfss(vp, &lf);
	if (error) {
		goto out;
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		error = 0;
		goto out;
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * Grab appropriate locks to synchronize with the rest
	 * of the system
	 */
	vfs_lock_wait(vfsp);
#ifdef LQFS_TODO_LOCKFS
	ulp = &qfsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
#else
	/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * File system must be fairly consistent to enable logging
	 */
	fsclean = LQFS_GET_FS_CLEAN(fs);
	if (fsclean != FSLOG &&
	    fsclean != FSACTIVE &&
	    fsclean != FSSTABLE &&
	    fsclean != FSCLEAN) {
		flp->error = FIOLOG_ECLEAN;
		goto unlockout;
	}

#ifdef LUFS
	/*
	 * A write-locked file system is only active if there are
	 * open deleted files; so remember to set FS_RECLAIM later.
	 */
	if (LQFS_GET_FS_CLEAN(fs) == FSACTIVE) {
		reclaim = FS_RECLAIM;
	}
#else
	/* QFS doesn't have a reclaim file thread. */
#endif /* LUFS */

	/*
	 * Logging is already enabled; must be changing the log's size
	 */
	if (LQFS_GET_LOGBNO(fs) && LQFS_GET_LOGP(qfsvfsp)) {
#ifdef LQFS_TODO_LOCKFS
		/*
		 * Before we can disable logging, we must give up our
		 * lock.  As a consequence of unlocking and disabling the
		 * log, the fs structure may change.  Because of this, when
		 * disabling is complete, we will go back to recheck to
		 * repeat all of the checks that we performed to get to
		 * this point.  Disabling sets fs->fs_logbno to 0, so this
		 * will not put us into an infinite loop.
		 */
		mutex_exit(&ulp->ul_lock);
#else
		/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */
		vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
		lf.lf_lock = LOCKFS_ULOCK;
		lf.lf_flags = 0;
		error = qfs_fiolfs(vp, &lf, 1);
		if (error) {
			flp->error = FIOLOG_ENOULOCK;
			error = 0;
			goto out;
		}
#else
		/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */
		error = lqfs_disable(vp, flp);
		if (error || (flp->error != FIOLOG_ENONE)) {
			error = 0;
			goto out;
		}
		goto recheck;
	}

	error = lqfs_alloc(qfsvfsp, flp, cr);
	if (error) {
		goto errout;
	}
#ifdef LUFS
#else
	if ((error = lqfs_log_validate(qfsvfsp, flp, cr)) != 0) {
		goto errout;
	}
#endif /* LUFS */

	/*
	 * Create all of the incore structs
	 */
	error = lqfs_snarf(qfsvfsp, fs, 0);
	if (error) {
		goto errout;
	}

	/*
	 * DON'T ``GOTO ERROUT'' PAST THIS POINT
	 */

	/*
	 * Pretend we were just mounted with logging enabled
	 *	freeze and drain the file system of readers
	 *		Get the ops vector
	 *		If debug, record metadata locations with log subsystem
	 *		Start the delete thread
	 *		Start the reclaim thread, if necessary
	 *	Thaw readers
	 */
	vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);

	TRANS_DOMATAMAP(qfsvfsp);
	TRANS_MATA_MOUNT(qfsvfsp);
	TRANS_MATA_SI(qfsvfsp, fs);

#ifdef LUFS
	qfs_thread_start(&qfsvfsp->vfs_delete, qfs_thread_delete, vfsp);
	if (fs->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
		fs->fs_reclaim &= ~FS_RECLAIM;
		fs->fs_reclaim |=  FS_RECLAIMING;
		qfs_thread_start(&qfsvfsp->vfs_reclaim,
		    qfs_thread_reclaim, vfsp);
	} else {
		fs->fs_reclaim |= reclaim;
	}
#else
	/* QFS doesn't have file reclaim nor i-node delete threads. */
#endif /* LUFS */

#ifdef LQFS_TODO_LOCKFS
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * There's nothing in the log yet (we've just allocated it)
	 * so directly write out the super block.
	 * Note, we have to force this sb out to disk
	 * (not just to the log) so that if we crash we know we are logging
	 */
	VFS_LOCK_MUTEX_ENTER(qfsvfsp);
	LQFS_SET_FS_CLEAN(fs, FSLOG);
	LQFS_SET_FS_ROLLED(fs, FS_NEED_ROLL);	/* Mark the fs as unrolled */
#ifdef LUFS
	QFS_BWRITE2(NULL, qfsvfsp->vfs_bufp);
#else
	sam_update_sblk(qfsvfsp, 0, 0, TRUE);
#endif /* LUFS */
	VFS_LOCK_MUTEX_EXIT(qfsvfsp);

	error = 0;
	goto out;

errout:
	/*
	 * Acquire the qfs_scan_lock before de-linking the mtm data
	 * structure, so that qfs_sync() and qfs_update(), which run
	 * ufs_scan_inodes(), stay away while we are in the middle of
	 * enabling/disabling logging.
	 */
	mutex_enter(&qfs_scan_lock);
	(void) lqfs_unsnarf(qfsvfsp);
	mutex_exit(&qfs_scan_lock);

	(void) lqfs_free(qfsvfsp);
unlockout:
#ifdef LQFS_TODO_LOCKFS
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) qfs_fiolfs(vp, &lf, 1);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

out:
	mutex_enter(&ip->mp->ms.m_waitwr_mutex);
	ip->mp->mt.fi_status |= FS_LOGSTATE_KNOWN;
	mutex_exit(&ip->mp->ms.m_waitwr_mutex);
	return (error);
}
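The size negotiation in the recheck block reduces to clamp-then-round: take the caller's request (or a default proportional to the file system size), bound it by the ldl tunables, and round up to a block. A compact restatement (a sketch; the identifiers mirror the function above):

/*
 * Equivalent arithmetic for the log sizing:
 *
 *	want = flp->nbytes_requested;
 *	if (want == 0)
 *		want = MIN((FS_SIZE(fs) / ldl_divisor) << FS_FSHIFT(fs),
 *		    INT_MAX);
 *	want = MAX(want, ldl_minlogsize);
 *	want = MIN(want, ldl_maxlogsize);
 *	flp->nbytes_actual = blkroundup(fs, want);
 *
 * so the log is never smaller than ldl_minlogsize, never larger than
 * ldl_maxlogsize, and always ends on a block boundary.
 */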
Example no. 9
/*
 * Do a simple estimate of the space needed to hold the statefile,
 * taking compression into account, but be fairly conservative
 * so we have a better chance of completing; when dump fails,
 * the retry cost is fairly high.
 *
 * Allocate disk blocks for the state file if no space has been
 * allocated yet. Since the state file will not be removed,
 * allocation should only be done once.
 */
static int
cpr_statefile_ok(vnode_t *vp, int alloc_retry)
{
	extern size_t cpr_bitmap_size;
	struct inode *ip = VTOI(vp);
	const int UCOMP_RATE = 20; /* comp. ratio*10 for user pages */
	u_longlong_t size, isize, ksize, raw_data;
	char *str, *est_fmt;
	size_t space;
	int error;

	/*
	 * number of pages short for swapping.
	 */
	STAT->cs_nosw_pages = k_anoninfo.ani_mem_resv;
	if (STAT->cs_nosw_pages < 0)
		STAT->cs_nosw_pages = 0;

	str = "cpr_statefile_ok:";

	CPR_DEBUG(CPR_DEBUG9, "Phys swap: max=%lu resv=%lu\n",
	    k_anoninfo.ani_max, k_anoninfo.ani_phys_resv);
	CPR_DEBUG(CPR_DEBUG9, "Mem swap: max=%ld resv=%lu\n",
	    MAX(availrmem - swapfs_minfree, 0),
	    k_anoninfo.ani_mem_resv);
	CPR_DEBUG(CPR_DEBUG9, "Total available swap: %ld\n",
	    CURRENT_TOTAL_AVAILABLE_SWAP);

	/*
	 * try increasing filesize by 15%
	 */
	if (alloc_retry) {
		/*
		 * block device doesn't get any bigger
		 */
		if (vp->v_type == VBLK) {
			if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6))
				prom_printf(
				    "Retry statefile on special file\n");
			return (ENOMEM);
		} else {
			rw_enter(&ip->i_contents, RW_READER);
			size = (ip->i_size * SIZE_RATE) / INTEGRAL;
			rw_exit(&ip->i_contents);
		}
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6))
			prom_printf("Retry statefile size = %lld\n", size);
	} else {
		u_longlong_t cpd_size;
		pgcnt_t npages, nback;
		int ndvram;

		ndvram = 0;
		(void) callb_execute_class(CB_CL_CPR_FB,
		    (int)(uintptr_t)&ndvram);
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6))
			prom_printf("ndvram size = %d\n", ndvram);

		/*
		 * estimate 1 cpd_t for every (CPR_MAXCONTIG / 2) pages
		 */
		npages = cpr_count_kpages(REGULAR_BITMAP, cpr_nobit);
		cpd_size = sizeof (cpd_t) * (npages / (CPR_MAXCONTIG / 2));
		raw_data = cpd_size + cpr_bitmap_size;
		ksize = ndvram + mmu_ptob(npages);

		est_fmt = "%s estimated size with "
		    "%scompression %lld, ksize %lld\n";
		nback = mmu_ptob(STAT->cs_nosw_pages);
		if (CPR->c_flags & C_COMPRESSING) {
			size = ((ksize * COMPRESS_PERCENT) / INTEGRAL) +
			    raw_data + ((nback * 10) / UCOMP_RATE);
			CPR_DEBUG(CPR_DEBUG1, est_fmt, str, "", size, ksize);
		} else {
			size = ksize + raw_data + nback;
			CPR_DEBUG(CPR_DEBUG1, est_fmt, str, "no ",
			    size, ksize);
		}
	}

	/*
	 * All this is much simpler for a block device
	 */
	if (vp->v_type == VBLK) {
		space = cpr_get_devsize(vp->v_rdev);
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6))
			prom_printf("statefile dev size %lu\n", space);

		/*
		 * Export the estimated filesize info, this value will be
		 * compared before dumping out the statefile in the case of
		 * no compression.
		 */
		STAT->cs_est_statefsz = size;
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6))
			prom_printf("%s Estimated statefile size %llu, "
			    "space %lu\n", str, size, space);
		if (size > space) {
			cpr_err(CE_CONT, "Statefile partition too small.");
			return (ENOMEM);
		}
		return (0);
	} else {
		if (CPR->c_alloc_cnt++ > C_MAX_ALLOC_RETRY) {
			cpr_err(CE_CONT, "Statefile allocation retry failed\n");
			return (ENOMEM);
		}

		/*
		 * Estimate space needed for the state file.
		 *
		 * State file size in bytes:
		 * 	kernel size + non-cache pte seg +
		 *	bitmap size + cpr state file headers size
		 * (round up to fs->fs_bsize)
		 */
		size = blkroundup(ip->i_fs, size);

		/*
		 * Export the estimated filesize info, this value will be
		 * compared before dumping out the statefile in the case of
		 * no compression.
		 */
		STAT->cs_est_statefsz = size;
		error = cpr_grow_statefile(vp, size);
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG6)) {
			rw_enter(&ip->i_contents, RW_READER);
			isize = ip->i_size;
			rw_exit(&ip->i_contents);
			prom_printf("%s Estimated statefile size %lld, "
			    "i_size %lld\n", str, size, isize);
		}

		return (error);
	}
}
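The compressing-case estimate is one line of arithmetic. A worked instance (the values and constants below are illustrative assumptions; the real tunables live in the cpr headers, with UCOMP_RATE = 20 meaning a 2.0:1 user-page ratio per the "comp. ratio*10" comment above):

/*
 * Assuming COMPRESS_PERCENT = 40, INTEGRAL = 100, UCOMP_RATE = 20,
 * ksize = 512 MB, raw_data = 4 MB, nback = 64 MB:
 *
 *	size = (512 MB * 40) / 100	kernel pages, assumed ~2.5:1
 *	     + 4 MB			descriptors + bitmaps, uncompressed
 *	     + (64 MB * 10) / 20	unbacked user pages at 2:1
 *	     = 204.8 + 4 + 32 = ~241 MB, then blkroundup()ed to fs_bsize.
 */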