Example No. 1
static ino_t
journal_findfile(void)
{
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	ino_t ino;
	int mode;
	void *ip;
	int i;

	if (getino(&disk, &ip, ROOTINO, &mode) != 0) {
		warn("Failed to get root inode");
		return (-1);
	}
	dp2 = ip;
	dp1 = ip;
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		if ((off_t)dp1->di_size >= lblktosize(&sblock, NDADDR)) {
			warnx("ROOTINO extends beyond direct blocks.");
			return (-1);
		}
		for (i = 0; i < NDADDR; i++) {
			if (dp1->di_db[i] == 0)
				break;
			if ((ino = dir_search(dp1->di_db[i],
			    sblksize(&sblock, (off_t)dp1->di_size, i))) != 0)
				return (ino);
		}
	} else {
		if ((off_t)dp2->di_size >= lblktosize(&sblock, NDADDR)) {
			warnx("ROOTINO extends beyond direct blocks.");
			return (-1);
		}
		for (i = 0; i < NDADDR; i++) {
			if (dp2->di_db[i] == 0)
				break;
			if ((ino = dir_search(dp2->di_db[i],
			    sblksize(&sblock, (off_t)dp2->di_size, i))) != 0)
				return (ino);
		}
	}
	return (0);
}
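
All of these examples lean on the same small set of FFS block-mapping macros.
As a reference, here is a minimal model of what they compute (simplified: the
real definitions in the UFS headers are shift/mask based and take the
superblock's precomputed fields; "struct fs_model" and the model_* names are
illustrative stand-ins, not the kernel API):

#include <sys/types.h>

struct fs_model {
	int fs_bsize;		/* filesystem block size in bytes */
	int fs_fsize;		/* fragment size in bytes */
};

/* lblktosize(fs, lbn): byte offset where logical block lbn starts */
static off_t
model_lblktosize(const struct fs_model *fs, off_t lbn)
{
	return (lbn * fs->fs_bsize);
}

/* lblkno(fs, off): logical block number containing byte offset off */
static off_t
model_lblkno(const struct fs_model *fs, off_t off)
{
	return (off / fs->fs_bsize);
}

/* blkoff(fs, off): offset of byte off within its logical block */
static off_t
model_blkoff(const struct fs_model *fs, off_t off)
{
	return (off % fs->fs_bsize);
}

/*
 * sblksize(fs, size, lbn): on-disk size of block lbn of a file of length
 * size -- a full block, except for a fragment-rounded tail block.
 */
static off_t
model_sblksize(const struct fs_model *fs, off_t size, off_t lbn)
{
	if ((lbn + 1) * fs->fs_bsize <= size)
		return (fs->fs_bsize);
	return (((size - lbn * fs->fs_bsize) + fs->fs_fsize - 1)
	    / fs->fs_fsize * fs->fs_fsize);
}

So journal_findfile()'s bound check "di_size >= lblktosize(&sblock, NDADDR)"
simply asks whether the root directory extends past its NDADDR direct blocks.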
Example No. 2
int
wapbl_allocate_log_file(struct mount *mp, struct vnode *vp,
    daddr_t *startp, size_t *countp, uint64_t *extradatap)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct inode *ip = VTOI(vp);
	daddr_t addr, indir_addr;
	off_t logsize;
	size_t size;
	int error;

	logsize = 0;
	/* check if there's a suggested log size */
	if (fs->fs_journal_flags & UFS_WAPBL_FLAGS_CREATE_LOG &&
	    fs->fs_journal_location == UFS_WAPBL_JOURNALLOC_IN_FILESYSTEM)
		logsize = fs->fs_journallocs[UFS_WAPBL_INFS_COUNT];

	if (DIP(ip, size) > 0) {
		printf("%s: file size (%lld) non zero\n", __func__,
		    DIP(ip, size));
		return EEXIST;
	}
	wapbl_find_log_start(mp, vp, logsize, &addr, &indir_addr, &size);
	if (addr == 0) {
		printf("%s: log not allocated, largest extent is "
		    "%lldMB\n", __func__,
		    lblktosize(fs, size) / (1024 * 1024));
		return ENOSPC;
	}

	logsize = lblktosize(fs, size);	/* final log size */

	VTOI(vp)->i_ffs_first_data_blk = addr;
	VTOI(vp)->i_ffs_first_indir_blk = indir_addr;

	error = ufs_gop_alloc(vp, 0, logsize, B_CONTIG, curproc->p_ucred);
	if (error) {
		printf("%s: ufs_gop_alloc error %d\n", __func__, error);
		return error;
	}

	*startp     = fsbtodb(fs, addr);
	*countp     = btodb(logsize);
	*extradatap = VTOI(vp)->i_number;

	return 0;
}
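
Note the unit conversions on return: addr is a fragment (filesystem block)
address, so *startp comes back in device sectors via fsbtodb(), while *countp
converts the byte-sized logsize to DEV_BSIZE sectors via btodb(). A simplified
model of both conversions (the real macros are shift-based; a DEV_BSIZE of 512
is assumed here):

#include <stdint.h>

#define MODEL_DEV_BSIZE	512		/* assumed sector size */

/* fsbtodb(fs, b): fragment address -> device sector address */
static int64_t
model_fsbtodb(int64_t frag, int fs_fsize)
{
	return (frag * (fs_fsize / MODEL_DEV_BSIZE));
}

/* btodb(bytes): byte count -> DEV_BSIZE sector count */
static int64_t
model_btodb(int64_t bytes)
{
	return (bytes / MODEL_DEV_BSIZE);
}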
Example No. 3
static int
udf_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct udf_node *node = VTON(vp);
	struct udf_mnt *udfmp;
	struct file_entry *fentry;
	struct buf *bp;
	uint8_t *data;
	daddr_t lbn, rablock;
	off_t diff, fsize;
	ssize_t n;
	int error = 0;
	long size, on;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	if (is_data_in_fentry(node)) {
		fentry = node->fentry;
		data = &fentry->data[le32toh(fentry->l_ea)];
		fsize = le32toh(fentry->l_ad);

		n = uio->uio_resid;
		diff = fsize - uio->uio_offset;
		if (diff <= 0)
			return (0);
		if (diff < n)
			n = diff;
		error = uiomove(data + uio->uio_offset, (int)n, uio);
		return (error);
	}

	fsize = le64toh(node->fentry->inf_len);
	udfmp = node->udfmp;
	do {
		lbn = lblkno(udfmp, uio->uio_offset);
		on = blkoff(udfmp, uio->uio_offset);
		n = min((u_int)(udfmp->bsize - on),
			uio->uio_resid);
		diff = fsize - uio->uio_offset;
		if (diff <= 0)
			return (0);
		if (diff < n)
			n = diff;
		size = udfmp->bsize;
		rablock = lbn + 1;
		if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			if (lblktosize(udfmp, rablock) < fsize) {
				error = cluster_read(vp, fsize, lbn, size,
				    NOCRED, uio->uio_resid,
				    (ap->a_ioflag >> 16), 0, &bp);
			} else {
				error = bread(vp, lbn, size, NOCRED, &bp);
			}
		} else {
			/*
			 * Clustered reads disabled; plain bread().  (The
			 * original listing was truncated at this point; the
			 * remainder of the loop is reconstructed from the
			 * matching bread()/uiomove() pattern.)
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error != 0) {
			brelse(bp);
			return (error);
		}
		n = min(n, size - bp->b_resid);

		error = uiomove(bp->b_data + on, (int)n, uio);
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);

	return (error);
}
Example No. 4
static int
marshal(const char *name)
{
	struct fs *fs;

	fs = &disk.d_fs;

	printf("# newfs command for %s (%s)\n", name, disk.d_name);
	printf("newfs ");
	if (fs->fs_volname[0] != '\0')
		printf("-L %s ", fs->fs_volname);
	printf("-O %d ", disk.d_ufs);
	if (fs->fs_flags & FS_DOSOFTDEP)
		printf("-U ");
	printf("-a %d ", fs->fs_maxcontig);
	printf("-b %d ", fs->fs_bsize);
	/* -c is dumb */
	printf("-d %d ", fs->fs_maxbsize);
	printf("-e %d ", fs->fs_maxbpg);
	printf("-f %d ", fs->fs_fsize);
	printf("-g %d ", fs->fs_avgfilesize);
	printf("-h %d ", fs->fs_avgfpdir);
	printf("-i %jd ", fragroundup(fs, lblktosize(fs, fragstoblks(fs,
	    fs->fs_fpg)) / fs->fs_ipg));
	if (fs->fs_flags & FS_SUJ)
		printf("-j ");
	if (fs->fs_flags & FS_GJOURNAL)
		printf("-J ");
	printf("-k %jd ", fs->fs_metaspace);
	if (fs->fs_flags & FS_MULTILABEL)
		printf("-l ");
	printf("-m %d ", fs->fs_minfree);
	/* -n unimplemented */
	printf("-o ");
	switch (fs->fs_optim) {
	case FS_OPTSPACE:
		printf("space ");
		break;
	case FS_OPTTIME:
		printf("time ");
		break;
	default:
		printf("unknown ");
		break;
	}
	/* -p..r unimplemented */
	printf("-s %jd ", (intmax_t)fsbtodb(fs, fs->fs_size));
	if (fs->fs_flags & FS_TRIM)
		printf("-t ");
	printf("%s ", disk.d_name);
	printf("\n");

	return 0;
}
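
The -i value reverses newfs's bytes-per-inode density: fragstoblks(fs, fs_fpg)
gives whole blocks per cylinder group, lblktosize() turns that into bytes,
dividing by fs_ipg yields bytes per inode, and fragroundup() rounds the result
up to a fragment boundary. A sketch of the same arithmetic with hypothetical
helper and parameter names:

#include <stdint.h>

static int64_t
model_bytes_per_inode(int64_t fpg, int64_t ipg, int bsize, int fsize)
{
	int64_t bpg = fpg / (bsize / fsize);	/* fragstoblks: blocks per cg */
	int64_t bpi = (bpg * bsize) / ipg;	/* lblktosize(...) / fs_ipg */

	/* fragroundup: round up to a fragment boundary */
	return ((bpi + fsize - 1) / fsize * fsize);
}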
Example No. 5
static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	makefs_daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	makefs_daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	int32_t *allocblk, allociblk[UFS_NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment, this
	 * fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(ip->i_devvp, lbn, fs->fs_bsize,
				    NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread(ip->i_devvp, lbn, osize,
					    NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * The block was not previously allocated;
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
				nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(ip->i_devvp, lbn, nsize, 0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */

	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(ip->i_devvp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize,
		    NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(ip->i_devvp, lbn, fs->fs_bsize, 0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread(ip->i_devvp, lbn, (int)fs->fs_bsize, NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}
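
The split at "lbn < UFS_NDADDR" is the direct/indirect boundary: the first
UFS_NDADDR logical blocks live in the inode itself, and everything past that
goes through one to three levels of indirection, each level multiplying the
addressable range by NINDIR(fs) (pointers per indirect block). A sketch of the
level computation that ufs_getlbns() performs, with hypothetical names and
fixed NDADDR/NINDIR parameters:

#include <stdint.h>

/* Levels of indirection needed to reach logical block lbn:
 * 0 = direct, 1..3 = single/double/triple indirect, -1 = unaddressable. */
static int
model_indir_level(int64_t lbn, int ndaddr, int64_t nindir)
{
	int64_t span;
	int level;

	if (lbn < ndaddr)
		return (0);		/* held in the inode itself */
	lbn -= ndaddr;
	for (level = 1, span = nindir; level <= 3; level++, span *= nindir) {
		if (lbn < span)
			return (level);
		lbn -= span;
	}
	return (-1);
}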
Example No. 6
/*
 * Insert the journal file into the ROOTINO directory.  We always extend the
 * last frag.
 */
static int
journal_insertfile(ino_t ino)
{
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	void *ip;
	ufs2_daddr_t nblk;
	ufs2_daddr_t blk;
	ufs_lbn_t lbn;
	int size;
	int mode;
	int off;

	if (getino(&disk, &ip, ROOTINO, &mode) != 0) {
		warn("Failed to get root inode");
		sbdirty();
		return (-1);
	}
	dp2 = ip;
	dp1 = ip;
	blk = 0;
	size = 0;
	nblk = journal_balloc();
	if (nblk <= 0)
		return (-1);
	/*
	 * For simplicity's sake we always extend the ROOTINO into a new
	 * directory block rather than searching for space and inserting
	 * into an existing block.  However, if the rootino has frags, we
	 * have to free them and extend the block.
	 */
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		lbn = lblkno(&sblock, dp1->di_size);
		off = blkoff(&sblock, dp1->di_size);
		blk = dp1->di_db[lbn];
		size = sblksize(&sblock, (off_t)dp1->di_size, lbn);
	} else {
		lbn = lblkno(&sblock, dp2->di_size);
		off = blkoff(&sblock, dp2->di_size);
		blk = dp2->di_db[lbn];
		size = sblksize(&sblock, (off_t)dp2->di_size, lbn);
	}
	if (off != 0) {
		if (dir_extend(blk, nblk, off, ino) == -1)
			return (-1);
	} else {
		blk = 0;
		if (dir_insert(nblk, 0, ino) == -1)
			return (-1);
	}
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		dp1->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE;
		dp1->di_db[lbn] = nblk;
		dp1->di_size = lblktosize(&sblock, lbn+1);
	} else {
		dp2->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE;
		dp2->di_db[lbn] = nblk;
		dp2->di_size = lblktosize(&sblock, lbn+1);
	}
	if (putino(&disk) < 0) {
		warn("Failed to write root inode");
		return (-1);
	}
	if (cgwrite(&disk) < 0) {
		warn("Failed to write updated cg");
		sbdirty();
		return (-1);
	}
	if (blk) {
		if (cgbfree(&disk, blk, size) < 0) {
			warn("Failed to write cg");
			return (-1);
		}
	}

	return (0);
}
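
The di_blocks update is in DEV_BSIZE sectors: promoting the tail fragment to a
full block adds (fs_bsize - size) / DEV_BSIZE sectors to the inode, and the
displaced fragment is handed back to its cylinder group by the cgbfree() call
at the end. A worked sketch of that arithmetic with illustrative numbers:

/* E.g. fs_bsize = 16384, old tail fragment size = 4096, DEV_BSIZE = 512:
 * the inode gains (16384 - 4096) / 512 = 24 sectors. */
static long
model_sectors_gained(int fs_bsize, int old_size, int dev_bsize)
{
	return ((long)(fs_bsize - old_size) / dev_bsize);
}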
Example No. 7
/*
 * Find a suitable location for the journal in the filesystem.
 *
 * Our strategy here is to look for a contiguous block of free space
 * at least "logfile" MB in size (plus room for any indirect blocks).
 * We start at the middle of the filesystem and check each cylinder
 * group working outwards.  If "logfile" MB is not available as a
 * single contiguous chunk, then return the address and size of the
 * largest chunk found.
 *
 * XXX
 * At what stage should the search fail?  Is it reasonable to give up
 * when the largest space we could find is less than a quarter of the
 * requested space?  If the search fails entirely, we return a block
 * address of "0" to indicate this.
 */
void
wapbl_find_log_start(struct mount *mp, struct vnode *vp, off_t logsize,
    daddr_t *addr, daddr_t *indir_addr, size_t *size)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *blksfree;
	daddr_t blkno, best_addr, start_addr;
	daddr_t desired_blks, min_desired_blks;
	daddr_t freeblks, best_blks;
	int bpcg, cg, error, fixedsize, indir_blks, n, s;
#ifdef FFS_EI
	const int needswap = UFS_FSNEEDSWAP(fs);
#endif

	if (logsize == 0) {
		fixedsize = 0;	/* We can adjust the size if tight */
		logsize = lfragtosize(fs, fs->fs_dsize) /
		    UFS_WAPBL_JOURNAL_SCALE;
		DPRINTF("suggested log size = %lld\n", logsize);
		logsize = max(logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
		logsize = min(logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
		DPRINTF("adjusted log size = %lld\n", logsize);
	} else {
		fixedsize = 1;
		DPRINTF("fixed log size = %lld\n", logsize);
	}

	desired_blks = logsize / fs->fs_bsize;
	DPRINTF("desired blocks = %lld\n", desired_blks);

	/* add in number of indirect blocks needed */
	indir_blks = 0;
	if (desired_blks >= NDADDR) {
		struct indir indirs[NIADDR + 2];
		int num;

		error = ufs_getlbns(vp, desired_blks, indirs, &num);
		if (error) {
			printf("%s: ufs_getlbns failed, error %d!\n",
			    __func__, error);
			goto bad;
		}

		switch (num) {
		case 2:
			indir_blks = 1;		/* 1st level indirect */
			break;
		case 3:
			indir_blks = 1 +	/* 1st level indirect */
			    1 +			/* 2nd level indirect */
			    indirs[1].in_off + 1; /* extra 1st level indirect */
			break;
		default:
			printf("%s: unexpected numlevels %d from ufs_getlbns\n",
			    __func__, num);
			*size = 0;
			goto bad;
		}
		desired_blks += indir_blks;
	}
	DPRINTF("desired blocks = %lld (including indirect)\n",
	    desired_blks);

	/*
	 * If a specific size wasn't requested, allow for a smaller log
	 * if we're really tight for space...
	 */
	min_desired_blks = desired_blks;
	if (!fixedsize)
		min_desired_blks = desired_blks / 4;

	/* Look at number of blocks per CG.  If it's too small, bail early. */
	bpcg = fragstoblks(fs, fs->fs_fpg);
	if (min_desired_blks > bpcg) {
		printf("ffs_wapbl: cylinder group size of %lld MB "
		    " is not big enough for journal\n",
		    lblktosize(fs, bpcg) / (1024 * 1024));
		goto bad;
	}

	/*
	 * Start with the middle cylinder group, and search outwards in
	 * both directions until we either find the requested log size
	 * or reach the start/end of the file system.  If we reach the
	 * start/end without finding enough space for the full requested
	 * log size, use the largest extent found if it is large enough
	 * to satisfy our minimum size.
	 *
	 * XXX
	 * Can we just use the cluster contigsum stuff (esp on UFS2)
	 * here to simplify this search code?
	 */
	best_addr = 0;
	best_blks = 0;
	for (cg = fs->fs_ncg / 2, s = 0, n = 1;
	    best_blks < desired_blks && cg >= 0 && cg < fs->fs_ncg;
	    s++, n = -n, cg += n * s) {
		DPRINTF("check cg %d of %d\n", cg, fs->fs_ncg);
		error = bread(devvp, fsbtodb(fs, cgtod(fs, cg)),
		    fs->fs_cgsize, &bp);
		if (error) {
			continue;
		}
		cgp = (struct cg *)bp->b_data;
		if (!cg_chkmagic(cgp)) {
			brelse(bp);
			continue;
		}

		blksfree = cg_blksfree(cgp);

		for (blkno = 0; blkno < bpcg;) {
			/* look for next free block */
			/* XXX use scanc() and fragtbl[] here? */
			for (; blkno < bpcg - min_desired_blks; blkno++)
				if (ffs_isblock(fs, blksfree, blkno))
					break;

			/* past end of search space in this CG? */
			if (blkno >= bpcg - min_desired_blks)
				break;

			/* count how many free blocks in this extent */
			start_addr = blkno;
			for (freeblks = 0; blkno < bpcg; blkno++, freeblks++)
				if (!ffs_isblock(fs, blksfree, blkno))
					break;

			if (freeblks > best_blks) {
				best_blks = freeblks;
				best_addr = blkstofrags(fs, start_addr) +
				    cgbase(fs, cg);

				if (freeblks >= desired_blks) {
					DPRINTF("found len %lld"
					    " at offset %lld in gc\n",
					    freeblks, start_addr);
					break;
				}
			}
		}
		brelse(bp);
	}
	DPRINTF("best found len = %lld, wanted %lld"
	    " at addr %lld\n", best_blks, desired_blks, best_addr);

	if (best_blks < min_desired_blks) {
		*addr = 0;
		*indir_addr = 0;
	} else {
		/* put indirect blocks at start, and data blocks after */
		*addr = best_addr + blkstofrags(fs, indir_blks);
		*indir_addr = best_addr;
	}
	*size = min(desired_blks, best_blks) - indir_blks;
	return;

bad:
	*addr = 0;
	*indir_addr = 0;
	*size = 0;
	return;
}
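
The for-loop header driving the scan is the densest part: s grows by one and n
flips sign on every pass, so starting from the middle group the index visits
mid, mid-1, mid+1, mid-2, mid+2, ... until it runs off either end. A
standalone sketch that just prints the traversal order:

#include <stdio.h>

int
main(void)
{
	int ncg = 8;		/* hypothetical number of cylinder groups */
	int cg, n, s;

	for (cg = ncg / 2, s = 0, n = 1;
	    cg >= 0 && cg < ncg;
	    s++, n = -n, cg += n * s)
		printf("%d ", cg);
	printf("\n");		/* prints: 4 3 5 2 6 1 7 0 */
	return (0);
}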
Example No. 8
/* ARGSUSED */
int
ext2fs_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct m_ext2fs *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error;

	vp = ap->a_vp;
	ip = VTOI(vp);
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", "ext2fs_read");

	if (vp->v_type == VLNK) {
		if ((int)ext2fs_size(ip) < vp->v_mount->mnt_maxsymlinklen ||
			(vp->v_mount->mnt_maxsymlinklen == 0 &&
			 ip->i_e2fs_nblock == 0))
			panic("%s: short symlink", "ext2fs_read");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", "ext2fs_read", vp->v_type);
#endif
	fs = ip->i_e2fs;
	if ((u_int64_t)uio->uio_offset >
		((u_int64_t)0x80000000 * fs->e2fs_bsize - 1))
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ext2fs_size(ip) - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = fs->e2fs_bsize;
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->e2fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ext2fs_size(ip))
			error = bread(vp, lbn, size, &bp);
		else if (lbn - 1 == ip->i_ci.ci_lastr) {
			int nextsize = fs->e2fs_bsize;
			error = breadn(vp, lbn, size, &nextlbn, &nextsize,
			    1, &bp);
		} else
			error = bread(vp, lbn, size, &bp);
		if (error)
			break;
		ip->i_ci.ci_lastr = lbn;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
		if (error)
			break;
		brelse(bp);
	}
	if (bp != NULL)
		brelse(bp);

	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
		ip->i_flag |= IN_ACCESS;
	}
	return (error);
}
Example No. 9
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
	daddr_t lastblock;
	struct inode *oip = VTOI(ovp);
	daddr_t bn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t blks[NDADDR + NIADDR];
	struct fs *fs;
	int offset, pgoffset, level;
	int64_t count, blocksreleased = 0;
	int i, aflag, nblocks;
	int error, allerror = 0;
	off_t osize;
	int sync;
	struct ufsmount *ump = oip->i_ump;

	if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
	    ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
		KASSERT(oip->i_size == 0);
		return 0;
	}

	if (length < 0)
		return (EINVAL);

	if (ovp->v_type == VLNK &&
	    (oip->i_size < ump->um_maxsymlinklen ||
	     (ump->um_maxsymlinklen == 0 && DIP(oip, blocks) == 0))) {
		KDASSERT(length == 0);
		memset(SHORTLINK(oip), 0, (size_t)oip->i_size);
		oip->i_size = 0;
		DIP_ASSIGN(oip, size, 0);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, NULL, NULL, 0));
	}
	if (oip->i_size == length) {
		/* still do a uvm_vnp_setsize() as writesize may be larger */
		uvm_vnp_setsize(ovp, length);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, NULL, NULL, 0));
	}
	fs = oip->i_fs;
	if (length > ump->um_maxfilesize)
		return (EFBIG);

	if ((oip->i_flags & SF_SNAPSHOT) != 0)
		ffs_snapremove(ovp);

	osize = oip->i_size;
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */

	if (osize < length) {
		if (lblkno(fs, osize) < NDADDR &&
		    lblkno(fs, osize) != lblkno(fs, length) &&
		    blkroundup(fs, osize) != osize) {
			off_t eob;

			eob = blkroundup(fs, osize);
			uvm_vnp_setwritesize(ovp, eob);
			error = ufs_balloc_range(ovp, osize, eob - osize,
			    cred, aflag);
			if (error) {
				(void) ffs_truncate(ovp, osize,
				    ioflag & IO_SYNC, cred);
				return error;
			}
			if (ioflag & IO_SYNC) {
				mutex_enter(ovp->v_interlock);
				VOP_PUTPAGES(ovp,
				    trunc_page(osize & fs->fs_bmask),
				    round_page(eob), PGO_CLEANIT | PGO_SYNCIO |
				    PGO_JOURNALLOCKED);
			}
		}
		uvm_vnp_setwritesize(ovp, length);
		error = ufs_balloc_range(ovp, length - 1, 1, cred, aflag);
		if (error) {
			(void) ffs_truncate(ovp, osize, ioflag & IO_SYNC, cred);
			return (error);
		}
		uvm_vnp_setsize(ovp, length);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		KASSERT(ovp->v_size == oip->i_size);
		return (ffs_update(ovp, NULL, NULL, 0));
	}

	/*
	 * When truncating a regular file down to a non-block-aligned size,
	 * we must zero the part of last block which is past the new EOF.
	 * We must synchronously flush the zeroed pages to disk
	 * since the new pages will be invalidated as soon as we
	 * inform the VM system of the new, smaller size.
	 * We must do this before acquiring the GLOCK, since fetching
	 * the pages will acquire the GLOCK internally.
	 * So there is a window where another thread could see a whole
	 * zeroed page past EOF, but that's life.
	 */

	offset = blkoff(fs, length);
	pgoffset = length & PAGE_MASK;
	if (ovp->v_type == VREG && (pgoffset != 0 || offset != 0) &&
	    osize > length) {
		daddr_t lbn;
		voff_t eoz;
		int size;

		if (offset != 0) {
			error = ufs_balloc_range(ovp, length - 1, 1, cred,
			    aflag);
			if (error)
				return error;
		}
		lbn = lblkno(fs, length);
		size = blksize(fs, oip, lbn);
		eoz = MIN(MAX(lblktosize(fs, lbn) + size, round_page(pgoffset)),
		    osize);
		ubc_zerorange(&ovp->v_uobj, length, eoz - length,
		    UBC_UNMAP_FLAG(ovp));
		if (round_page(eoz) > round_page(length)) {
			mutex_enter(ovp->v_interlock);
			error = VOP_PUTPAGES(ovp, round_page(length),
			    round_page(eoz),
			    PGO_CLEANIT | PGO_DEACTIVATE | PGO_JOURNALLOCKED |
			    ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
			if (error)
				return error;
		}
	}

	genfs_node_wrlock(ovp);
	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	uvm_vnp_setsize(ovp, length);
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	sync = 0;
	for (level = TRIPLE; level >= SINGLE; level--) {
		blks[NDADDR + level] = DIP(oip, ib[level]);
		if (lastiblock[level] < 0 && blks[NDADDR + level] != 0) {
			sync = 1;
			DIP_ASSIGN(oip, ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		blks[i] = DIP(oip, db[i]);
		if (i > lastblock && blks[i] != 0) {
			sync = 1;
			DIP_ASSIGN(oip, db[i], 0);
		}
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (sync) {
		error = ffs_update(ovp, NULL, NULL, UPDATE_WAIT);
		if (error && !allerror)
			allerror = error;
	}

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		bn = DIP(oip, db[i]);
		DIP_ASSIGN(oip, db[i], blks[i]);
		blks[i] = bn;
	}
	for (i = 0; i < NIADDR; i++) {
		bn = DIP(oip, ib[i]);
		DIP_ASSIGN(oip, ib[i], blks[NDADDR + i]);
		blks[NDADDR + i] = bn;
	}

	oip->i_size = osize;
	DIP_ASSIGN(oip, size, osize);
	error = vtruncbuf(ovp, lastblock + 1, 0, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_ib[level],UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_ib[level],UFS_FSNEEDSWAP(fs));
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_ASSIGN(oip, ib[level], 0);
				if (oip->i_ump->um_mountp->mnt_wapbl) {
					UFS_WAPBL_REGISTER_DEALLOCATION(
					    oip->i_ump->um_mountp,
					    fsbtodb(fs, bn), fs->fs_bsize);
				} else
					ffs_blkfree(fs, oip->i_devvp, bn,
					    fs->fs_bsize, oip->i_number);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		if (oip->i_ump->um_fstype == UFS1)
			bn = ufs_rw32(oip->i_ffs1_db[i], UFS_FSNEEDSWAP(fs));
		else
			bn = ufs_rw64(oip->i_ffs2_db[i], UFS_FSNEEDSWAP(fs));
		if (bn == 0)
			continue;
		DIP_ASSIGN(oip, db[i], 0);
		bsize = blksize(fs, oip, i);
		if ((oip->i_ump->um_mountp->mnt_wapbl) &&
		    (ovp->v_type != VREG)) {
			UFS_WAPBL_REGISTER_DEALLOCATION(oip->i_ump->um_mountp,
			    fsbtodb(fs, bn), bsize);
		} else
			ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	if (oip->i_ump->um_fstype == UFS1)
		bn = ufs_rw32(oip->i_ffs1_db[lastblock], UFS_FSNEEDSWAP(fs));
	else
		bn = ufs_rw64(oip->i_ffs2_db[lastblock], UFS_FSNEEDSWAP(fs));
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		DIP_ASSIGN(oip, size, length);
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			if ((oip->i_ump->um_mountp->mnt_wapbl) &&
			    (ovp->v_type != VREG)) {
				UFS_WAPBL_REGISTER_DEALLOCATION(
				    oip->i_ump->um_mountp, fsbtodb(fs, bn),
				    oldspace - newspace);
			} else
				ffs_blkfree(fs, oip->i_devvp, bn,
				    oldspace - newspace, oip->i_number);
			blocksreleased += btodb(oldspace - newspace);
		}
	}

done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (blks[NDADDR + level] != DIP(oip, ib[level]))
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (blks[i] != DIP(oip, db[i]))
			panic("itrunc2");
	if (length == 0 &&
	    (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	DIP_ASSIGN(oip, size, length);
	DIP_ADD(oip, blocks, -blocksreleased);
	genfs_node_unlock(ovp);
	oip->i_flag |= IN_CHANGE;
	UFS_WAPBL_UPDATE(ovp, NULL, NULL, 0);
#if defined(QUOTA) || defined(QUOTA2)
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_size);
	return (allerror);
}
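
The index arithmetic feeding the freeing passes is worth restating: lastblock
is the last logical block to keep (-1 when truncating to zero), and each
lastiblock[] entry rebases that index into the single, double, and triple
indirect trees, going negative when an entire tree must be freed. A sketch
with hypothetical names, assuming fixed NDADDR/NINDIR parameters:

#include <stdint.h>

#define MODEL_SINGLE	0
#define MODEL_DOUBLE	1
#define MODEL_TRIPLE	2

static void
model_last_blocks(int64_t length, int bsize, int ndaddr, int64_t nindir,
    int64_t *lastblock, int64_t lastiblock[3])
{
	/* lblkno(fs, length + bsize - 1) - 1: last logical block kept */
	*lastblock = (length + bsize - 1) / bsize - 1;
	lastiblock[MODEL_SINGLE] = *lastblock - ndaddr;
	lastiblock[MODEL_DOUBLE] = lastiblock[MODEL_SINGLE] - nindir;
	lastiblock[MODEL_TRIPLE] = lastiblock[MODEL_DOUBLE] - nindir * nindir;
}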
Example No. 10
/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
int
ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
    int flags, struct buf **bpp)
{
	daddr_t lbn, nb, newb, pref;
	struct fs *fs;
	struct buf *bp, *nbp;
	struct vnode *vp;
	struct proc *p;
	struct indir indirs[NIADDR + 2];
	int32_t *bap;
	int deallocated, osize, nsize, num, i, error;
	int32_t *allocib, *blkp, *allocblk, allociblk[NIADDR+1];
	int unwindidx = -1;

	vp = ITOV(ip);
	fs = ip->i_fs;
	p = curproc;
	lbn = lblkno(fs, startoffset);
	size = blkoff(fs, startoffset) + size;
	if (size > fs->fs_bsize)
		panic("ffs1_balloc: blk too big");
	if (bpp != NULL)
		*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment, this
	 * fragment has to be extended to be a full block.
	 */
	nb = lblkno(fs, ip->i_ffs1_size);
	if (nb < NDADDR && nb < lbn) {
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			error = ffs_realloccg(ip, nb,
			    ffs1_blkpref(ip, nb, (int)nb, &ip->i_ffs1_db[0]),
			    osize, (int)fs->fs_bsize, cred, bpp, &newb);
			if (error)
				return (error);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb, newb,
				    ip->i_ffs1_db[nb], fs->fs_bsize, osize,
				    bpp ? *bpp : NULL);

			ip->i_ffs1_size = lblktosize(fs, nb + 1);
			uvm_vnp_setsize(vp, ip->i_ffs1_size);
			ip->i_ffs1_db[nb] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp != NULL) {
				if (flags & B_SYNC)
					bwrite(*bpp);
				else
					bawrite(*bpp);
			}
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_ffs1_db[lbn];
		if (nb != 0 && ip->i_ffs1_size >= lblktosize(fs, lbn + 1)) {
			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(vp, lbn, fs->fs_bsize, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */
				if (bpp != NULL) {
					error = bread(vp, lbn, fs->fs_bsize,
					    bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
					(*bpp)->b_bcount = osize;
				}
				return (0);
			} else {
				/*
				 * The existing block is smaller than we
				 * want, grow it.
				 */
				error = ffs_realloccg(ip, lbn,
				    ffs1_blkpref(ip, lbn, (int)lbn,
					&ip->i_ffs1_db[0]),
				    osize, nsize, cred, bpp, &newb);
				if (error)
					return (error);
				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    newb, nb, nsize, osize,
					    bpp ? *bpp : NULL);
			}
		} else {
			/*
			 * The block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs1_blkpref(ip, lbn, (int)lbn, &ip->i_ffs1_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				*bpp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
				if (nsize < fs->fs_bsize)
					(*bpp)->b_bcount = nsize;
				(*bpp)->b_blkno = fsbtodb(fs, newb);
				if (flags & B_CLRBUF)
					clrbuf(*bpp);
			}
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bpp ? *bpp : NULL);
		}
		ip->i_ffs1_db[lbn] = newb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return(error);
#ifdef DIAGNOSTIC
	if (num < 1)
		panic ("ffs1_balloc: ufs_bmaparray returned indirect block");
#endif
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ip->i_ffs1_ib[indirs[0].in_off];

	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs1_blkpref(ip, lbn, -indirs[0].in_off - 1, NULL);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb);
		if (error)
			goto fail;
		nb = newb;

		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);

		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp, indirs[i].in_lbn, (int)fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (int32_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs1_blkpref(ip, lbn, i - num - 1, NULL);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
				  &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);

		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs1_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
				  &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			if (flags & B_CLRBUF)
				clrbuf(nbp);
			*bpp = nbp;
		}
		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, bpp ? *bpp : NULL);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		if (flags & B_CLRBUF) {
			error = bread(vp, lbn, (int)fs->fs_bsize, &nbp);
			if (error) {
				brelse(nbp);
				goto fail;
			}
		} else {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
		}
		*bpp = nbp;
	}
	return (0);

fail:
	/*
	 * If we have failed to allocate any blocks, simply return the error.
	 * This is the usual case and avoids the need to fsync the file.
	 */
	if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
		return (error);
	/*
	 * If we have failed part way through block allocation, we have to
	 * deallocate any indirect blocks that we have allocated. We have to
	 * fsync the file before we start to get rid of all of its
	 * dependencies so that we do not leave them dangling. We have to sync
	 * it at the end so that the softdep code does not find any untracked
	 * changes. Although this is really slow, running out of disk space is
	 * not expected to be a common occurrence. The error return from fsync
	 * is ignored as we already have an error to return to the user.
	 */
	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL) {
		*allocib = 0;
	} else if (unwindidx >= 0) {
		int r;

		r = bread(vp, indirs[unwindidx].in_lbn, (int)fs->fs_bsize, &bp);
		if (r)
			panic("Could not unwind indirect block, error %d", r);
		bap = (int32_t *)bp->b_data;
		bap[indirs[unwindidx].in_off] = 0;
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	if (deallocated) {
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void)ufs_quota_free_blocks(ip, btodb(deallocated), cred);

		ip->i_ffs1_blocks -= btodb(deallocated);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
	return (error);
}
Example No. 11
/*
 * Extended attribute area reading.
 */
int
ffs_ea_read(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	daddr64_t lbn, nextlbn;
	off_t ealeft;
	int error, size, xfersize;
	size_t oresid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	error = 0;
	oresid = uio->uio_resid;
	ealeft = dp->di_extsize;

	/*
	 * Loop over the amount of data requested by the caller, stopping only
	 * if an error occurs. By default, we always try to copy a file system
	 * block worth of bytes per iteration ('xfersize'). Check this value
	 * against what is left to be copied ('uio->uio_resid'), and the amount
	 * of bytes past our current position in the extended attribute area
	 * ('ealeft').
	 */

	while (uio->uio_resid > 0) {

		ealeft -= uio->uio_offset;
		if (ealeft <= 0)
			break;

		xfersize = fs->fs_bsize;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (ealeft < xfersize)
			xfersize = ealeft;

		/*
		 * Get the corresponding logical block number. Read it in,
		 * doing read-ahead if possible.
		 */

		lbn = lblkno(fs, uio->uio_offset);
		size = sblksize(fs, dp->di_extsize, lbn);
		nextlbn = lbn + 1;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize)
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		else {
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/* Check for short-reads. */
		if (bp->b_resid) {
			brelse(bp);
			error = EIO;
			break;
		}

		/* Finally, copy out the data, and release the buffer. */
		error = uiomove(bp->b_data, xfersize, uio);
		brelse(bp);
		if (error)
			break;
	}

	if ((error == 0 || uio->uio_resid != oresid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;

	return (error);
}
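
The clamping logic in the loop reduces to a three-way minimum: one filesystem
block, the bytes the caller still wants, and the bytes left in the extended
attribute area. Expressed on its own (a sketch with a hypothetical helper
name, not the kernel code):

#include <stdint.h>

/* xfersize = min(fs block size, caller's residual, bytes left in EA area) */
static int
model_xfersize(int bsize, int64_t resid, int64_t ealeft)
{
	int64_t n = bsize;

	if (resid < n)
		n = resid;
	if (ealeft < n)
		n = ealeft;
	return ((int)n);
}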
Example No. 12
/*
 * Vnode op for reading.
 */
int
cd9660_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	register struct iso_node *ip = VTOI(vp);
	register struct iso_mnt *imp;
	struct buf *bp;
	daddr_t lbn, rablock;
	off_t diff;
	int error = 0;
	long size, n, on;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	ip->i_flag |= IN_ACCESS;
	imp = ip->i_mnt;
	do {
		struct cluster_info *ci = &ip->i_ci;

		lbn = lblkno(imp, uio->uio_offset);
		on = blkoff(imp, uio->uio_offset);
		n = min((u_int)(imp->logical_block_size - on),
			uio->uio_resid);
		diff = (off_t)ip->i_size - uio->uio_offset;
		if (diff <= 0)
			return (0);
		if (diff < n)
			n = diff;
		size = blksize(imp, ip, lbn);
		rablock = lbn + 1;
#define MAX_RA 32
		if (ci->ci_lastr + 1 == lbn) {
			struct ra {
				daddr_t blks[MAX_RA];
				int sizes[MAX_RA];
			} *ra;
			int i;

			ra = malloc(sizeof *ra, M_TEMP, M_WAITOK);
			for (i = 0; i < MAX_RA &&
			    lblktosize(imp, (rablock + i)) < ip->i_size;
			    i++) {
				ra->blks[i] = rablock + i;
				ra->sizes[i] = blksize(imp, ip, rablock + i);
			}
			error = breadn(vp, lbn, size, ra->blks,
			    ra->sizes, i, &bp);
			free(ra, M_TEMP, 0);
		} else
			error = bread(vp, lbn, size, &bp);
		ci->ci_lastr = lbn;
		n = min(n, size - bp->b_resid);
		if (error) {
			brelse(bp);
			return (error);
		}

		error = uiomovei(bp->b_data + on, (int)n, uio);

		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}
Example No. 13
/*
 * this function handles traditional block mapping
 */
static int
ext2_ind_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid, seqcount;
	u_short mode;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ_S);

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("%s: short symlink", READ_S);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", READ_S, vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0"));
	fs = ip->I_FS;
	if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize)
		return (EOVERFLOW);
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		xfersize = fs->e2fs_fsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, NOCRED, &bp);
		else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0)
		error = cluster_read(vp, ip->i_size, lbn, size,
  			NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
		else if (seqcount > 1) {
			int nextsize = BLKSIZE(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else
			error = bread(vp, lbn, size, NOCRED, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset,
  			(int)xfersize, uio);
		if (error)
			break;

		bqrelse(bp);
	}
	if (bp != NULL)
		bqrelse(bp);
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}
Example No. 14
/*
 * Vnode op for reading.
 */
int
ffs_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	mode_t mode;
	int error;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = DIP(ip, mode);
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)DIP(ip, size) < vp->v_mount->mnt_maxsymlinklen ||
		    (vp->v_mount->mnt_maxsymlinklen == 0 &&
		     DIP(ip, blocks) == 0))
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	fs = ip->i_fs;
	if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	if (uio->uio_resid == 0)
		return (0);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = DIP(ip, size) - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = fs->fs_bsize;	/* WAS blksize(fs, ip, lbn); */
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= DIP(ip, size))
			error = bread(vp, lbn, size, &bp);
		else if (lbn - 1 == ip->i_ci.ci_lastr) {
			error = bread_cluster(vp, lbn, size, &bp);
		} else
			error = bread(vp, lbn, size, &bp);

		if (error)
			break;
		ip->i_ci.ci_lastr = lbn;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomovei(bp->b_data + blkoffset, (int)xfersize, uio);
		if (error)
			break;
		brelse(bp);
	}
	if (bp != NULL)
		brelse(bp);
	if (!(vp->v_mount->mnt_flag & MNT_NOATIME) ||
	    (ip->i_flag & (IN_CHANGE | IN_UPDATE))) {
		ip->i_flag |= IN_ACCESS;
	}
	return (error);
}
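
ffs_read() above, like ext2fs_read() and ext2_ind_read() earlier, gates
read-ahead on the same test: only issue it when the next logical block still
begins before end of file, i.e. lblktosize(fs, nextlbn) < file size. As a
predicate (a sketch, with a plain multiply standing in for the shift-based
macro):

#include <stdint.h>

/* Worth reading ahead only if block nextlbn starts before EOF. */
static int
model_want_readahead(int64_t nextlbn, int bsize, int64_t filesize)
{
	return (nextlbn * bsize < filesize);	/* lblktosize(fs, nextlbn) */
}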
Example No. 15
/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vnode *vp, off_t length, int flags, Ucred *cred)
{
	print("HARVEY TODO: %s\n", __func__);
#if 0
	struct inode *ip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR];
	ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno;
	struct bufobj *bo;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	int softdeptrunc, journaltrunc;
	int needextclean, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror, indiroff, waitforupdate;
	off_t osize;

	ip = VTOI(vp);
	ump = VFSTOUFS(vp->v_mount);
	fs = ump->um_fs;
	bo = &vp->v_bufobj;

	ASSERT_VOP_LOCKED(vp, "ffs_truncate");

	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
#ifdef QUOTA
	error = getinoquota(ip);
	if (error)
		return (error);
#endif
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating. So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	waitforupdate = (flags & IO_SYNC) != 0 || !DOINGASYNC(vp);
	/*
	 * If we are truncating the extended-attributes, and cannot
	 * do it with soft updates, then do it slowly here. If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	allerror = 0;
	needextclean = 0;
	softdeptrunc = 0;
	journaltrunc = DOINGSUJ(vp);
	if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0)
		softdeptrunc = !softdep_slowdown(vp);
	extblocks = 0;
	datablocks = DIP(ip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (length != 0)
			panic("ffs_truncate: partial trunc of extdata");
		if (softdeptrunc || journaltrunc) {
			if ((flags & IO_NORMAL) == 0)
				goto extclean;
			needextclean = 1;
		} else {
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
#ifdef QUOTA
			(void) chkdq(ip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(vp, V_ALT, 0, 0);
			vn_pages_remove(vp,
			    OFF_TO_IDX(lblktosize(fs, -extblocks)), 0);
			osize = ip->i_din2->di_extsize;
			ip->i_din2->di_blocks -= extblocks;
			ip->i_din2->di_extsize = 0;
			for (i = 0; i < UFS_NXADDR; i++) {
				oldblks[i] = ip->i_din2->di_extb[i];
				ip->i_din2->di_extb[i] = 0;
			}
			ip->i_flag |= IN_CHANGE;
			if ((error = ffs_update(vp, waitforupdate)))
				return (error);
			for (i = 0; i < UFS_NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(ump, fs, ITODEVVP(ip), oldblks[i],
				    sblksize(fs, osize, i), ip->i_number,
				    vp->v_type, nil);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (vp->v_type == VLNK &&
	    (ip->i_size < vp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(ip), (uint)ip->i_size);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, waitforupdate));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
	if (IS_SNAPSHOT(ip))
		ffs_snapremove(vp);
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	osize = ip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(vp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(vp, waitforupdate));
	}
	/*
	 * Lookup block number for a given offset. Zero length files
	 * have no blocks, so return a blkno of -1.
	 */
	lbn = lblkno(fs, length - 1);
	if (length == 0) {
		blkno = -1;
	} else if (lbn < UFS_NDADDR) {
		blkno = DIP(ip, i_db[lbn]);
	} else {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize,
		    cred, BA_METAONLY, &bp);
		if (error)
			return (error);
		indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
		if (I_IS_UFS1(ip))
			blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		else
			blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		/*
		 * If the block number is non-zero, then the indirect block
		 * must have been previously allocated and need not be written.
		 * If the block number is zero, then we may have allocated
		 * the indirect block and hence need to write it out.
		 */
		if (blkno != 0)
			brelse(bp);
		else if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * If the block number at the new end of the file is zero,
	 * then we must allocate it to ensure that the last block of 
	 * the file is allocated. Soft updates does not handle this
	 * case, so here we have to clean up the soft updates data
	 * structures describing the allocation past the truncation
	 * point. Finding and deallocating those structures is a lot of
	 * work. Since partial truncation with a hole at the end occurs
	 * rarely, we solve the problem by syncing the file so that it
	 * will have no soft updates data structures left.
	 */
	if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
		return (error);
	if (blkno != 0 && DOINGSOFTDEP(vp)) {
		if (softdeptrunc == 0 && journaltrunc == 0) {
			/*
			 * If soft updates cannot handle this truncation,
			 * clean up soft dependency data structures and
			 * fall through to the synchronous truncation.
			 */
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
			flags = IO_NORMAL | (needextclean ? IO_EXT: 0);
			if (journaltrunc)
				softdep_journal_freeblocks(ip, cred, length,
				    flags);
			else
				softdep_setup_freeblocks(ip, length, flags);
			ASSERT_VOP_LOCKED(vp, "ffs_truncate1");
			if (journaltrunc == 0) {
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				error = ffs_update(vp, 0);
			}
			return (error);
		}
	}
	/*
	 * Shorten the size of the file. If the last block of the
	 * shortened file is unallocated, we must allocate it.
	 * Additionally, if the file is not being truncated to a
	 * block boundary, the contents of the partial block
	 * following the end of the file must be zero'ed in
	 * case it ever becomes accessible again because of
	 * subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (blkno != 0 && offset == 0) {
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(vp) && lbn < UFS_NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		size = blksize(fs, ip, lbn);
		if (vp->v_type != VDIR && offset != 0)
			bzero((char *)bp->b_data + offset,
			    (uint)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
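	/*
	 * Worked example of the zeroing above (assumed geometry, not
	 * taken from this code: 8 KiB blocks, 1 KiB frags, regular
	 * file): truncating to length 10000 gives offset = blkoff(fs,
	 * 10000) = 1808 and size = blksize(...) = 2048, so bytes
	 * 1808..2047 of the final fragment are cleared and cannot leak
	 * back into view if the file later grows.
	 */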
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - UFS_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[UFS_NDADDR + level] = DIP(ip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(ip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < UFS_NDADDR; i++) {
		oldblks[i] = DIP(ip, i_db[i]);
		if (i > lastblock)
			DIP_SET(ip, i_db[i], 0);
	}
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ffs_update(vp, waitforupdate);
	
	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < UFS_NDADDR; i++) {
		newblks[i] = DIP(ip, i_db[i]);
		DIP_SET(ip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < UFS_NIADDR; i++) {
		newblks[UFS_NDADDR + i] = DIP(ip, i_ib[i]);
		DIP_SET(ip, i_ib[i], oldblks[UFS_NDADDR + i]);
	}
	ip->i_size = osize;
	DIP_SET(ip, i_size, osize);

	error = vtruncbuf(vp, cred, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -UFS_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
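	/*
	 * These negative lbns are the conventional "names" of the
	 * indirect blocks themselves. Under an assumed geometry of
	 * UFS_NDADDR = 12 and NINDIR(fs) = 2048, the single indirect
	 * block is lbn -12 and the double indirect block is lbn -2061;
	 * ffs_indirtrunc() recurses downward from these roots.
	 */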
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(ip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(ip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(ip, i_ib[level], 0);
				ffs_blkfree(ump, fs, ump->um_devvp, bn,
				    fs->fs_bsize, ip->i_number,
				    vp->v_type, nil);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = UFS_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(ip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(ip, i_db[i], 0);
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ump, fs, ump->um_devvp, bn, bsize, ip->i_number,
		    vp->v_type, nil);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(ip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ump, fs, ump->um_devvp, bn,
			   oldspace - newspace, ip->i_number, vp->v_type, nil);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[UFS_NDADDR + level] != DIP(ip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < UFS_NDADDR; i++)
		if (newblks[i] != DIP(ip, i_db[i]))
			panic("ffs_truncate2");
	BO_LOCK(bo);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("ffs_truncate3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	ip->i_size = length;
	DIP_SET(ip, i_size, length);
	if (DIP(ip, i_blocks) >= blocksreleased)
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased);
	else	/* sanity */
		DIP_SET(ip, i_blocks, 0);
	ip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(ip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);

extclean:
	if (journaltrunc)
		softdep_journal_freeblocks(ip, cred, length, IO_EXT);
	else
		softdep_setup_freeblocks(ip, length, IO_EXT);
	return (ffs_update(vp, waitforupdate));

#endif // 0
	return 0;
}
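The index arithmetic above is easy to sanity-check in isolation. Below is a minimal standalone sketch; the names are hypothetical, and the 16 KiB block size, 2048-entry indirect blocks, and 12 direct pointers are assumptions for illustration, not values taken from the example.

#include <stdio.h>

#define SKETCH_NDADDR 12	/* direct pointers per inode, as in UFS */

/*
 * For a truncation to "length" bytes with "bsize"-byte blocks and
 * "nindir" pointers per indirect block, print the last surviving
 * index at each level, mirroring the lastblock/lastiblock[] values
 * computed in the example above.
 */
static void
sketch_trunc_bounds(long long length, long long bsize, long long nindir)
{
	long long lastblock, single, dbl, triple;

	lastblock = (length + bsize - 1) / bsize - 1;	/* cf. lblkno() */
	single = lastblock - SKETCH_NDADDR;
	dbl = single - nindir;
	triple = dbl - nindir * nindir;
	printf("lastblock %lld single %lld double %lld triple %lld\n",
	    lastblock, single, dbl, triple);
}

int
main(void)
{
	/* 1 MiB file, 16 KiB blocks, 2048 pointers per indirect block. */
	sketch_trunc_bounds(1048576LL, 16384, 2048);
	return (0);
}

This prints lastblock 63 and single 51, with double and triple negative. A negative result at a level means the whole indirect tree at that level is released, which is exactly the case in which the loops above zero DIP(ip, i_ib[level]).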
Example #17
/*
 * Vnode op for reading.
 */
static int
ext2_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct m_ext2fs *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid, seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", "ext2_read");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("%s: short symlink", "ext2_read");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", "ext2_read", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0"));
	fs = ip->i_e2fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->e2fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		xfersize = fs->e2fs_fsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, NOCRED, &bp);
		else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			error = cluster_read(vp, ip->i_size, lbn, size,
			    NOCRED, blkoffset + uio->uio_resid, seqcount,
			    0, &bp);
		} else if (seqcount > 1) {
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else
			error = bread(vp, lbn, size, NOCRED, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if (ioflag & (IO_VMIO|IO_DIRECT)) {
			/*
			 * If it's VMIO or direct I/O, then we don't
			 * need the buf, mark it available for
			 * freeing. If it's non-direct VMIO, the VM has
			 * the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * bp can be non-NULL here only in the case of an error, because
	 * the loop above resets bp to NULL on each iteration and, on
	 * normal completion, never stores a new value into it; so it
	 * must have come from a 'break' statement.
	 */
	if (bp != NULL) {
		if (ioflag & (IO_VMIO|IO_DIRECT)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}
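The sizing logic in the loop above reduces to a three-way clamp. Here is a minimal sketch under assumed names (sketch_xfersize is hypothetical, not part of the ext2 code; the caller is assumed to have already checked offset < filesize):

#include <stdio.h>

/*
 * Bytes to move in one pass of a block-oriented read loop: the rest
 * of the current block, capped by what the caller still wants and by
 * what remains in the file.
 */
static long
sketch_xfersize(long long offset, long long filesize, long resid,
    long long bsize)
{
	long long n = bsize - offset % bsize;	/* to end of this block */

	if (n > resid)
		n = resid;
	if (n > filesize - offset)
		n = filesize - offset;
	return ((long)n);
}

int
main(void)
{
	/* 4 KiB blocks, 10000-byte file, 8192 bytes wanted at offset 3000. */
	printf("%ld\n", sketch_xfersize(3000, 10000, 8192, 4096));
	return (0);
}

This prints 1096: the transfer stops at the block boundary, and the next iteration starts a fresh block, which is what lets the cluster_read()/bread() choice above be made per block.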
Example #18
int
ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
    int flags, struct buf **bpp)
{
	daddr_t lbn, lastlbn, nb, newb, *blkp;
	daddr_t pref, *allocblk, allociblk[NIADDR + 1];
	daddr_t *bap, *allocib;
	int deallocated, osize, nsize, num, i, error, unwindidx, r;
	struct buf *bp, *nbp;
	struct indir indirs[NIADDR + 2];
	struct fs *fs;
	struct vnode *vp;
	struct proc *p;
	
	vp = ITOV(ip);
	fs = ip->i_fs;
	p = curproc;
	unwindidx = -1;

	lbn = lblkno(fs, off);
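	/*
	 * From here on, "size" is measured from the start of the block,
	 * so the fragment rounding further down covers the in-block
	 * offset as well as the bytes being written.
	 */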
	size = blkoff(fs, off) + size;

	if (size > fs->fs_bsize)
		panic("ffs2_balloc: block too big");

	if (bpp != NULL)
		*bpp = NULL;

	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block, and the
	 * file is currently composed of a fragment, this fragment has to be
	 * extended to be a full block.
	 */
	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			error = ffs_realloccg(ip, nb, ffs2_blkpref(ip,
			    lastlbn, nb, &ip->i_ffs2_db[0]), osize,
			    (int) fs->fs_bsize, cred, bpp, &newb);
			if (error)
				return (error);

			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb, newb,
				    ip->i_ffs2_db[nb], fs->fs_bsize, osize,
				    bpp ? *bpp : NULL);

			ip->i_ffs2_size = lblktosize(fs, nb + 1);
			uvm_vnp_setsize(vp, ip->i_ffs2_size);
			ip->i_ffs2_db[nb] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;

			if (bpp) {
				if (flags & B_SYNC)
					bwrite(*bpp);
				else
					bawrite(*bpp);
			}
		}
	}

	/*
	 * The first NDADDR blocks are direct.
	 */
	if (lbn < NDADDR) {

		nb = ip->i_ffs2_db[lbn];

		if (nb != 0 && ip->i_ffs2_size >= lblktosize(fs, lbn + 1)) {
			/*
			 * The direct block is already allocated and the file
			 * extends past this block, thus this must be a whole
			 * block. Just read it, if requested.
			 */
			if (bpp != NULL) {
				error = bread(vp, lbn, fs->fs_bsize, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}

			return (0);
		}

		if (nb != 0) {
			/*
			 * Consider the need to allocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);

			if (nsize <= osize) {
				/*
				 * The existing block is already at least as
				 * big as we want. Just read it, if requested.
				 */
				if (bpp != NULL) {
					error = bread(vp, lbn, fs->fs_bsize,
					    bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
					(*bpp)->b_bcount = osize;
				}

				return (0);
			} else {
				/*
				 * The existing block is smaller than we want,
				 * grow it.
				 */
				error = ffs_realloccg(ip, lbn,
				    ffs2_blkpref(ip, lbn, (int) lbn,
				    &ip->i_ffs2_db[0]), osize, nsize, cred,
				    bpp, &newb);
				if (error)
					return (error);

				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    newb, nb, nsize, osize,
					    bpp ? *bpp : NULL);
			}
		} else {
			/*
			 * The block was not previously allocated, allocate a
			 * new block or fragment.
			 */
			if (ip->i_ffs2_size < lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;

			error = ffs_alloc(ip, lbn, ffs2_blkpref(ip, lbn,
			    (int) lbn, &ip->i_ffs2_db[0]), nsize, cred, &newb);
			if (error)
				return (error);

			if (bpp != NULL) {
				bp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
				if (nsize < fs->fs_bsize)
					bp->b_bcount = nsize;
				bp->b_blkno = fsbtodb(fs, newb);
				if (flags & B_CLRBUF)
					clrbuf(bp);
				*bpp = bp;
			}

			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bpp ? *bpp : NULL);
		}

		ip->i_ffs2_db[lbn] = newb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;

		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	error = ufs_getlbns(vp, lbn, indirs, &num);
	if (error)
		return (error);
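	/*
	 * Illustration (assumed geometry: NDADDR = 12, NINDIR = 2048):
	 * lbn = 1000 lies in the single indirect range, so indirs[0]
	 * names the i_ffs2_ib[] slot and indirs[1].in_off = 1000 - 12 =
	 * 988 is the entry within that indirect block.
	 */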

#ifdef DIAGNOSTIC
	if (num < 1)
		panic("ffs2_balloc: ufs_bmaparray returned indirect block");
#endif

	/*
	 * Fetch the first indirect block, allocating it if necessary.
	 */
	--num;
	nb = ip->i_ffs2_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;

	if (nb == 0) {
		pref = ffs2_blkpref(ip, lbn, -indirs[0].in_off - 1, NULL);
		error = ffs_alloc(ip, lbn, pref, (int) fs->fs_bsize, cred,
		    &newb);
		if (error)
			goto fail;

		nb = newb;
		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);

		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else {
			/*
			 * Write synchronously so that indirect blocks never
			 * point at garbage.
			 */
			error = bwrite(bp);
			if (error)
				goto fail;
		}

		unwindidx = 0;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp, indirs[i].in_lbn, (int)fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}

		bap = (int64_t *) bp->b_data;
		nb = bap[indirs[i].in_off];

		if (i == num)
			break;

		i++;

		if (nb != 0) {
			brelse(bp);
			continue;
		}

		if (pref == 0)
			pref = ffs2_blkpref(ip, lbn, i - num - 1, NULL);

		error = ffs_alloc(ip, lbn, pref, (int) fs->fs_bsize, cred,
		    &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}

		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);

		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else {
			/*
			 * Write synchronously so that indirect blocks never
			 * point at garbage.
			 */
			error = bwrite(nbp);
			if (error) {
				brelse(bp);
				goto fail;
			}
		}

		if (unwindidx < 0)
			unwindidx = i - 1;

		bap[indirs[i - 1].in_off] = nb;

		/*
		 * If required, write synchronously, otherwise use delayed
		 * write.
		 */
		if (flags & B_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs2_blkpref(ip, lbn, indirs[num].in_off, &bap[0]);

		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
		    &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}

		nb = newb;
		*allocblk++ = nb;

		if (bpp != NULL) {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			if (flags & B_CLRBUF)
				clrbuf(nbp);
			*bpp = nbp;
		}

		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[num].in_off, nb, 0, bpp ? *bpp : NULL);

		bap[indirs[num].in_off] = nb;

		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;

		/*
		 * If required, write synchronously, otherwise use delayed
		 * write.
		 */
		if (flags & B_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);

		return (0);
	}

	brelse(bp);

	if (bpp != NULL) {
		if (flags & B_CLRBUF) {
			error = bread(vp, lbn, (int)fs->fs_bsize, &nbp);
			if (error) {
				brelse(nbp);
				goto fail;
			}
		} else {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
		}

		*bpp = nbp;
	}

	return (0);

fail:
	/*
	 * If we have failed to allocate any blocks, simply return the error.
	 * This is the usual case and avoids the need to fsync the file.
	 */
	if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
		return (error);
	/*
	 * If we have failed part way through block allocation, we have to
	 * deallocate any indirect blocks that we have allocated. We have to
	 * fsync the file before we start to get rid of all of its
	 * dependencies so that we do not leave them dangling. We have to sync
	 * it at the end so that the softdep code does not find any untracked
	 * changes. Although this is really slow, running out of disk space is
	 * not expected to be a common occurrence. The error return from fsync
	 * is ignored as we already have an error to return to the user.
	 */
	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
	if (unwindidx >= 0) {
		/*
		 * First write out any buffers we've created to resolve their
		 * softdeps. This must be done in reverse order of creation so
		 * that we resolve the dependencies in one pass.
		 * Write the cylinder group buffers for these buffers too.
		 */
		for (i = num; i >= unwindidx; i--) {
			if (i == 0)
				break;

			bp = getblk(vp, indirs[i].in_lbn, (int) fs->fs_bsize,
			    0, 0);
			if (bp->b_flags & B_DELWRI) {
				nb = fsbtodb(fs, cgtod(fs, dtog(fs,
				    dbtofsb(fs, bp->b_blkno))));
				bwrite(bp);
				bp = getblk(ip->i_devvp, nb,
				    (int) fs->fs_cgsize, 0, 0);
				if (bp->b_flags & B_DELWRI)
					bwrite(bp);
				else {
					bp->b_flags |= B_INVAL;
					brelse(bp);
				}
			} else {
				bp->b_flags |= B_INVAL;
				brelse(bp);
			}
		}

		if (DOINGSOFTDEP(vp) && unwindidx == 0) {
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			ffs_update(ip, 1);
		}

		/*
		 * Now that any dependencies that we created have been
		 * resolved, we can undo the partial allocation.
		 */
		if (unwindidx == 0) {
			*allocib = 0;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (DOINGSOFTDEP(vp))
				ffs_update(ip, 1);
		} else {
			r = bread(vp, indirs[unwindidx].in_lbn,
			    (int)fs->fs_bsize, &bp);
			if (r)
				panic("ffs2_balloc: unwind failed");

			bap = (int64_t *) bp->b_data;
			bap[indirs[unwindidx].in_off] = 0;
			bwrite(bp);
		}

		for (i = unwindidx + 1; i <= num; i++) {
			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
			    0);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}

	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}

	if (deallocated) {
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) ufs_quota_free_blocks(ip, btodb(deallocated), cred);

		ip->i_ffs2_blocks -= btodb(deallocated);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
	return (error);
}
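The fail path above follows a reusable pattern: every block allocated on the way down is recorded in a scratch array (allociblk/allocblk), so a failure at any depth frees exactly what this call allocated. Below is a minimal self-contained sketch of that bookkeeping; all names are hypothetical, and the stub allocator stands in for ffs_alloc()/ffs_blkfree() and deliberately fails partway through.

#include <stdio.h>

#define SKETCH_MAXDEPTH 4	/* data block plus up to 3 indirect levels */

static long next_blk = 100;	/* toy block number source */
static int fail_at = 3;		/* force a failure on the 3rd allocation */

/* Hypothetical stand-in for ffs_alloc(): hand out toy block numbers. */
static int
sketch_alloc(long *blkp)
{
	if (--fail_at == 0)
		return (28);	/* pretend we hit ENOSPC */
	*blkp = next_blk++;
	printf("allocated %ld\n", *blkp);
	return (0);
}

/* Hypothetical stand-in for ffs_blkfree(). */
static void
sketch_free(long blk)
{
	printf("freed %ld\n", blk);
}

/*
 * Allocate a chain of blocks, recording each one so a mid-chain
 * failure can be unwound, mirroring allociblk[]/allocblk above.
 */
static int
sketch_alloc_chain(int depth)
{
	long allociblk[SKETCH_MAXDEPTH], *allocblk = allociblk;
	long *blkp;
	int i, error;

	for (i = 0; i < depth; i++) {
		if ((error = sketch_alloc(allocblk)) != 0)
			goto fail;
		allocblk++;		/* remember the new block */
	}
	return (0);
fail:
	/* Free everything recorded so far, like the loop over allociblk. */
	for (blkp = allociblk; blkp < allocblk; blkp++)
		sketch_free(*blkp);
	return (error);
}

int
main(void)
{
	return (sketch_alloc_chain(SKETCH_MAXDEPTH));
}

The real function layers two extra concerns on top of this pattern: syncing the file to resolve soft-dependency state before anything is freed, and zeroing the on-disk pointer (allocib or the bap[] entry) that named the unwound subtree.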