Example no. 1
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ffs_update(struct vnode *vp, int waitfor)
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);

	/*
	 * The vnode type is usually set to VBAD if an unrecoverable I/O
 * error has occurred (such as when reading the inode).  Clear the
	 * modified bits but do not write anything out in this case.
	 */
	if (vp->v_type == VBAD)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	error = bread(ip->i_devvp, 
		      fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
		      (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}
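
A minimal caller sketch (example_touch_inode is a hypothetical helper, not from the original source), using the types and macros from the example above: a caller marks the in-core inode dirty through the i_flag bits, then lets ffs_update() push the dinode to disk.

static int
example_touch_inode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	/* ufs_itimes() folds these into IN_MODIFIED on the next update */
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	return (ffs_update(vp, 1));	/* waitfor != 0: wait for the write */
}
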
Example no. 2
static void
read_inode_bitmap(struct mount *mp, unsigned long block_group,
		  unsigned int bitmap_nr)
{
	struct ext2_sb_info *sb = VFSTOEXT2(mp)->um_e2fs;
	struct ext2_group_desc *gdp;
	struct buf *bh;
	int	error;

	gdp = get_group_desc (mp, block_group, NULL);
	if ((error = bread (VFSTOEXT2(mp)->um_devvp,
			    fsbtodoff(sb, gdp->bg_inode_bitmap),
			    sb->s_blocksize, &bh)) != 0)
		panic ( "read_inode_bitmap:"
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %lu",
			    block_group, (unsigned long) gdp->bg_inode_bitmap);
	sb->s_inode_bitmap_number[bitmap_nr] = block_group;
	sb->s_inode_bitmap[bitmap_nr] = bh;
	LCK_BUF(bh)
}
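
A hedged sketch of a consumer (example_inode_in_use is hypothetical, not in the source): once the bitmap buffer is pinned in sb->s_inode_bitmap[bitmap_nr], testing whether an inode inside the group is allocated is a plain little-endian bit test on the buffer data.

static int
example_inode_in_use(struct ext2_sb_info *sb, unsigned int bitmap_nr, int idx)
{
	unsigned char *map;

	map = (unsigned char *)sb->s_inode_bitmap[bitmap_nr]->b_data;
	return ((map[idx >> 3] >> (idx & 7)) & 1);	/* 1 = in use */
}
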
Example no. 3
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
    struct ext2_sb_info *fs;
    struct buf *bp;
    struct inode *ip;
    int error;

    ext2_itimes(vp);
    ip = VTOI(vp);
    if ((ip->i_flag & IN_MODIFIED) == 0)
        return (0);
    ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
    if (vp->v_mount->mnt_flag & MNT_RDONLY)
        return (0);
    fs = ip->i_e2fs;
    error = bread(ip->i_devvp,
                  fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
                  (int)fs->s_blocksize, &bp);
    if (error) {
        brelse(bp);
        return (error);
    }
    ext2_di2ei( &ip->i_din, (struct ext2_inode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) *
                ino_to_fsbo(fs, ip->i_number)));
    /*
    	if (waitfor && (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
    		return (bwrite(bp));
    	else {
    */
    bdwrite(bp);
    return (0);
    /*
    	}
    */
}
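
The placement arithmetic is worth isolating: ino_to_fsba() names the filesystem block holding the inode and ino_to_fsbo() its index within that block, so the byte offset inside the buffer is that index scaled by the on-disk inode size. A sketch under those assumptions (example_locate_dinode is hypothetical):

static struct ext2_inode *
example_locate_dinode(struct ext2_sb_info *fs, struct buf *bp, ino_t ino)
{
    /* same arithmetic as the ext2_di2ei() destination above */
    return ((struct ext2_inode *)((char *)bp->b_data +
            EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)));
}
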
Example no. 4
/*
 * ffs_balloc(struct vnode *a_vp, ufs_daddr_t a_lbn, int a_size,
 *	      struct ucred *a_cred, int a_flags, struct buf *a_bpp)
 *
 * Balloc defines the structure of filesystem storage by allocating
 * the physical blocks on a device given the inode and the logical
 * block number in a file.
 *
 * NOTE: B_CLRBUF - this flag tells balloc to clear invalid portions
 *	 of the buffer.  However, any dirty bits will override missing
 *	 valid bits.  This case occurs when writable mmaps are truncated
 *	 and then extended.
 */
int
ffs_balloc(struct vop_balloc_args *ap)
{
	struct inode *ip;
	ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	int flags;
	struct fs *fs;
	ufs_daddr_t nb;
	struct buf *bp, *nbp, *dbp;
	struct vnode *vp;
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	ufs_daddr_t *lbns_remfree, lbns[NIADDR + 1];
	int unwindidx;
	int seqcount;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	lbn = lblkno(fs, ap->a_startoffset);
	size = blkoff(fs, ap->a_startoffset) + ap->a_size;
	if (size > fs->fs_bsize)
		panic("ffs_balloc: blk too big");
	*ap->a_bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	cred = ap->a_cred;
	flags = ap->a_flags;

	/*
	 * The vnode must be locked for us to be able to safely mess
	 * around with the inode.
	 */
	if (vn_islocked(vp) != LK_EXCLUSIVE) {
		panic("ffs_balloc: vnode %p not exclusively locked!", vp);
	}

	/*
	 * If the next write will extend the file into a new block,
	 * and the file currently ends in a fragment, that fragment
	 * has to be extended into a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/*
		 * The file size prior to this write fits in the direct
		 * blocks, so the last allocated block may be a fragment,
		 * and this write extends the file beyond the block that
		 * held the end of file before this write.
		 */
		osize = blksize(fs, ip, nb);
		/*
		 * osize is the space allocated on disk in the last block,
		 * either a number of fragments or a full filesystem block.
		 */
		if (osize < fs->fs_bsize && osize > 0) {
			/*
			 * Fragments are already allocated in the last block.
			 * Since the file now extends beyond them, grow that
			 * allocation into a complete block.
			 */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb,
				    dofftofsb(fs, bp->b_bio2.bio_offset), 
				    ip->i_db[nb], fs->fs_bsize, osize, bp);
			/* adjust the inode size, we just grew */
			ip->i_size = smalllblktosize(fs, nb + 1);
			ip->i_db[nb] = dofftofsb(fs, bp->b_bio2.bio_offset);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & B_SYNC)
				bwrite(bp);
			else
				bawrite(bp);
			/* bp is already released here */
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
			error = bread(vp, lblktodoff(fs, lbn), fs->fs_bsize, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
			*ap->a_bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lblktodoff(fs, lbn), 
					      osize, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
			} else {
				/*
				 * NOTE: ffs_realloccg() issues a bread().
				 */
				error = ffs_realloccg(ip, lbn,
				    ffs_blkpref(ip, lbn, (int)lbn,
					&ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    dofftofsb(fs, bp->b_bio2.bio_offset),
					    nb, nsize, osize, bp);
			}
		} else {
			if (ip->i_size < smalllblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lblktodoff(fs, lbn), nsize, 0, 0);
			bp->b_bio2.bio_offset = fsbtodoff(fs, newb);
			if (flags & B_CLRBUF)
				vfs_bio_clrbuf(bp);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bp);
		}
		ip->i_db[lbn] = dofftofsb(fs, bp->b_bio2.bio_offset);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*ap->a_bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return(error);
#ifdef DIAGNOSTIC
	if (num < 1)
		panic ("ffs_balloc: ufs_bmaparray returned indirect block");
#endif
	/*
	 * Get a handle on the data block buffer before working through 
	 * indirect blocks to avoid a deadlock between the VM system holding
	 * a locked VM page and issuing a BMAP (which tries to lock the
	 * indirect blocks), and the filesystem holding a locked indirect
	 * block and then trying to read a data block (which tries to lock
	 * the underlying VM pages).
	 */
	dbp = getblk(vp, lblktodoff(fs, lbn), fs->fs_bsize, 0, 0);

	/*
	 * Setup undo history
	 */
	allocib = NULL;
	allocblk = allociblk;
	lbns_remfree = lbns;

	unwindidx = -1;

	/*
	 * Fetch the first indirect block directly from the inode, allocating
	 * one if necessary. 
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, NULL);
		/*
		 * If the filesystem has run out of space we can skip the
		 * full fsync/undo of the main [fail] case since no undo
		 * history has been built yet.  Hence the goto fail2.
		 */
	        if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb)) != 0)
			goto fail2;
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[1].in_lbn;
		bp = getblk(vp, lblktodoff(fs, indirs[1].in_lbn),
			    fs->fs_bsize, 0, 0);
		bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		vfs_bio_clrbuf(bp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if (DOINGASYNC(vp))
				bdwrite(bp);
			else if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp, lblktodoff(fs, indirs[i].in_lbn), (int)fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, NULL);
		if ((error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) != 0) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[i].in_lbn;
		nbp = getblk(vp, lblktodoff(fs, indirs[i].in_lbn),
			     fs->fs_bsize, 0, 0);
		nbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}

	/*
	 * Get the data block, allocating if necessary.  We have already
	 * called getblk() on the data block buffer, dbp.  If we have to
	 * allocate it and B_CLRBUF has been set the inference is an intention
	 * to zero out the related disk blocks, so we do not have to issue
	 * a read.  Instead we simply call vfs_bio_clrbuf().  If B_CLRBUF is
	 * not set the caller intends to overwrite the entire contents of the
	 * buffer and we don't waste time trying to clean up the contents.
	 *
	 * bp references the current indirect block.  When allocating, 
	 * the block must be updated.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = lbn;
		dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		if (flags & B_CLRBUF)
			vfs_bio_clrbuf(dbp);
		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, dbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		*ap->a_bpp = dbp;
		return (0);
	}
	brelse(bp);

	/*
	 * At this point all related indirect blocks have been allocated
	 * if necessary and released.  bp is no longer valid.  dbp holds
	 * our getblk()'d data block.
	 *
	 * XXX we previously performed a cluster_read operation here.
	 */
	if (flags & B_CLRBUF) {
		/*
		 * If B_CLRBUF is set we must validate the invalid portions
		 * of the buffer.  This typically requires a read-before-
		 * write.  The strategy call will fill in bio_offset in that
		 * case.
		 *
		 * If we hit this case we do a cluster read if possible
		 * since nearby data blocks are likely to be accessed soon
		 * too.
		 */
		if ((dbp->b_flags & B_CACHE) == 0) {
			bqrelse(dbp);
			seqcount = (flags & B_SEQMASK) >> B_SEQSHIFT;
			if (seqcount &&
			    (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
				error = cluster_read(vp, (off_t)ip->i_size,
					    lblktodoff(fs, lbn),
					    (int)fs->fs_bsize, 
					    fs->fs_bsize,
					    seqcount * BKVASIZE,
					    &dbp);
			} else {
				error = bread(vp, lblktodoff(fs, lbn),
					      (int)fs->fs_bsize, &dbp);
			}
			if (error)
				goto fail;
		} else {
			dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		}
	} else {
		/*
		 * If B_CLRBUF is not set the caller intends to overwrite
		 * the entire contents of the buffer.  We can simply set
		 * bio_offset and we are done.
		 */
		dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
	}
	*ap->a_bpp = dbp;
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
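
To make the block ranges concrete, here is a hedged sketch (example_indirection_level is hypothetical, not part of ffs_balloc()) of how a logical block number maps to the indirection depth that ufs_getlbns() reports: blocks below NDADDR are direct, the next NINDIR(fs) blocks take one indirect hop, and each further level multiplies the span by NINDIR(fs).

static int
example_indirection_level(struct fs *fs, ufs_daddr_t lbn)
{
	ufs_daddr_t base = NDADDR;	/* first lbn needing indirection */
	ufs_daddr_t span = NINDIR(fs);	/* lbns covered at this level */
	int level;

	if (lbn < NDADDR)
		return (0);		/* direct block in ip->i_db[] */
	for (level = 1; level <= NIADDR; level++) {
		if (lbn < base + span)
			return (level);	/* indirect blocks to traverse */
		base += span;
		span *= NINDIR(fs);
	}
	return (-1);			/* beyond triple indirect */
}
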
Example no. 5
/*
 * ext2_reallocblks(struct vnode *a_vp, struct cluster_save *a_buflist)
 */
int
ext2_reallocblks(struct vop_reallocblks_args *ap)
{
#ifndef FANCY_REALLOC
/* kprintf("ext2_reallocblks not implemented\n"); */
return ENOSPC;
#else

	struct ext2_sb_info *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_e2fs;
#ifdef UNKLAR
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
#endif
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = lblkno(fs, buflist->bs_children[0]->b_loffset);
	end_lbn = start_lbn + len - 1;
#if DIAGNOSTIC
	for (i = 1; i < len; i++) {
		if (buflist->bs_children[i]->b_loffset != lblktodoff(fs, start_lbn) + lblktodoff(fs, i))
			panic("ext2_reallocblks: non-cluster");
	}
#endif
	/*
	 * If the latest allocation is in a new block group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous block group.
	 */
	if (dtog(fs, dofftofsb(fs, buflist->bs_children[0]->b_bio2.bio_offset)) !=
	    dtog(fs, dofftofsb(fs, buflist->bs_children[len - 1]->b_bio2.bio_offset)))
		return (ENOSPC);
	if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->s_blocksize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ext2_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#if DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ext2_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->s_blocksize, NOCRED, &ebp))
			goto fail;
		ebap = (daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (daddr_t)ext2_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_long (*)())ext2_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->s_frags_per_block) {
		if (i == ssize)
			bap = ebap;
#if DIAGNOSTIC
		if (buflist->bs_children[i]->b_bio2.bio_offset != fsbtodoff(fs, *bap))
			panic("ext2_reallocblks: alloc mismatch");
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			EXT2_UPDATE(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->s_frags_per_block) {
		ext2_blkfree(ip, dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
		    fs->s_blocksize);
		buflist->bs_children[i]->b_bio2.bio_offset = fsbtodoff(fs, blkno);
	}
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);

#endif /* FANCY_REALLOC */
}
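
A hedged sketch of the pointer-rewrite step in isolation (example_assign_cluster is hypothetical): once ext2_hashalloc() hands back a contiguous run starting at newblk, consecutive logical blocks receive physical addresses one filesystem block (s_frags_per_block fragments) apart, exactly as in the loop above.

static void
example_assign_cluster(struct ext2_sb_info *fs, daddr_t *bap,
		       daddr_t newblk, int len)
{
	daddr_t blkno = newblk;
	int i;

	for (i = 0; i < len; i++, blkno += fs->s_frags_per_block)
		bap[i] = blkno;		/* rewrite pointers in file order */
}
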
Example no. 6
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, off_t doffset, daddr_t lastbn,
                int level, long *countp)
{
    int i;
    struct buf *bp;
    struct ext2_sb_info *fs = ip->i_e2fs;
    daddr_t *bap;
    struct vnode *vp;
    daddr_t *copy, nb, nlbn, last;
    long blkcount, factor;
    int nblocks, blocksreleased = 0;
    int error = 0, allerror = 0;

    /*
     * Calculate index in current block of last
     * block to be kept.  -1 indicates the entire
     * block so we need not calculate the index.
     */
    factor = 1;
    for (i = SINGLE; i < level; i++)
        factor *= NINDIR(fs);
    last = lastbn;
    if (lastbn > 0)
        last /= factor;
    nblocks = btodb(fs->s_blocksize);
    /*
     * Get buffer of block pointers, zero those entries corresponding
     * to blocks to be free'd, and update on disk copy first.  Since
     * double(triple) indirect before single(double) indirect, calls
     * to bmap on these blocks will fail.  However, we already have
     * the on disk address, so we have to set the bio_offset field
     * explicitly instead of letting bread do everything for us.
     */
    vp = ITOV(ip);
    bp = getblk(vp, lblktodoff(fs, lbn), (int)fs->s_blocksize, 0, 0);
    if ((bp->b_flags & B_CACHE) == 0) {
        bp->b_flags &= ~(B_ERROR | B_INVAL);
        bp->b_cmd = BUF_CMD_READ;
        if (bp->b_bcount > bp->b_bufsize)
            panic("ext2_indirtrunc: bad buffer size");
        bp->b_bio2.bio_offset = doffset;
        bp->b_bio1.bio_done = biodone_sync;
        bp->b_bio1.bio_flags |= BIO_SYNC;
        vfs_busy_pages(bp->b_vp, bp);
        vn_strategy(vp, &bp->b_bio1);
        error = biowait(&bp->b_bio1, "biord");
    }
    if (error) {
        brelse(bp);
        *countp = 0;
        return (error);
    }

    bap = (daddr_t *)bp->b_data;
    MALLOC(copy, daddr_t *, fs->s_blocksize, M_TEMP, M_WAITOK);
    bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->s_blocksize);
    bzero((caddr_t)&bap[last + 1],
          (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
    if (last == -1)
        bp->b_flags |= B_INVAL;
    error = bwrite(bp);
    if (error)
        allerror = error;
    bap = copy;

    /*
     * Recursively free totally unused blocks.
     */
    for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
            i--, nlbn += factor) {
        nb = bap[i];
        if (nb == 0)
            continue;
        if (level > SINGLE) {
            if ((error = ext2_indirtrunc(ip, nlbn,
                                         fsbtodoff(fs, nb), (daddr_t)-1, level - 1, &blkcount)) != 0)
                allerror = error;
            blocksreleased += blkcount;
        }
        ext2_blkfree(ip, nb, fs->s_blocksize);
        blocksreleased += nblocks;
    }

    /*
     * Recursively free last partial block.
     */
    if (level > SINGLE && lastbn >= 0) {
        last = lastbn % factor;
        nb = bap[i];
        if (nb != 0) {
            error = ext2_indirtrunc(ip, nlbn, fsbtodoff(fs, nb),
                                    last, level - 1, &blkcount);
            if (error)
                allerror = error;
            blocksreleased += blkcount;
        }
    }
    FREE(copy, M_TEMP);
    *countp = blocksreleased;
    return (allerror);
}
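
The factor/last arithmetic at the top is easy to misread, so here it is isolated (example_indir_factor is hypothetical): factor is NINDIR(fs) raised to (level - SINGLE), i.e. how many data blocks each pointer at this level spans, and last = lastbn / factor is then the index of the last pointer that must be kept.

static long
example_indir_factor(struct ext2_sb_info *fs, int level)
{
    long factor = 1;
    int i;

    /* SINGLE -> 1, DOUBLE -> NINDIR(fs), TRIPLE -> NINDIR(fs)^2 */
    for (i = SINGLE; i < level; i++)
        factor *= NINDIR(fs);
    return (factor);
}
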
Example no. 7
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
    struct vnode *ovp = vp;
    daddr_t lastblock;
    struct inode *oip;
    daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
    daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
    struct ext2_sb_info *fs;
    struct buf *bp;
    int offset, size, level;
    long count, nblocks, blocksreleased = 0;
    int i;
    int aflags, error, allerror;
    off_t osize;
    /*
    kprintf("ext2_truncate called %d to %d\n", VTOI(ovp)->i_number, length);
    */
    /*
     * Negative file sizes will totally break the code below and
     * are not meaningful anyway.
     */
    if (length < 0)
        return EFBIG;

    oip = VTOI(ovp);
    if (ovp->v_type == VLNK &&
            oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#if DIAGNOSTIC
        if (length != 0)
            panic("ext2_truncate: partial truncate of symlink");
#endif
        bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
        oip->i_size = 0;
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (EXT2_UPDATE(ovp, 1));
    }
    if (oip->i_size == length) {
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (EXT2_UPDATE(ovp, 0));
    }
#if QUOTA
    if ((error = ext2_getinoquota(oip)) != 0)
        return (error);
#endif
    fs = oip->i_e2fs;
    osize = oip->i_size;
    ext2_discard_prealloc(oip);
    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of osize is 0, length will be at least 1.
     */
    if (osize < length) {
        offset = blkoff(fs, length - 1);
        lbn = lblkno(fs, length - 1);
        aflags = B_CLRBUF;
        if (flags & IO_SYNC)
            aflags |= B_SYNC;
        vnode_pager_setsize(ovp, length);
        error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, aflags);
        if (error) {
            vnode_pager_setsize(ovp, osize);
            return (error);
        }
        oip->i_size = length;
        if (aflags & B_SYNC)
            bwrite(bp);
        else
            bawrite(bp);
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (EXT2_UPDATE(ovp, 1));
    }
    /*
     * Shorten the size of the file. If the file is not being
     * truncated to a block boundary, the contents of the
     * partial block following the end of the file must be
     * zeroed in case it ever becomes accessible again because
     * of subsequent file growth.
     */
    /* I don't understand the comment above */
    offset = blkoff(fs, length);
    if (offset == 0) {
        oip->i_size = length;
    } else {
        lbn = lblkno(fs, length);
        aflags = B_CLRBUF;
        if (flags & IO_SYNC)
            aflags |= B_SYNC;
        error = ext2_balloc(oip, lbn, offset, cred, &bp, aflags);
        if (error)
            return (error);
        oip->i_size = length;
        size = blksize(fs, oip, lbn);
        bzero((char *)bp->b_data + offset, (u_int)(size - offset));
        allocbuf(bp, size);
        if (aflags & B_SYNC)
            bwrite(bp);
        else
            bawrite(bp);
    }
    /*
     * Calculate index into inode's block list of
     * last direct and indirect blocks (if any)
     * which we want to keep.  Lastblock is -1 when
     * the file is truncated to 0.
     */
    lastblock = lblkno(fs, length + fs->s_blocksize - 1) - 1;
    lastiblock[SINGLE] = lastblock - NDADDR;
    lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
    lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
    nblocks = btodb(fs->s_blocksize);
    /*
     * Update file and block pointers on disk before we start freeing
     * blocks.  If we crash before free'ing blocks below, the blocks
     * will be returned to the free list.  lastiblock values are also
     * normalized to -1 for calls to ext2_indirtrunc below.
     */
    bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
    for (level = TRIPLE; level >= SINGLE; level--)
        if (lastiblock[level] < 0) {
            oip->i_ib[level] = 0;
            lastiblock[level] = -1;
        }
    for (i = NDADDR - 1; i > lastblock; i--)
        oip->i_db[i] = 0;
    oip->i_flag |= IN_CHANGE | IN_UPDATE;
    allerror = EXT2_UPDATE(ovp, 1);

    /*
     * Having written the new inode to disk, save its new configuration
     * and put back the old block pointers long enough to process them.
     * Note that we save the new block configuration so we can check it
     * when we are done.
     */
    bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
    bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
    oip->i_size = osize;
    error = vtruncbuf(ovp, length, (int)fs->s_blocksize);
    if (error && (allerror == 0))
        allerror = error;

    /*
     * Indirect blocks first.
     */
    indir_lbn[SINGLE] = -NDADDR;
    indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
    indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
    for (level = TRIPLE; level >= SINGLE; level--) {
        bn = oip->i_ib[level];
        if (bn != 0) {
            error = ext2_indirtrunc(oip, indir_lbn[level],
                                    fsbtodoff(fs, bn), lastiblock[level], level, &count);
            if (error)
                allerror = error;
            blocksreleased += count;
            if (lastiblock[level] < 0) {
                oip->i_ib[level] = 0;
                ext2_blkfree(oip, bn, fs->s_frag_size);
                blocksreleased += nblocks;
            }
        }
        if (lastiblock[level] >= 0)
            goto done;
    }

    /*
     * All whole direct blocks or frags.
     */
    for (i = NDADDR - 1; i > lastblock; i--) {
        long bsize;

        bn = oip->i_db[i];
        if (bn == 0)
            continue;
        oip->i_db[i] = 0;
        bsize = blksize(fs, oip, i);
        ext2_blkfree(oip, bn, bsize);
        blocksreleased += btodb(bsize);
    }
    if (lastblock < 0)
        goto done;

    /*
     * Finally, look for a change in size of the
     * last direct block; release any frags.
     */
    bn = oip->i_db[lastblock];
    if (bn != 0) {
        long oldspace, newspace;

        /*
         * Calculate amount of space we're giving
         * back as old block size minus new block size.
         */
        oldspace = blksize(fs, oip, lastblock);
        oip->i_size = length;
        newspace = blksize(fs, oip, lastblock);
        if (newspace == 0)
            panic("itrunc: newspace");
        if (oldspace - newspace > 0) {
            /*
             * Block number of space to be free'd is
             * the old block # plus the number of frags
             * required for the storage we're keeping.
             */
            bn += numfrags(fs, newspace);
            ext2_blkfree(oip, bn, oldspace - newspace);
            blocksreleased += btodb(oldspace - newspace);
        }
    }
done:
#if DIAGNOSTIC
    for (level = SINGLE; level <= TRIPLE; level++)
        if (newblks[NDADDR + level] != oip->i_ib[level])
            panic("itrunc1");
    for (i = 0; i < NDADDR; i++)
        if (newblks[i] != oip->i_db[i])
            panic("itrunc2");
    if (length == 0 && (!RB_EMPTY(&ovp->v_rbdirty_tree) ||
                        !RB_EMPTY(&ovp->v_rbclean_tree)))
        panic("itrunc3");
#endif /* DIAGNOSTIC */
    /*
     * Put back the real size.
     */
    oip->i_size = length;
    oip->i_blocks -= blocksreleased;
    if (oip->i_blocks < 0)			/* sanity */
        oip->i_blocks = 0;
    oip->i_flag |= IN_CHANGE;
    vnode_pager_setsize(ovp, length);
#if QUOTA
    ext2_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
    return (allerror);
}
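
A worked example of the lastiblock[] computation (hedged; assuming 1 KiB blocks, so NDADDR = 12 and NINDIR(fs) = 256):

    /*
     * length = 0:      lastblock = -1, so every lastiblock[] is negative
     *                  and all three indirection levels are freed.
     * length = 16 KiB: lastblock = 15, lastiblock[SINGLE] = 3 (keep
     *                  pointers 0..3 of the single indirect block),
     *                  lastiblock[DOUBLE] and lastiblock[TRIPLE] < 0.
     */
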
Example no. 8
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
static
int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2_mount *ump;
	struct mount *mp;
	struct ext2_sb_info *fs;
	struct indir a[NIADDR+1], *xap;
	ext2_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	error = ext2_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			ext2_daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn+1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 &&
		     !findblk(vp, dbtodoff(fs, metalbn), FINDBLK_TEST)) ||
		    metalbn == bn) {
			break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, lblktodoff(fs, metalbn),
			    mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			/*
			 * This runs through ext2_strategy using bio2 to
			 * cache the disk offset, then comes back through
			 * bio1.  So we want to wait on bio1
			 */
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			bp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(bp->b_vp, bp);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(&bp->b_bio1, "biord");
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ext2_daddr_t *)bp->b_data)[bn - 1],
			    ((ext2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for(--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump, ((ext2_daddr_t *)bp->b_data)[bn],
					    ((ext2_daddr_t *)bp->b_data)[bn+1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
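
A worked example of the negative metadata addressing described in the header comment (hedged; assuming 1 KiB blocks, NDADDR = 12, NINDIR(fs) = 256), matching the indir_lbn[] initialization in ext2_truncate() above:

	/*
	 * single indirect: first data block it points to is 12,
	 *                  so it is addressed as lbn -12.
	 * double indirect: one less than -268 (the address of the first
	 *                  indirect block it points to), i.e. lbn -269.
	 * triple indirect: one less than -65805, i.e. lbn -65806.
	 */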