Example #1
File: bmap.c  Project: ele7enxxh/dtrace-pf
static __inline nandfs_lbn_t
lbn_offset(struct nandfs_device *fsdev, int level)
{
	nandfs_lbn_t res;

	for (res = 1; level > 0; level--)
		res *= MNINDIR(fsdev);
	return (res);
}
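
lbn_offset() is just MNINDIR(fsdev) raised to the given power: the number of data blocks reachable through one pointer slot at that indirection level. Below is a minimal user-space sketch of the same arithmetic; DEMO_MNINDIR is an assumed value (1024 pointers per indirect block), not a constant taken from the nandfs source.

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for MNINDIR(fsdev): block pointers per indirect block. */
#define DEMO_MNINDIR	1024

/* Same loop as lbn_offset(): DEMO_MNINDIR raised to the power 'level'. */
static int64_t
lbn_offset_demo(int level)
{
	int64_t res;

	for (res = 1; level > 0; level--)
		res *= DEMO_MNINDIR;
	return (res);
}

int
main(void)
{
	int level;

	/* level 0: 1, level 1: 1024, level 2: 1048576, level 3: 1073741824 */
	for (level = 0; level <= 3; level++)
		printf("level %d: one slot covers %lld data blocks\n",
		    level, (long long)lbn_offset_demo(level));
	return (0);
}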
Example #2
File: bmap.c  Project: ele7enxxh/dtrace-pf
static nandfs_lbn_t
blocks_inside(struct nandfs_device *fsdev, int level, struct nandfs_indir *nip)
{
	nandfs_lbn_t blocks;

	for (blocks = 1; level >= SINGLE; level--, nip++) {
		MPASS(nip->in_off >= 0 && nip->in_off < MNINDIR(fsdev));
		blocks += nip->in_off * lbn_offset(fsdev, level);
	}

	return (blocks);
}
Example #3
File: bmap.c  Project: ele7enxxh/dtrace-pf
nandfs_lbn_t
get_maxfilesize(struct nandfs_device *fsdev)
{
	struct nandfs_indir f[NIADDR];
	nandfs_lbn_t max;
	int i;

	max = NDADDR;

	for (i = 0; i < NIADDR; i++) {
		f[i].in_off = MNINDIR(fsdev) - 1;
		max += blocks_inside(fsdev, i, f);
	}

	max *= fsdev->nd_blocksize;

	return (max);
}
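
With every in_off pinned at MNINDIR(fsdev) - 1, blocks_inside(fsdev, i, f) evaluates to MNINDIR^(i+1), so get_maxfilesize() is effectively computing (NDADDR + MNINDIR + MNINDIR^2 + MNINDIR^3) * blocksize. The sketch below reproduces that arithmetic with assumed geometry (12 direct blocks, 3 indirect levels, 512 pointers per 4 KB indirect block); the DEMO_* values are illustrative, not nandfs constants.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NDADDR	12	/* assumed direct block pointers */
#define DEMO_NIADDR	3	/* assumed indirect levels */
#define DEMO_MNINDIR	512	/* assumed pointers per indirect block */
#define DEMO_BLOCKSIZE	4096	/* assumed block size in bytes */

int
main(void)
{
	uint64_t max, capacity;
	int i;

	max = DEMO_NDADDR;
	capacity = 1;
	for (i = 0; i < DEMO_NIADDR; i++) {
		/* Each additional indirection level multiplies coverage. */
		capacity *= DEMO_MNINDIR;
		max += capacity;
	}
	max *= DEMO_BLOCKSIZE;

	/* (12 + 512 + 512^2 + 512^3) blocks of 4 KB, roughly 513 GiB. */
	printf("max file size: %llu bytes\n", (unsigned long long)max);
	return (0);
}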
Example #4
File: ext2_bmap.c  Project: Alkzndr/freebsd
/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ib and
 * once with the offset into the page itself.
 */
int
ext2_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump)
{
	long blockcnt;
	e2fs_lbn_t metalbn, realbn;
	struct ext2mount *ump;
	int i, numlevels, off;
	int64_t qblockcnt;

	ump = VFSTOEXT2(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if ((long)bn < 0)
		bn = -(long)bn;

	/* The first NDADDR blocks are direct blocks. */
	if (bn < NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and NIADDR - i is the number
	 * of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		/*
		 * Use int64_t's here to avoid overflow for triple indirect
		 * blocks when longs have 32 bits and the block size is more
		 * than 4K.
		 */
		qblockcnt = (int64_t)blockcnt * MNINDIR(ump);
		if (bn < qblockcnt)
			break;
		blockcnt = qblockcnt;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;
	ap++;
	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		++ap;

		metalbn -= -1 + off * blockcnt;
		blockcnt /= MNINDIR(ump);
	}
	if (nump)
		*nump = numlevels;
	return (0);
}
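
The level-determination loop can be read on its own: subtract NDADDR, then keep subtracting the capacity of each indirection level until the remaining offset fits. The standalone sketch below mirrors that logic with assumed constants standing in for NDADDR, NIADDR and MNINDIR(ump); the sample block numbers are chosen for this assumed geometry.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NDADDR	12	/* assumed direct block pointers */
#define DEMO_NIADDR	3	/* assumed indirect levels */
#define DEMO_MNINDIR	1024	/* assumed pointers per indirect block */

/*
 * Return the indirection depth needed to reach logical block bn:
 * 0 for a direct block, 1..3 for single/double/triple indirect,
 * -1 when the block lies beyond triple indirection (the EFBIG case).
 */
static int
levels_needed(int64_t bn)
{
	int64_t blockcnt, qblockcnt;
	int i;

	if (bn < DEMO_NDADDR)
		return (0);

	for (blockcnt = 1, i = DEMO_NIADDR, bn -= DEMO_NDADDR;;
	    i--, bn -= blockcnt) {
		if (i == 0)
			return (-1);
		qblockcnt = blockcnt * DEMO_MNINDIR;
		if (bn < qblockcnt)
			break;
		blockcnt = qblockcnt;
	}
	return (DEMO_NIADDR - i + 1);
}

int
main(void)
{
	int64_t samples[] = { 5, 12, 1036, 1049612, 1074791436 };
	size_t n;

	/* Prints 0, 1, 2, 3 and -1 for the samples above. */
	for (n = 0; n < sizeof(samples) / sizeof(samples[0]); n++)
		printf("bn %lld needs %d level(s) of indirection\n",
		    (long long)samples[n], levels_needed(samples[n]));
	return (0);
}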
Example #5
File: ext2_bmap.c  Project: Alkzndr/freebsd
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2mount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *ap;
	daddr_t daddr;
	e2fs_lbn_t metalbn;
	int error, num, maxrun = 0, bsize;
	int *nump;

	ap = NULL;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	devvp = ump->um_devvp;

	bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

	if (runp) {
		maxrun = mp->mnt_iosize_max / bsize - 1;
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}


	ap = a;
	nump = &num;
	error = ext2_getlbns(vp, bn, ap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0) {
			*bnp = -1;
		} else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn + 1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[ap->in_off];

	for (bp = NULL, ++ap; --num; ++ap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = ap->in_lbn;
		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		bp = getblk(vp, metalbn, bsize, 0, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef INVARIANTS
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_iocmd = BIO_READ;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
			vfs_busy_pages(bp, 0);
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);
			curthread->td_ru.ru_inblock++;
			error = bufwait(bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = ap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((e2fs_daddr_t *)bp->b_data)[bn - 1],
			    ((e2fs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = ap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump,
					((e2fs_daddr_t *)bp->b_data)[bn],
					((e2fs_daddr_t *)bp->b_data)[bn + 1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 && daddr < ump->um_seqinc){
		*bnp = -1;
		return (0);
	}
	*bnp = blkptrtodb(ump, daddr);
	if (*bnp == 0) {
		*bnp = -1;
	}
	return (0);
}
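
The runp/runb scans above only count how many neighbouring logical blocks are also physically adjacent on disk, so the caller can cluster them into a single transfer. Below is a simplified user-space sketch of the same forward/backward counting; here "sequential" just means consecutive integers, whereas the kernel's is_sequential() compares device addresses offset by the filesystem's per-block increment.

#include <stdio.h>

/* Simplified adjacency test standing in for is_sequential(). */
static int
demo_sequential(long prev, long cur)
{
	return (cur == prev + 1);
}

int
main(void)
{
	/* Hypothetical on-disk addresses of logical blocks 0..7. */
	long db[] = { 100, 101, 102, 103, 200, 201, 50, 51 };
	int nblk = sizeof(db) / sizeof(db[0]);
	int bn = 2;		/* the block being mapped */
	int maxrun = 7;		/* most extra blocks one transfer may add */
	int runp = 0, runb = 0;
	int i;

	/* Forward run: blocks after bn that are contiguous on disk. */
	for (i = bn + 1; i < nblk && runp < maxrun &&
	    demo_sequential(db[i - 1], db[i]); i++, runp++)
		;
	/* Backward run: blocks before bn that are contiguous on disk. */
	for (i = bn - 1; i >= 0 && runb < maxrun &&
	    demo_sequential(db[i], db[i + 1]); i--, runb++)
		;

	/* Prints: bn 2: 1 block(s) readable ahead, 2 behind */
	printf("bn %d: %d block(s) readable ahead, %d behind\n",
	    bn, runp, runb);
	return (0);
}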
Example #6
/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *transmp;
	mode_t mode;
	int error = 0;
	int logged = 0;

	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	transmp = vp->v_mount;
	fstrans_start(transmp, FSTRANS_SHARED);
	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_ffs_effnlink == 0 && DOINGSOFTDEP(vp))
		softdep_releasefile(ip);

	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		logged = 1;
#ifdef QUOTA
		(void)chkiq(ip, -1, NOCRED, 0);
#endif
#ifdef UFS_EXTATTR
		ufs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			/*
			 * When journaling, only truncate one indirect block
			 * at a time
			 */
			if (vp->v_mount->mnt_wapbl) {
				uint64_t incr = MNINDIR(ip->i_ump) <<
				    vp->v_mount->mnt_fs_bshift; /* Power of 2 */
				uint64_t base = NDADDR <<
				    vp->v_mount->mnt_fs_bshift;
				while (!error && ip->i_size > base + incr) {
					/*
					 * round down to next full indirect
					 * block boundary.
					 */
					uint64_t nsize = base +
					    ((ip->i_size - base - 1) &
					    ~(incr - 1));
					error = UFS_TRUNCATE(vp, nsize, 0,
					    NOCRED);
					if (error)
						break;
					UFS_WAPBL_END(vp->v_mount);
					error = UFS_WAPBL_BEGIN(vp->v_mount);
					if (error)
						goto out;
				}
			}
			if (!error)
				error = UFS_TRUNCATE(vp, (off_t)0, 0, NOCRED);
		}
		/*
		 * Setting the mode to zero needs to wait for the inode
		 * to be written just as does a change to the link count.
		 * So, rather than creating a new entry point to do the
		 * same thing, we just use softdep_change_linkcnt().
		 */
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		mutex_enter(&vp->v_interlock);
		vp->v_iflag |= VI_FREEING;
		mutex_exit(&vp->v_interlock);
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip);
		UFS_VFREE(vp, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		if (!logged++) {
			int err;
			err = UFS_WAPBL_BEGIN(vp->v_mount);
			if (err)
				goto out;
		}
		UFS_UPDATE(vp, NULL, NULL, 0);
	}
	if (logged)
		UFS_WAPBL_END(vp->v_mount);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);
	VOP_UNLOCK(vp, 0);
	fstrans_done(transmp);
	return (error);
}
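
The journaled (WAPBL) path above truncates at most one indirect block's worth of the file per transaction, rounding each intermediate size down to an indirect-block boundary above the direct-block region; because incr is a power of two, the mask expression is an aligned round-down. The sketch below walks through that size sequence with assumed geometry (4 KB blocks, 1024 pointers per indirect block, 12 direct blocks) for a hypothetical 10 MB file.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Assumed geometry, not taken from any particular filesystem. */
	uint64_t bshift = 12;				/* 4 KB blocks */
	uint64_t incr = (uint64_t)1024 << bshift;	/* bytes per indirect block */
	uint64_t base = (uint64_t)12 << bshift;		/* bytes in direct blocks */
	uint64_t size = 10ULL * 1024 * 1024;		/* 10 MB file */

	/* Same loop shape as the journaled truncation in ufs_inactive(). */
	while (size > base + incr) {
		uint64_t nsize = base + ((size - base - 1) & ~(incr - 1));
		printf("truncate %llu -> %llu\n",
		    (unsigned long long)size, (unsigned long long)nsize);
		size = nsize;
	}
	printf("final truncate %llu -> 0\n", (unsigned long long)size);
	return (0);
}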
Example #7
File: ufs_bmap.c  Project: sofuture/bitrig
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ufs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
int
ufs_bmaparray(struct vnode *vp, daddr64_t bn, daddr64_t *bnp, struct indir *ap,
    int *nump, int *runp)
{
	struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *xap;
	daddr64_t daddr, metalbn;
	int error, maxrun = 0, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ufs_bmaparray: invalid arguments");
#endif

	if (runp) {
		/*
		 * XXX
		 * If MAXBSIZE is the largest transfer the disks can handle,
		 * we probably want maxrun to be 1 block less so that we
		 * don't create a block larger than the device can handle.
		 */
		*runp = 0;
		maxrun = MAXBSIZE / mp->mnt_stat.f_iosize - 1;
	}

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, DIP(ip, db[bn]));
		if (*bnp == 0)
			*bnp = -1;
		else if (runp)
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, DIP(ip, db[bn - 1]),
			        DIP(ip, db[bn]));
			    ++bn, ++*runp);
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = DIP(ip, ib[xap->in_off]);

	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
	for (bp = NULL, ++xap; --num; ++xap) {
		/* 
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			;
		}
#ifdef DIAGNOSTIC
		else if (!daddr)
			panic("ufs_bmaparray: indirect block not in cache");
#endif
		else {
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			bcstats.pendingreads++;
			bcstats.numreads++;
			VOP_STRATEGY(bp);
			curproc->p_ru.ru_inblock++;		/* XXX */
			if ((error = biowait(bp)) != 0) {
				brelse(bp);
				return (error);
			}
		}

#ifdef FFS2
		if (ip->i_ump->um_fstype == UM_UFS2) {
			daddr = ((int64_t *)bp->b_data)[xap->in_off];
			if (num == 1 && daddr && runp)
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(ump) && *runp < maxrun &&
				    is_sequential(ump,
					((int64_t *)bp->b_data)[bn - 1],
					((int64_t *)bp->b_data)[bn]);
				    ++bn, ++*runp);

			continue;
		}

#endif /* FFS2 */

		daddr = ((int32_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp)
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
				((int32_t *)bp->b_data)[bn - 1],
				((int32_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
	}
	if (bp)
		brelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
Example #8
File: ufs_bmap.c  Project: sofuture/bitrig
/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ffs_ib and
 * once with the offset into the page itself.
 */
int
ufs_getlbns(struct vnode *vp, daddr64_t bn, struct indir *ap, int *nump)
{
	daddr64_t metalbn, realbn;
	struct ufsmount *ump;
	int64_t blockcnt;
	int i, numlevels, off;

	ump = VFSTOUFS(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if (bn < 0)
		bn = -bn;

#ifdef DIAGNOSTIC
	if (realbn < 0 && realbn > -NDADDR) {
		panic ("ufs_getlbns: Invalid indirect block %lld specified",
		    realbn);
	}
#endif

	/* The first NDADDR blocks are direct blocks. */
	if (bn < NDADDR)
		return (0);

	/* 
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the given level of indirection, and NIADDR - i is the number
	 * of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		blockcnt *= MNINDIR(ump);
		if (bn < blockcnt)
			break;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/* 
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;
	ap->in_exists = 0;
	ap++;
	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		blockcnt /= MNINDIR(ump);
		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + off * blockcnt;
	}
#ifdef DIAGNOSTIC
	if (realbn < 0 && metalbn != realbn) {
		panic("ufs_getlbns: indirect block %lld not found", realbn);
	}
#endif
	if (nump)
		*nump = numlevels;
	return (0);
}
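
To see the negative metadata lbns and the doubled first entry that the comment above describes, the whole routine can be exercised in user space. The sketch below is a rendition under assumed geometry (the DEMO_* constants stand in for NDADDR, NIADDR and MNINDIR(ump)); for block 2060 it reports the inode's double-indirect slot twice (lbn -1037, offset 1) and then the inner single-indirect block at lbn -2060, offset 0.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NDADDR	12	/* assumed direct block pointers */
#define DEMO_NIADDR	3	/* assumed indirect levels */
#define DEMO_MNINDIR	1024	/* assumed pointers per indirect block */

struct demo_indir {
	int64_t	in_lbn;		/* logical block of the indirect block */
	int	in_off;		/* slot offset inside that block */
};

/* User-space rendition of the getlbns logic, with the kernel macros
 * replaced by the assumed DEMO_* constants. */
static int
demo_getlbns(int64_t bn, struct demo_indir *ap, int *nump)
{
	int64_t blockcnt, metalbn, realbn;
	int i, numlevels, off;

	*nump = 0;
	numlevels = 0;
	realbn = bn;
	if (bn < 0)
		bn = -bn;
	if (bn < DEMO_NDADDR)
		return (0);

	/* Determine the number of levels of indirection. */
	for (blockcnt = 1, i = DEMO_NIADDR, bn -= DEMO_NDADDR;;
	    i--, bn -= blockcnt) {
		if (i == 0)
			return (-1);		/* EFBIG */
		blockcnt *= DEMO_MNINDIR;
		if (bn < blockcnt)
			break;
	}

	/* Address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + DEMO_NIADDR - i);
	else
		metalbn = -(-realbn - bn + DEMO_NIADDR - i);

	ap->in_lbn = metalbn;
	ap->in_off = off = DEMO_NIADDR - i;
	ap++;
	for (++numlevels; i <= DEMO_NIADDR; i++) {
		if (metalbn == realbn)
			break;
		blockcnt /= DEMO_MNINDIR;
		off = (bn / blockcnt) % DEMO_MNINDIR;
		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		++ap;
		metalbn -= -1 + off * blockcnt;
	}
	*nump = numlevels;
	return (0);
}

int
main(void)
{
	struct demo_indir path[DEMO_NIADDR + 1];
	int num, n;

	if (demo_getlbns(2060, path, &num) == 0)
		for (n = 0; n < num; n++)
			printf("level %d: lbn %lld, slot %d\n", n,
			    (long long)path[n].in_lbn, path[n].in_off);
	return (0);
}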
Example #9
/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	struct proc *p = curproc;
	mode_t mode;
	int error = 0, logged = 0, truncate_error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_inactive: pushing active", vp);
#endif

	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_din1 == NULL || DIP(ip, mode) == 0)
		goto out;

	if (DIP(ip, nlink) <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		logged = 1;
		if (getinoquota(ip) == 0)
			(void)ufs_quota_free_inode(ip, NOCRED);
		if (DIP(ip, size) != 0 && vp->v_mount->mnt_wapbl) {
			/*
			 * When journaling, only truncate one indirect block at
			 * a time.
			 */
			uint64_t incr = MNINDIR(ip->i_ump) << fs->fs_bshift;
			uint64_t base = NDADDR << fs->fs_bshift;
			while (!error && DIP(ip, size) > base + incr) {
				/*
				 * round down to next full indirect block
				 * boundary.
				 */
				uint64_t nsize = base +
				    ((DIP(ip, size) - base - 1) &
				    ~(incr - 1));
				error = UFS_TRUNCATE(ip, nsize, 0, NOCRED);
				if (error)
					break;
				UFS_WAPBL_END(vp->v_mount);
				error = UFS_WAPBL_BEGIN(vp->v_mount);
				if (error)
					goto out;
			}
		}

		if (error == 0) {
			truncate_error = UFS_TRUNCATE(ip, (off_t)0, 0, NOCRED);
			/* XXX pedro: remove me */
			if (truncate_error)
				printf("UFS_TRUNCATE()=%d\n", truncate_error);
		}

		DIP_ASSIGN(ip, rdev, 0);
		mode = DIP(ip, mode);
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;

		/*
		 * Setting the mode to zero needs to wait for the inode to be
		 * written just as does a change to the link count. So, rather
		 * than creating a new entry point to do the same thing, we
		 * just use softdep_change_linkcnt(). Also, we can't let
		 * softdep co-opt us to help on its worklist, as we may end up
		 * trying to recycle vnodes and getting to this same point a
		 * couple of times, blowing the kernel stack. However, this
		 * could be optimized by checking if we are coming from
		 * vrele(), vput() or vclean() (by checking for VXLOCK) and
		 * just avoiding the co-opt to happen in the last case.
		 */
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip, 1);

		UFS_INODE_FREE(ip, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		if (!logged++) {
			int err;
			err = UFS_WAPBL_BEGIN(vp->v_mount);
			if (err) {
				error = err;
				goto out;
			}
		}
		UFS_UPDATE(ip, 0);
	}
	if (logged)
		UFS_WAPBL_END(vp->v_mount);
out:
	VOP_UNLOCK(vp, 0);

	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (error == 0 && truncate_error == 0 &&
	    (ip->i_din1 == NULL || DIP(ip, mode) == 0))
		vrecycle(vp, p);

	return (truncate_error ? truncate_error : error);
}
Example #10
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
static
int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2_mount *ump;
	struct mount *mp;
	struct ext2_sb_info *fs;
	struct indir a[NIADDR+1], *xap;
	ext2_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	error = ext2_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn+1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 &&
		     !findblk(vp, dbtodoff(fs, metalbn), FINDBLK_TEST)) ||
		    metalbn == bn) {
			break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, lblktodoff(fs, metalbn),
			    mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			/*
			 * This runs through ext2_strategy using bio2 to
			 * cache the disk offset, then comes back through
			 * bio1.  So we want to wait on bio1
			 */
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			bp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(bp->b_vp, bp);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(&bp->b_bio1, "biord");
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ext2_daddr_t *)bp->b_data)[bn - 1],
			    ((ext2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for(--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump, ((daddr_t *)bp->b_data)[bn],
					    ((daddr_t *)bp->b_data)[bn+1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
Example #11
File: bmap.c  Project: ele7enxxh/dtrace-pf
/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ib and
 * once with the offset into the page itself.
 */
static int
bmap_getlbns(struct nandfs_node *node, nandfs_lbn_t bn, struct nandfs_indir *ap, int *nump)
{
	nandfs_daddr_t blockcnt;
	nandfs_lbn_t metalbn, realbn;
	struct nandfs_device *fsdev;
	int i, numlevels, off;

	fsdev = node->nn_nandfsdev;

	DPRINTF(BMAP, ("%s: node %p bn=%jx mnindir=%zd enter\n", __func__,
	    node, bn, MNINDIR(fsdev)));

	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;

	if (bn < 0)
		bn = -bn;

	/* The first NDADDR blocks are direct blocks. */
	if (bn < NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and NIADDR - i is the number
	 * of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		DPRINTF(BMAP, ("%s: blockcnt=%jd i=%d bn=%jd\n", __func__,
		    blockcnt, i, bn));
		if (i == 0)
			return (EFBIG);
		blockcnt *= MNINDIR(fsdev);
		if (bn < blockcnt)
			break;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;

	DPRINTF(BMAP, ("%s: initial: ap->in_lbn=%jx ap->in_off=%d\n", __func__,
	    metalbn, off));

	ap++;
	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		blockcnt /= MNINDIR(fsdev);
		off = (bn / blockcnt) % MNINDIR(fsdev);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;

		DPRINTF(BMAP, ("%s: in_lbn=%jx in_off=%d\n", __func__,
		    ap->in_lbn, ap->in_off));
		++ap;

		metalbn -= -1 + off * blockcnt;
	}
	if (nump)
		*nump = numlevels;

	DPRINTF(BMAP, ("%s: numlevels=%d\n", __func__, numlevels));

	return (0);
}
Example #12
File: bmap.c  Project: ele7enxxh/dtrace-pf
int
bmap_truncate_mapping(struct nandfs_node *node, nandfs_lbn_t lastblk,
    nandfs_lbn_t todo)
{
	struct nandfs_inode *ip;
	struct nandfs_indir a[NIADDR + 1], f[NIADDR], *ap;
	nandfs_daddr_t indir_lbn[NIADDR];
	nandfs_daddr_t *copy;
	int error, level;
	nandfs_lbn_t left, tosub;
	struct nandfs_device *fsdev;
	int cleaned, i;
	int num, *nump;

	DPRINTF(BMAP, ("%s: node %p lastblk %jx truncating by %jx\n", __func__,
	    node, lastblk, todo));

	ip = &node->nn_inode;
	fsdev = node->nn_nandfsdev;

	ap = a;
	nump = &num;

	error = bmap_getlbns(node, lastblk, ap, nump);
	if (error)
		return (error);

	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - MNINDIR(fsdev) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - MNINDIR(fsdev)
	    * MNINDIR(fsdev) - 1;

	for (i = 0; i < NIADDR; i++) {
		f[i].in_off = MNINDIR(fsdev) - 1;
		f[i].in_lbn = 0xdeadbeef;
	}

	left = todo;

#ifdef DEBUG
	a[num].in_off = -1;
#endif

	ap++;
	num -= 2;

	if (num < 0)
		goto direct;

	copy = malloc(MNINDIR(fsdev) * sizeof(nandfs_daddr_t) * (num + 1),
	    M_NANDFSTEMP, M_WAITOK);

	for (level = num; level >= SINGLE && left > 0; level--) {
		cleaned = 0;

		if (ip->i_ib[level] == 0) {
			tosub = blocks_inside(fsdev, level, ap);
			if (tosub > left)
				left = 0;
			else
				left -= tosub;
		} else {
			if (ap == f)
				ap->in_lbn = indir_lbn[level];
			error = bmap_truncate_indirect(node, level, &left,
			    &cleaned, ap, f, copy);
			if (error) {
				nandfs_error("%s: error %d when truncate "
				    "at level %d\n", __func__, error, level);
				return (error);
			}
		}

		if (cleaned) {
			nandfs_vblock_end(fsdev, ip->i_ib[level]);
			ip->i_ib[level] = 0;
		}

		ap = f;
	}

	free(copy, M_NANDFSTEMP);

direct:
	if (num < 0)
		i = lastblk;
	else
		i = NDADDR - 1;

	for (; i >= 0 && left > 0; i--) {
		if (ip->i_db[i] != 0) {
			error = nandfs_bdestroy(node, ip->i_db[i]);
			if (error) {
				nandfs_error("%s: cannot destroy "
				    "block %jx, error %d\n", __func__,
				    (uintmax_t)ip->i_db[i], error);
				return (error);
			}
			ip->i_db[i] = 0;
		}

		left--;
	}

	KASSERT(left == 0,
	    ("truncated wrong number of blocks (%jd should be 0)", left));

	return (error);
}
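
indir_lbn[] is the negative-lbn naming scheme from the bmap header comments made concrete: the single indirect block is named by the negative of the first data block it maps (-NDADDR), and each deeper indirect block is named by one less than the first child block it points to. The sketch below simply evaluates those anchors for an assumed geometry; the DEMO_* values are illustrative, not nandfs constants.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NDADDR	12	/* assumed direct block pointers */
#define DEMO_MNINDIR	1024	/* assumed pointers per indirect block */

int
main(void)
{
	int64_t single, dbl, triple;

	/* Same expressions as bmap_truncate_mapping() above. */
	single = -DEMO_NDADDR;
	dbl = single - DEMO_MNINDIR - 1;
	triple = dbl - (int64_t)DEMO_MNINDIR * DEMO_MNINDIR - 1;

	/* Prints -12, -1037 and -1049614 for this geometry. */
	printf("single indirect lbn: %lld\n", (long long)single);
	printf("double indirect lbn: %lld\n", (long long)dbl);
	printf("triple indirect lbn: %lld\n", (long long)triple);
	return (0);
}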
예제 #13
0
파일: bmap.c 프로젝트: ele7enxxh/dtrace-pf
static int
bmap_truncate_indirect(struct nandfs_node *node, int level, nandfs_lbn_t *left,
    int *cleaned, struct nandfs_indir *ap, struct nandfs_indir *fp,
    nandfs_daddr_t *copy)
{
	struct buf *bp;
	nandfs_lbn_t i, lbn, nlbn, factor, tosub;
	struct nandfs_device *fsdev;
	int error, lcleaned, modified;

	DPRINTF(BMAP, ("%s: node %p level %d left %jx\n", __func__,
	    node, level, *left));

	fsdev = node->nn_nandfsdev;

	MPASS(ap->in_off >= 0 && ap->in_off < MNINDIR(fsdev));

	factor = lbn_offset(fsdev, level);
	lbn = ap->in_lbn;

	error = nandfs_bread_meta(node, lbn, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	bcopy(bp->b_data, copy, fsdev->nd_blocksize);
	bqrelse(bp);

	modified = 0;

	i = ap->in_off;

	if (ap != fp)
		ap++;
	for (nlbn = lbn + 1 - i * factor; i >= 0 && *left > 0; i--,
	    nlbn += factor) {
		lcleaned = 0;

		DPRINTF(BMAP,
		    ("%s: node %p i=%jx nlbn=%jx left=%jx ap=%p vblk %jx\n",
		    __func__, node, i, nlbn, *left, ap, copy[i]));

		if (copy[i] == 0) {
			tosub = blocks_inside(fsdev, level - 1, ap);
			if (tosub > *left)
				tosub = 0;

			*left -= tosub;
		} else {
			if (level > SINGLE) {
				if (ap == fp)
					ap->in_lbn = nlbn;

				error = bmap_truncate_indirect(node, level - 1,
				    left, &lcleaned, ap, fp,
				    copy + MNINDIR(fsdev));
				if (error)
					return (error);
			} else {
				error = nandfs_bdestroy(node, copy[i]);
				if (error)
					return (error);
				lcleaned = 1;
				*left -= 1;
			}
		}

		if (lcleaned) {
			if (level > SINGLE) {
				error = nandfs_vblock_end(fsdev, copy[i]);
				if (error)
					return (error);
			}
			copy[i] = 0;
			modified++;
		}

		ap = fp;
	}

	if (i == -1)
		*cleaned = 1;

	error = nandfs_bread_meta(node, lbn, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (modified)
		bcopy(copy, bp->b_data, fsdev->nd_blocksize);

	error = nandfs_dirty_buf_meta(bp, 0);
	if (error)
		return (error);

	return (error);
}
예제 #14
0
int
ext2fs_bmaparray(struct vnode *vp,
#undef struct
                 daddr_t bn, daddr_t *bnp, struct indir *ap,
		int *nump, int *runp)
{
	struct inode *ip;
	struct buf *bp, *cbp;
#define struct
//	struct ufsmount *ump;
	struct mount *mp;
#undef struct
	struct indir a[NIADDR+1], *xap;
	daddr_t daddr;
	daddr_t metalbn;
	int error, maxrun = 0, num;

	ip = VTOI(vp);
	mp = EXT2_SIMPLE_FILE_SYSTEM_PRIVATE_DATA_FROM_THIS(vp->Filesystem);
//	mp = vp->v_mount; !!!! need to fix this badly!
//	ump = ip->i_ump; NEED TO DO SOMETHING ABOUT ufsmount
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2fs_bmaparray: invalid arguments");
#endif

	if (runp) {
		/*
		 * XXX
		 * If MAXBSIZE is the largest transfer the disks can handle,
		 * we probably want maxrun to be 1 block less so that we
		 * don't create a block larger than the device can handle.
		 */
		*runp = 0;
		maxrun = MAXBSIZE / //mp->mnt_stat.f_iosize - 1; NEEDS FIX!!!
				      mp->fs->e2fs_bsize - 1;
	}

	if (bn >= 0 && bn < NDADDR) {
		/* XXX ondisk32 */
		*bnp = blkptrtodb(ump, fs2h32(ip->i_e2fs_blocks[bn]));
		if (*bnp == 0)
			*bnp = -1;
		else if (runp)
			/* XXX ondisk32 */
			for (++bn; bn < NDADDR && *runp < maxrun &&
				is_sequential(ump, (daddr_t)fs2h32(ip->i_e2fs_blocks[bn - 1]),
							  (daddr_t)fs2h32(ip->i_e2fs_blocks[bn]));
				++bn, ++*runp);
		return (0);
	}

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;

	/* Get disk address out of indirect block array */
	/* XXX ondisk32 */
	daddr = fs2h32(ip->i_e2fs_blocks[NDADDR + xap->in_off]);

#ifdef DIAGNOSTIC
	if (num > NIADDR + 1 || num < 1) {
		printf("ext2fs_bmaparray: num=%d\n", num);
		panic("ext2fs_bmaparray: num");
	}
#endif
	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if (metalbn == bn)
			break;
		if (daddr == 0) {
			mutex_enter(&bufcache_lock);
			cbp = incore(vp, metalbn);
			mutex_exit(&bufcache_lock);
			if (cbp == NULL)
				break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp, 0);

		xap->in_exists = 1;
		//!!!!!!!!!!!!!!replaced 3rd param with 1 ftw
		bp = getblk(vp, metalbn, 1, 0, 0);
		if (bp == NULL) {

			/*
			 * getblk() above returns NULL only iff we are
			 * pagedaemon.  See the implementation of getblk
			 * for detail.
			 */

			 return (ENOMEM);
		}
		if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
			trace(TR_BREADHIT, pack(vp, size), metalbn);
		}
#ifdef DIAGNOSTIC
		else if (!daddr)
			panic("ext2fs_bmaparry: indirect block not in cache");
#endif
		else {
			trace(TR_BREADMISS, pack(vp, size), metalbn);
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
//			curlwp->l_ru.ru_inblock++;	*//* XXX */
			if ((error = biowait(bp)) != 0) {
				brelse(bp, 0);
				return (error);
			}
		}

		/* XXX ondisk32 */
		daddr = fs2h32(((int32_t *)bp->b_data)[xap->in_off]);
		if (num == 1 && daddr && runp)
			/* XXX ondisk32 */
			for (bn = xap->in_off + 1;
				bn < MNINDIR(ump) && *runp < maxrun &&
				is_sequential(ump, ((int32_t *)bp->b_data)[bn - 1],
				((int32_t *)bp->b_data)[bn]);
				++bn, ++*runp);
	}
	if (bp)
		brelse(bp, 0);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}