Example #1
/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk. The conversion is done by using the logical block
 * number to index into the array of block pointers described by the dinode.
 *
 * BMAP must return the contiguous before and after run in bytes, inclusive
 * of the returned block.
 *
 * ext2_bmap(struct vnode *a_vp, off_t a_loffset,
 *	    off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
int
ext2_bmap(struct vop_bmap_args *ap)
{
	struct ext2_sb_info *fs;
	ext2_daddr_t lbn;
	ext2_daddr_t dbn;
	int error;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_doffsetp == NULL)
		return (0);

	fs = VTOI(ap->a_vp)->i_e2fs;
	KKASSERT(((int)ap->a_loffset & ((1 << fs->s_bshift) - 1)) == 0);
	lbn = ap->a_loffset >> fs->s_bshift;

	error = ext2_bmaparray(ap->a_vp, lbn, &dbn, NULL, NULL,
			      ap->a_runp, ap->a_runb);

	if (error || dbn == (ext2_daddr_t)-1) {
		*ap->a_doffsetp = NOOFFSET;
	} else {
		*ap->a_doffsetp = dbtodoff(fs, dbn);
		if (ap->a_runp)
			*ap->a_runp = (*ap->a_runp + 1) << fs->s_bshift;
		if (ap->a_runb)
			*ap->a_runb = *ap->a_runb << fs->s_bshift;
	}
	return (error);
}
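
/*
 * The following is a minimal userland sketch (not kernel code) of the
 * run conversion performed at the end of ext2_bmap() above.  The names
 * bshift, runp and runb are illustrative stand-ins: ext2_bmaparray()
 * reports the forward run as the number of contiguous blocks *after*
 * the mapped block, so the byte count returned through a_runp adds one
 * block to include the returned block itself, while the backward run
 * is converted as-is.
 */
#include <stdio.h>

int
main(void)
{
	int bshift = 12;	/* assume 4096-byte blocks for illustration */
	int runp = 3;		/* 3 contiguous blocks follow the mapped one */
	int runb = 2;		/* 2 contiguous blocks precede it */

	printf("forward run:  %d bytes\n", (runp + 1) << bshift);	/* 16384 */
	printf("backward run: %d bytes\n", runb << bshift);		/* 8192 */
	return (0);
}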
Example #2
/*
 * hpfs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
static int
hpfs_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct uio *uio = ap->a_uio;
	struct buf *bp;
	u_int xfersz, toread;
	u_int off;
	daddr_t lbn, bn;
	int resid;
	int runl;
	int error = 0;

	resid = (int)szmin(uio->uio_resid, hp->h_fn.fn_size - uio->uio_offset);

	dprintf(("hpfs_read(0x%x, off: %d resid: %zd, segflg: %d): "
		 "[resid: 0x%x]\n",
		 hp->h_no, (u_int32_t)uio->uio_offset,
		 uio->uio_resid, uio->uio_segflg, resid));

	while (resid) {
		lbn = uio->uio_offset >> DEV_BSHIFT;
		off = uio->uio_offset & (DEV_BSIZE - 1);
		dprintf(("hpfs_read: resid: 0x%zx lbn: 0x%x off: 0x%x\n",
			uio->uio_resid, (u_int32_t)lbn, off));
		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
		if (error)
			return (error);

		toread = min(off + resid, min(DFLTPHYS, (runl+1)*DEV_BSIZE));
		xfersz = (toread + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		dprintf(("hpfs_read: bn: 0x%x (0x%x) toread: 0x%x (0x%x)\n",
			bn, runl, toread, xfersz));

		if (toread == 0) 
			break;

		error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
		if (error) {
			brelse(bp);
			break;
		}

		error = uiomove(bp->b_data + off, (size_t)(toread - off), uio);
		if (error) {
			brelse(bp);
			break;
		}
		brelse(bp);
		resid -= toread - off;
	}
	dprintf(("hpfs_read: successful\n"));
	return (error);
}
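
/*
 * A userland sketch (assumptions, not kernel code) of the per-iteration
 * arithmetic in hpfs_read() above, with DEV_BSIZE = 512 and
 * DEV_BSHIFT = 9 as in the system headers.  A read of 1000 bytes at
 * byte offset 700 starts 188 bytes into logical sector 1; with no
 * contiguous run (runl == 0) the device transfer is one whole sector.
 */
#include <stdio.h>

#define DEV_BSHIFT	9
#define DEV_BSIZE	(1 << DEV_BSHIFT)

int
main(void)
{
	unsigned offset = 700;	/* current byte offset of the read */
	unsigned resid = 1000;	/* bytes the caller still wants */
	unsigned runl = 0;	/* extra contiguous sectors after lbn */
	unsigned lbn, off, toread, cap, xfersz;

	lbn = offset >> DEV_BSHIFT;		/* 1 */
	off = offset & (DEV_BSIZE - 1);		/* 188 */
	cap = (runl + 1) * DEV_BSIZE;		/* 512 */
	toread = off + resid;
	if (toread > cap)
		toread = cap;			/* 512 */
	xfersz = (toread + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	printf("lbn %u off %u toread %u xfersz %u moved %u\n",
	    lbn, off, toread, xfersz, toread - off);
	/* prints: lbn 1 off 188 toread 512 xfersz 512 moved 324 */
	return (0);
}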
Example #3
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t dbn,
	       ufs_daddr_t lastbn, int level, long *countp)
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get a buffer of block pointers, zero those entries corresponding
	 * to the blocks to be free'd, and update the on-disk copy first.
	 * Since double (triple) indirect blocks are freed before single
	 * (double) indirect blocks, calls to bmap on these blocks will
	 * fail.  However, we already have the on-disk address, so we have
	 * to set the bio_offset field explicitly instead of letting bread
	 * do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lblktodoff(fs, lbn), (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		/*
		 * BIO is bio2 which chains back to bio1.  We wait
		 * on bio1.
		 */
		bp->b_bio2.bio_offset = dbtodoff(fs, dbn);
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vfs_busy_pages(vp, bp);
		/*
		 * Access the block device layer using the device vnode
		 * and the translated block number (bio2) instead of the
		 * file vnode (vp) and logical block number (bio1).
		 *
		 * Even though we are bypassing the vnode layer, we still
		 * want the vnode state to indicate that an I/O on its behalf
		 * is in progress.
		 */
		bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
		vn_strategy(ip->i_devvp, &bp->b_bio2);
		error = biowait(&bp->b_bio1, "biord");
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		copy = kmalloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (uint)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (uint)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		kfree(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}
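
/*
 * A userland sketch of the index arithmetic at the top of
 * ffs_indirtrunc() above.  The constants are assumptions for
 * illustration only: with 8K blocks and 4-byte block pointers,
 * NINDIR(fs) would be 2048, so a double-indirect block covers
 * 2048*2048 data blocks and "factor" selects which slot in it still
 * holds live data; slots above "last" are freed whole.
 */
#include <stdio.h>

#define SINGLE	0
#define DOUBLE	1
#define NINDIR	2048	/* fs_bsize / sizeof(ufs_daddr_t), assumed */

int
main(void)
{
	long lastbn = 5000000;	/* last logical block to keep (example) */
	int level = DOUBLE;
	long factor = 1;
	int i;

	for (i = SINGLE; i < level; i++)
		factor *= NINDIR;

	printf("factor %ld last %ld\n", factor, lastbn / factor);
	/* prints: factor 2048 last 2441 */
	return (0);
}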
Example #4
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
static
int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2_mount *ump;
	struct mount *mp;
	struct ext2_sb_info *fs;
	struct indir a[NIADDR+1], *xap;
	ext2_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	error = ext2_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn+1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}

	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 &&
		     !findblk(vp, lblktodoff(fs, metalbn), FINDBLK_TEST)) ||
		    metalbn == bn) {
			break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it; go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, lblktodoff(fs, metalbn),
			    mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			/*
			 * This runs through ext2_strategy using bio2 to
			 * cache the disk offset, then comes back through
			 * bio1.  So we want to wait on bio1
			 */
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			bp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(bp->b_vp, bp);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(&bp->b_bio1, "biord");
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ext2_daddr_t *)bp->b_data)[bn - 1],
			    ((ext2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump,
					    ((ext2_daddr_t *)bp->b_data)[bn],
					    ((ext2_daddr_t *)bp->b_data)[bn+1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
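
/*
 * A hypothetical userland illustration (not kernel code) of the
 * negative-lbn scheme described in the comment above ext2_bmaparray().
 * NDADDR = 12 and NINDIR = 2048 are assumptions for the example (ext2
 * with 8K blocks and 4-byte block pointers); the arithmetic simply
 * restates the addressing rule.
 */
#include <stdio.h>

#define NDADDR	12
#define NINDIR	2048

int
main(void)
{
	/*
	 * The single indirect block maps data blocks
	 * NDADDR..NDADDR+NINDIR-1, so it lives at the negative of the
	 * first data block it points to.
	 */
	long single = -(long)NDADDR;			/* -12 */

	/*
	 * The first indirect block under the double indirect maps data
	 * block NDADDR+NINDIR; the double indirect is addressed by one
	 * less than that indirect block's own (negative) address.
	 */
	long dindir = -(long)(NDADDR + NINDIR) - 1;	/* -2061 */

	printf("single indirect lbn %ld, double indirect lbn %ld\n",
	    single, dindir);
	return (0);
}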
Example #5
/*
 * hpfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
 *		int *a_ncookies, off_t **cookies)
 */
int
hpfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct hpfsmount *hpmp = hp->h_hpmp;
	struct uio *uio = ap->a_uio;
	int ncookies = 0, i, num, cnum;
	int error = 0;
	struct buf *bp;
	struct dirblk *dp;
	struct hpfsdirent *dep;
	lsn_t olsn;
	lsn_t lsn;
	int level;

	dprintf(("hpfs_readdir(0x%x, 0x%x, 0x%zx): ",
		hp->h_no, (u_int32_t)uio->uio_offset, uio->uio_resid));

	/*
	 * Since we have to fake up "." and "..", and the remaining
	 * directory structure cannot be encoded in a single off_t either,
	 * we simply increment uio_offset by 1 for each entry returned.
	 *
	 * num is the first entry we need to report,
	 * cnum is the current entry.
	 */
	if (uio->uio_offset < 0 || uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	num = uio->uio_offset;
	cnum = 0;

	if (num <= cnum) {
		dprintf((". faked, "));
		if (vop_write_dirent(&error, uio, hp->h_no, DT_DIR, 1, "."))
			goto readdone;
		if (error)
			goto done;
		ncookies++;
	}
	cnum++;

	if (num <= cnum) {
		dprintf((".. faked, "));
		if (vop_write_dirent(&error, uio, hp->h_fn.fn_parent, DT_DIR, 2, ".."))
			goto readdone;
		if (error)
			goto done;
		ncookies++;
	}
	cnum++;

	lsn = ((alleaf_t *)hp->h_fn.fn_abd)->al_lsn;

	olsn = 0;
	level = 1;

dive:
	dprintf(("[dive 0x%x] ", lsn));
	error = bread(hp->h_devvp, dbtodoff(lsn), D_BSIZE, &bp);
	if (error) {
		brelse(bp);
		goto done;
	}

	dp = (struct dirblk *) bp->b_data;
	if (dp->d_magic != D_MAGIC) {
		kprintf("hpfs_readdir: MAGIC DOESN'T MATCH\n");
		brelse(bp);
		error = EINVAL;
		goto done;
	}

	dep = D_DIRENT(dp);

	if (olsn) {
		dprintf(("[restore 0x%x] ", olsn));

		while (!(dep->de_flag & DE_END)) {
			if ((dep->de_flag & DE_DOWN) &&
			    (olsn == DE_DOWNLSN(dep)))
				break;
			dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
		}

		if ((dep->de_flag & DE_DOWN) && (olsn == DE_DOWNLSN(dep))) {
			if (dep->de_flag & DE_END)
				goto blockdone;

			if (!(dep->de_flag & DE_SPECIAL)) {
				if (num <= cnum) {
					if (hpfs_de_uiomove(&error, hpmp, dep, uio)) {
						brelse(bp);
						dprintf(("[resid] "));
						goto readdone;
					}
					if (error) {
						brelse(bp);
						goto done;
					}
					ncookies++;
				}
				cnum++;
			}

			dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
		} else {
			kprintf("hpfs_readdir: ERROR! oLSN not found\n");
			brelse(bp);
			error = EINVAL;
			goto done;
		}
	}

	olsn = 0;

	while (!(dep->de_flag & DE_END)) {
		if (dep->de_flag & DE_DOWN) {
			lsn = DE_DOWNLSN(dep);
			brelse(bp);
			level++;
			goto dive;
		}

		if (!(dep->de_flag & DE_SPECIAL)) {
			if (num <= cnum) {
				if (hpfs_de_uiomove(&error, hpmp, dep, uio)) {
					brelse(bp);
					dprintf(("[resid] "));
					goto readdone;
				}
				if (error) {
					brelse(bp);
					goto done;
				}
				ncookies++;
			}
			cnum++;
		}

		dep = (hpfsdirent_t *)((caddr_t)dep + dep->de_reclen);
	}

	if (dep->de_flag & DE_DOWN) {
		dprintf(("[enddive] "));
		lsn = DE_DOWNLSN(dep);
		brelse(bp);
		level++;
		goto dive;
	}

blockdone:
	dprintf(("[EOB] "));
	olsn = lsn;
	lsn = dp->d_parent;
	brelse(bp);
	level--;

	dprintf(("[level %d] ", level));

	if (level > 0)
		goto dive;	/* undive really */

	if (ap->a_eofflag) {
		dprintf(("[EOF] "));
		*ap->a_eofflag = 1;
	}

readdone:
	uio->uio_offset = cnum;
	dprintf(("[readdone]\n"));
	if (!error && ap->a_ncookies != NULL) {
		off_t *cookies;
		off_t *cookiep;

		dprintf(("%d cookies, ",ncookies));
		if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
			panic("hpfs_readdir: unexpected uio from NFS server");
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		for (cookiep = cookies, i = 0; i < ncookies; i++)
			*cookiep++ = ++num;

		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;
	}

done:
	vn_unlock(ap->a_vp);
	return (error);
}
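
/*
 * A userland sketch (illustrative names, not kernel code) of the cookie
 * scheme used by hpfs_readdir() above: directory "offsets" are entry
 * ordinals, so after returning ncookies entries starting at offset num,
 * the seek cookies handed to the caller are simply num+1..num+ncookies,
 * mirroring the "*cookiep++ = ++num" loop.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int num = 2;		/* resumed past the faked "." and ".." */
	int ncookies = 3;	/* entries returned in this pass */
	long long *cookies;
	int i;

	cookies = malloc(ncookies * sizeof(*cookies));
	if (cookies == NULL)
		return (1);
	for (i = 0; i < ncookies; i++)
		cookies[i] = ++num;

	for (i = 0; i < ncookies; i++)
		printf("cookie[%d] = %lld\n", i, cookies[i]);	/* 3 4 5 */
	free(cookies);
	return (0);
}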
Example #6
/*
 * hpfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
hpfs_write(struct vop_write_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct uio *uio = ap->a_uio;
	struct buf *bp;
	u_int xfersz, towrite;
	u_int off;
	daddr_t lbn, bn;
	int runl;
	int error = 0;

	dprintf(("hpfs_write(0x%x, off: %d resid: %zd, segflg: %d):\n",
		hp->h_no, (u_int32_t)uio->uio_offset,
		uio->uio_resid, uio->uio_segflg));

	if (ap->a_ioflag & IO_APPEND) {
		dprintf(("hpfs_write: APPEND mode\n"));
		uio->uio_offset = hp->h_fn.fn_size;
	}
	if (uio->uio_offset + uio->uio_resid > hp->h_fn.fn_size) {
		error = hpfs_extend(hp, uio->uio_offset + uio->uio_resid);
		if (error) {
			kprintf("hpfs_write: hpfs_extend FAILED %d\n", error);
			return (error);
		}
	}

	while (uio->uio_resid) {
		lbn = uio->uio_offset >> DEV_BSHIFT;
		off = uio->uio_offset & (DEV_BSIZE - 1);
		dprintf(("hpfs_write: resid: 0x%zx lbn: 0x%x off: 0x%x\n",
			uio->uio_resid, (u_int32_t)lbn, off));
		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
		if (error)
			return (error);

		towrite = szmin(off + uio->uio_resid,
				min(DFLTPHYS, (runl+1)*DEV_BSIZE));
		xfersz = (towrite + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		dprintf(("hpfs_write: bn: 0x%x (0x%x) towrite: 0x%x (0x%x)\n",
			bn, runl, towrite, xfersz));

		/*
		 * We do not have to issue a read-before-write if the
		 * transfer covers the whole block, since every byte of
		 * the buffer will be overwritten anyway.
		 *
		 * In the UIO_NOCOPY case, however, we are not overwriting
		 * anything and must do a read-before-write to fill in
		 * any missing pieces.
		 */
		if (off == 0 && towrite == xfersz &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp = getblk(hp->h_devvp, dbtodoff(bn), xfersz, 0, 0);
			clrbuf(bp);
		} else {
			error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		error = uiomove(bp->b_data + off, (size_t)(towrite - off), uio);
		if (error) {
			brelse(bp);
			return (error);
		}

		if (ap->a_ioflag & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}

	dprintf(("hpfs_write: successful\n"));
	return (0);
}
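
/*
 * A minimal userland sketch of the read-before-write decision made in
 * hpfs_write() above, assuming DEV_BSIZE = 512; the helper name is
 * illustrative only.  A write that starts sector-aligned and covers the
 * whole rounded transfer can use getblk()+clrbuf() because every byte
 * of the buffer will be overwritten; anything else must bread() first
 * to preserve the untouched bytes.  (The kernel additionally forces the
 * read path for UIO_NOCOPY, which this sketch omits.)
 */
#include <stdio.h>

#define DEV_BSIZE	512

static int
needs_read_before_write(unsigned off, unsigned towrite, unsigned xfersz)
{
	return (!(off == 0 && towrite == xfersz));
}

int
main(void)
{
	/* Whole-buffer overwrite: no read needed. */
	printf("%d\n",
	    needs_read_before_write(0, 2 * DEV_BSIZE, 2 * DEV_BSIZE)); /* 0 */
	/* Write starting 188 bytes into a sector: must read first. */
	printf("%d\n",
	    needs_read_before_write(188, DEV_BSIZE, DEV_BSIZE));	/* 1 */
	return (0);
}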