Example no. 1
#include <stdio.h>

#define MAXCOLM 80		/* assumed column limit; not given in the original listing */

char line[MAXCOLM + 1];		/* assumed line buffer; not declared in the original listing */

/* Prototypes inferred from the calls below; implementations assumed to exist elsewhere. */
void printl(int pos);
int findblk(int pos);
int newpos(int pos);
int exptab(int pos);

int
main(void)
{
	int pos, c;

	pos = 0;
	while ((c = getchar()) != EOF) {
		line[pos] = c;
		if (c == '\n') {
			printl(pos);
			pos = 0;
		} else if (++pos >= MAXCOLM) {
			pos = findblk(pos);
			printl(pos);
			pos = newpos(pos);
		} else if (c == '\t') {
			/* pos = exptab(pos); */
		}
	}
}
Example no. 2
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
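/*
 * Worked example of the numbering above (added for illustration; not part of
 * the original source).  Assuming the usual ext2 layout with NDADDR == 12
 * direct block pointers and MNINDIR(ump) pointers per indirect block:
 *
 *   - the single indirect block first points at data block NDADDR, so its
 *     logical block number is -NDADDR (i.e. -12);
 *   - the double indirect block first points at the indirect block whose
 *     first data block is NDADDR + MNINDIR(ump), so its logical block number
 *     is -(NDADDR + MNINDIR(ump)) - 1;
 *   - the triple indirect block is, in the same way, one less than the
 *     address of the first double indirect block it points to.
 */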
static
int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2_mount *ump;
	struct mount *mp;
	struct ext2_sb_info *fs;
	struct indir a[NIADDR+1], *xap;
	ext2_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = #
	error = ext2_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn+1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 &&
		     !findblk(vp, dbtodoff(fs, metalbn), FINDBLK_TEST)) ||
		    metalbn == bn) {
			break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, lblktodoff(fs, metalbn),
			    mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			/*
			 * This runs through ext2_strategy using bio2 to
			 * cache the disk offset, then comes back through
			 * bio1.  So we want to wait on bio1
			 */
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			bp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(bp->b_vp, bp);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(&bp->b_bio1, "biord");
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ext2_daddr_t *)bp->b_data)[bn - 1],
			    ((ext2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for(--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump, ((daddr_t *)bp->b_data)[bn],
					    ((daddr_t *)bp->b_data)[bn+1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
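
For context, a minimal sketch of how a caller might use ext2_bmaparray to turn one logical block number into a disk address, with the indirect-chain and run-length outputs disabled. The wrapper name is hypothetical; only the ext2_bmaparray signature and its hole convention (*bnp set to -1) are taken from the listing above.

/* Hypothetical caller, for illustration only. */
static int
example_lookup_block(struct vnode *vp, ext2_daddr_t lbn, ext2_daddr_t *dbnp)
{
	int error;

	/* ap == NULL and nump == NULL: no indirect-chain array requested. */
	error = ext2_bmaparray(vp, lbn, dbnp, NULL, NULL, NULL, NULL);
	if (error)
		return (error);
	/* *dbnp == -1 means the logical block is a hole (no disk address). */
	return (0);
}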
Example no. 3
static inline int
_vnode_validate(hammer_dedup_cache_t dcp, void *data, int *errorp)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	struct vnode *vp;
	struct buf *bp;
	/* off_t dooffset; */
	int result, error;

	result = error = 0;
	*errorp = 0;

	hammer_simple_transaction(&trans, dcp->hmp);

	ip = hammer_get_inode(&trans, NULL, dcp->obj_id, HAMMER_MAX_TID,
	    dcp->localization, 0, &error);
	if (ip == NULL) {
		kprintf("dedup: unable to find objid %016llx:%08x\n",
		    (long long)dcp->obj_id, dcp->localization);
		*errorp = 1;
		goto failed2;
	}

	error = hammer_get_vnode(ip, &vp);
	if (error) {
		kprintf("dedup: unable to acquire vnode for %016llx:%08x\n",
		    (long long)dcp->obj_id, dcp->localization);
		*errorp = 2;
		goto failed;
	}

	if ((bp = findblk(ip->vp, dcp->file_offset, FINDBLK_NBLOCK)) != NULL) {
		dfly_brelse(bp); /* bremfree(bp) */

		/* XXX if (mapped to userspace) goto done, *errorp = 4 */

		if ((bp->b_flags & B_CACHE) == 0 || bp->b_flags & B_DIRTY) {
			*errorp = 5;
			goto done;
		}

	/* XXX 	if (bp->b_bio2.bio_offset != dcp->data_offset) {
			error = VOP_BMAP(ip->vp, dcp->file_offset, &dooffset,
			    NULL, NULL, BUF_CMD_READ);
			if (error) {
				*errorp = 6;
				goto done;
			}

			if (dooffset != dcp->data_offset) {
				*errorp = 7;
				goto done;
			}
			hammer_live_dedup_bmap_saves++;
		}
*/

		if (bcmp(data, bp->b_data, dcp->bytes) == 0)
			result = 1;

done:
		dfly_brelse(bp); /* XXX release back to the buffer cache, not kfree ... bqrelse(bp); */
	} else {
		*errorp = 3;
	}
	vput(vp);

failed:
	hammer_rel_inode(ip, 0);
failed2:
	hammer_done_transaction(&trans);
	return (result);
}
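
A minimal sketch of how this validator might be driven. The caller name is hypothetical; only the _vnode_validate contract visible above is assumed: a non-zero return means the buffered file data still matches the candidate byte for byte, while a zero return comes with a reason code in *errorp (or 0 when the data simply differs).

/* Hypothetical caller, for illustration only. */
static int
example_try_dedup(hammer_dedup_cache_t dcp, void *candidate)
{
	int reason;

	if (_vnode_validate(dcp, candidate, &reason))
		return (1);	/* buffered file data still matches */
	/* reason != 0: lookup/setup failure; reason == 0: the data differs. */
	return (0);
}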