Example #1
0
/*
 * Find a block in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static daddr_t
ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
    char *loc;
    int start, len;

    /*
     * find the fragment by searching through the free block
     * map for an appropriate bit pattern
     */
    if (bpref)
        start = dtogd(fs, bpref) / NBBY;
    else
        start = 0;
    len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
    loc = memcchr(&bbp[start], 0xff, len);
    if (loc == NULL) {
        len = start + 1;
        start = 0;
        loc = memcchr(&bbp[start], 0xff, len);
        if (loc == NULL) {
            printf("start = %d, len = %d, fs = %s\n",
                   start, len, fs->e2fs_fsmnt);
            panic("ext2_mapsearch: map corrupted");
            /* NOTREACHED */
        }
    }
    return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
}
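The return expression packs two steps into one line: memcchr() locates the first byte of the bitmap that is not 0xff (that is, a byte with at least one clear bit), and ffs(~*loc) - 1 then picks the lowest clear bit within that byte. A minimal user-space sketch of the same computation; free_bit_index() and the sample map are illustrative stand-ins, not part of the kernel API:

#include <assert.h>
#include <stddef.h>
#include <strings.h>		/* ffs() */

#define	NBBY	8		/* bits per byte, as in <sys/param.h> */

/* Return the index of the first clear (free) bit in map[0..len-1], or -1. */
static int
free_bit_index(const unsigned char *map, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)	/* plays the role of memcchr(map, 0xff, len) */
		if (map[i] != 0xff)
			return ((int)(i * NBBY) + ffs(~map[i] & 0xff) - 1);
	return (-1);
}

int
main(void)
{
	/* bits 0..18 allocated, bit 19 is the first free one */
	unsigned char map[] = { 0xff, 0xff, 0x07, 0x00 };

	assert(free_bit_index(map, sizeof(map)) == 19);
	return (0);
}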
Example #2
0
int
cgbfree(struct uufsd *disk, ufs2_daddr_t bno, long size)
{
	u_int8_t *blksfree;
	struct fs *fs;
	struct cg *cgp;
	ufs1_daddr_t fragno, cgbno;
	int i, cg, blk, frags, bbase;

	fs = &disk->d_fs;
	cg = dtog(fs, bno);
	if (cgread1(disk, cg) != 1)
		return (-1);
	cgp = &disk->d_cg;
	cgbno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		ffs_setblock(fs, blksfree, fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++)
			setbit(blksfree, cgbno + i);
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	return cgwrite(disk);
}
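cgbfree() and most of the other examples here drive the free-block bitmap through the classic BSD bit macros, where a set bit means the fragment is free. Their <sys/param.h> definitions are essentially the following; the small demo program is only an illustration:

#include <assert.h>

#define	NBBY	8

/* Bitmap helpers as found in <sys/param.h>. */
#define	setbit(a, i)	(((unsigned char *)(a))[(i) / NBBY] |= 1 << ((i) % NBBY))
#define	clrbit(a, i)	(((unsigned char *)(a))[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
#define	isset(a, i)	(((unsigned char *)(a))[(i) / NBBY] & (1 << ((i) % NBBY)))
#define	isclr(a, i)	((((unsigned char *)(a))[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)

int
main(void)
{
	unsigned char blksfree[4] = { 0 };	/* 32-fragment map, all in use */

	setbit(blksfree, 10);			/* mark fragment 10 free */
	assert(isset(blksfree, 10));
	clrbit(blksfree, 10);			/* allocate it again */
	assert(isclr(blksfree, 10));
	return (0);
}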
Example #3
0
static daddr_t
ext2fs_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
	int start, len, loc, i, map;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs.e2fs_fpg, NBBY) - start;
	loc = skpc(0xff, len, &bbp[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &bbp[start]);
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
				start, len, fs->e2fs_fsmnt);
			panic("ext2fs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = bbp[i] ^ 0xff;
	if (map == 0) {
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("ext2fs_mapsearch: block not in map");
	}
	return i * NBBY + ffs(map) - 1;
}
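skpc() is a libkern helper: it skips leading bytes equal to the mask argument (0xff here, a fully allocated byte) and returns the number of bytes remaining starting at the first byte that differs, or 0 if every byte matched, which is why the caller recovers the byte index as start + len - loc. A user-space equivalent written as a sketch; my_skpc() is an illustrative stand-in, not the kernel symbol:

#include <assert.h>

/*
 * Illustrative equivalent of libkern's skpc(): skip bytes equal to mask,
 * return the count of bytes left starting at the first mismatch (0 if none).
 */
static int
my_skpc(int mask, int size, const unsigned char *cp)
{
	int i;

	for (i = 0; i < size; i++)
		if (cp[i] != (unsigned char)mask)
			return (size - i);
	return (0);
}

int
main(void)
{
	const unsigned char bbp[] = { 0xff, 0xff, 0xfe, 0xff };
	int len = 4, start = 0;
	int loc = my_skpc(0xff, len, &bbp[start]);

	assert(loc == 2);			/* two bytes remain: bbp[2], bbp[3] */
	assert(start + len - loc == 2);		/* first byte with a free bit */
	return (0);
}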
Example #4
0
int
chkuse(daddr_t blkno, int cnt)
{
	int cg;
	daddr_t fsbn, bn;

	fsbn = dbtofsb(fs, blkno);
	if ((unsigned)(fsbn + cnt) > fs->fs_size) {
		printf("block %ld out of range of file system\n", (long)blkno);
		return (1);
	}
	cg = dtog(fs, fsbn);
	if (fsbn < cgdmin(fs, cg)) {
		if (cg == 0 || (fsbn + cnt) > cgsblock(fs, cg)) {
			printf("block %ld in non-data area: cannot attach\n",
			    (long)blkno);
			return (1);
		}
	} else {
		if ((fsbn + cnt) > cgbase(fs, cg + 1)) {
			printf("block %ld in non-data area: cannot attach\n",
			    (long)blkno);
			return (1);
		}
	}
	if (cgread1(&disk, cg) != 1) {
		fprintf(stderr, "cg %d: could not be read\n", cg);
		errs++;
		return (1);
	}
	if (!cg_chkmagic(&acg)) {
		fprintf(stderr, "cg %d: bad magic number\n", cg);
		errs++;
		return (1);
	}
	bn = dtogd(fs, fsbn);
	if (isclr(cg_blksfree(&acg), bn))
		printf("Warning: sector %ld is in use\n", (long)blkno);
	return (0);
}
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = ffs_blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = ffs_fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
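cg_rotor acts as a per-cylinder-group cursor: when the preferred block is not usable, the search resumes where the previous allocation left off rather than at the start of the map. A simplified sketch of that idea; rotor_alloc(), the map layout, and the set-bit-means-free convention follow the code above but are not the FFS API:

#include <assert.h>

#define	NBBY	8
#define	isset(a, i)	(((unsigned char *)(a))[(i) / NBBY] & (1 << ((i) % NBBY)))
#define	clrbit(a, i)	(((unsigned char *)(a))[(i) / NBBY] &= ~(1 << ((i) % NBBY)))

/*
 * Allocate the next free (set) bit at or after *rotor, wrapping around;
 * remember where we stopped so the next call resumes there.  Returns the
 * bit index, or -1 if the map has no free bit.
 */
static int
rotor_alloc(unsigned char *map, int nbits, int *rotor)
{
	int i, bit;

	for (i = 0; i < nbits; i++) {
		bit = (*rotor + i) % nbits;
		if (isset(map, bit)) {
			clrbit(map, bit);	/* mark it allocated */
			*rotor = bit;
			return (bit);
		}
	}
	return (-1);
}

int
main(void)
{
	unsigned char map[2] = { 0xf0, 0xff };	/* bits 4..15 free */
	int rotor = 0;

	assert(rotor_alloc(map, 16, &rotor) == 4);
	assert(rotor_alloc(map, 16, &rotor) == 5);	/* resumes at the rotor */
	return (0);
}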
Example #6
0
/*
 * Free a block or fragment.
 *
 */
void
ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    int cg, error;
    char *bbp;

    fs = ip->i_e2fs;
    ump = ip->i_ump;
    cg = dtog(fs, bno);
    if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
        printf("bad block %lld, ino %llu\n", (long long)bno,
               (unsigned long long)ip->i_number);
        ext2_fserr(fs, ip->i_uid, "bad block");
        return;
    }
    error = bread(ip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        return;
    }
    bbp = (char *)bp->b_data;
    bno = dtogd(fs, bno);
    if (isclr(bbp, bno)) {
        printf("block = %lld, fs = %s\n",
               (long long)bno, fs->e2fs_fsmnt);
        panic("ext2_blkfree: freeing free block");
    }
    clrbit(bbp, bno);
    EXT2_LOCK(ump);
    ext2_clusteracct(fs, bbp, cg, bno, 1);
    fs->e2fs->e2fs_fbcount++;
    fs->e2fs_gd[cg].ext2bgd_nbfree++;
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
}
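For ext2, dtog() and dtogd() are plain arithmetic on the group geometry: the group number is the block's offset from the first data block divided by the blocks-per-group count, and dtogd() is the remainder. A sketch under that assumption (struct geom and its fields are stand-ins for the m_ext2fs fields), showing the round trip the allocators rely on when they return cg * fpg + first_dblock + bno:

#include <assert.h>
#include <stdint.h>

/* Simplified ext2 group geometry (stand-ins for m_ext2fs fields). */
struct geom {
	int64_t	first_dblock;	/* e2fs_first_dblock */
	int64_t	fpg;		/* e2fs_fpg: blocks per group */
};

static int64_t
dtog(const struct geom *g, int64_t bno)		/* block -> group number */
{
	return ((bno - g->first_dblock) / g->fpg);
}

static int64_t
dtogd(const struct geom *g, int64_t bno)	/* block -> offset within group */
{
	return ((bno - g->first_dblock) % g->fpg);
}

int
main(void)
{
	struct geom g = { .first_dblock = 1, .fpg = 8192 };
	int64_t bno = 20000;
	int64_t cg = dtog(&g, bno), off = dtogd(&g, bno);

	assert(cg == 2 && off == 3615);
	/* The allocators rebuild the absolute block number the same way. */
	assert(cg * g.fpg + g.first_dblock + off == bno);
	return (0);
}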
Example #7
0
/*
 * allocate a data block with the specified number of fragments
 */
ufs_daddr_t
allocblk(long frags)
{
	int i, j, k, cg, baseblk;
	struct cg *cgp = &cgrp;

	if (frags <= 0 || frags > sblock.fs_frag)
		return (0);
	for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
		for (j = 0; j <= sblock.fs_frag - frags; j++) {
			if (testbmap(i + j))
				continue;
			for (k = 1; k < frags; k++)
				if (testbmap(i + j + k))
					break;
			if (k < frags) {
				j += k;
				continue;
			}
			cg = dtog(&sblock, i + j);
			getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
			if (!cg_chkmagic(cgp))
				pfatal("CG %d: BAD MAGIC NUMBER\n", cg);
			baseblk = dtogd(&sblock, i + j);
			for (k = 0; k < frags; k++) {
				setbmap(i + j + k);
				clrbit(cg_blksfree(cgp), baseblk + k);
			}
			n_blks += frags;
			if (frags == sblock.fs_frag)
				cgp->cg_cs.cs_nbfree--;
			else
				cgp->cg_cs.cs_nffree -= frags;
			cgdirty();
			return (i + j);
		}
	}
	return (0);
}
Example #8
0
static int32_t
ext2fs_mapsearch(struct m_ext2fs *fs, char *bbp, int32_t bpref)
{
	int32_t bno;
	int start, len, loc, i, map;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs.e2fs_fpg, NBBY) - start;
	loc = skpc(0xff, len, &bbp[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &bbp[start]);
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
				start, len, fs->e2fs_fsmnt);
			panic("ext2fs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = bbp[i];
	bno = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, bno++) {
		if ((map & i) == 0)
			return (bno);
	}
	printf("fs = %s\n", fs->e2fs_fsmnt);
	panic("ext2fs_mapsearch: block not in map");
	/* NOTREACHED */
}
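The final loop walks a one-bit mask through map looking for the first zero bit; it is the open-coded form of the ffs(~map) - 1 idiom used in Examples #1 and #3. A quick, illustrative check of that equivalence:

#include <assert.h>
#include <strings.h>		/* ffs() */

#define	NBBY	8

/* First zero bit in a byte, written the way ext2fs_mapsearch() does it. */
static int
first_zero_bit_loop(unsigned int map)
{
	int i, pos = 0;

	for (i = 1; i < (1 << NBBY); i <<= 1, pos++)
		if ((map & i) == 0)
			return (pos);
	return (-1);		/* byte is 0xff: no free bit */
}

int
main(void)
{
	unsigned int map;

	for (map = 0; map < 0xff; map++)
		assert(first_zero_bit_loop(map) == ffs(~map & 0xff) - 1);
	assert(first_zero_bit_loop(0xff) == -1);
	return (0);
}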
Example #9
0
/*
 * Free a block.
 *
 * The specified block is placed back in the
 * free map.
 */
void
ext2fs_blkfree(struct inode *ip, daddr_t bno)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_e2fs;
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->e2fs.e2fs_bcount) {
		printf("bad block %lld, ino %llu\n", (long long)bno,
		    (unsigned long long)ip->i_number);
		ext2fs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
		fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("dev = 0x%llx, block = %lld, fs = %s\n",
		    (unsigned long long)ip->i_dev, (long long)bno,
		    fs->e2fs_fsmnt);
		panic("blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	fs->e2fs.e2fs_fbcount++;
	fs->e2fs_gd[cg].ext2bgd_nbfree++;

	fs->e2fs_fmod = 1;
	bdwrite(bp);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
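scanc() is the table-driven cousin of skpc(): it scans cp[0..size-1] until table[cp[i]] & mask is nonzero and, like skpc(), returns the number of bytes remaining (0 if nothing matched). fragtbl[fs_frag] is the precomputed table recording, for each possible map byte, which free-fragment run sizes it contains, so the mask selects bytes that hold a run of at least allocsiz fragments. A user-space sketch of the scan itself; my_scanc() and the toy table are illustrative stand-ins, not the libkern symbols:

#include <assert.h>

/*
 * Illustrative equivalent of libkern's scanc(): return the number of bytes
 * remaining, starting at the first byte whose table entry ANDs with mask,
 * or 0 if no byte qualifies.
 */
static int
my_scanc(unsigned int size, const unsigned char *cp,
    const unsigned char *table, int mask)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (table[cp[i]] & mask)
			return ((int)(size - i));
	return (0);
}

int
main(void)
{
	/* Toy table: entry is nonzero iff the byte has at least one zero bit. */
	unsigned char table[256];
	const unsigned char map[] = { 0xff, 0xff, 0x3f, 0x00 };
	int i, loc;

	for (i = 0; i < 256; i++)
		table[i] = (i != 0xff);
	loc = my_scanc(4, map, table, 1);
	assert(loc == 2);		/* first interesting byte is map[4 - 2] */
	return (0);
}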
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible 
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp, 0);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = ffs_fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - ffs_fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = ffs_numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = ffs_fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
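ffs_fragacct() keeps cg_frsum[] consistent: frsum[s] counts the free fragment runs of exactly s fragments in the group, so the free path above first subtracts the runs described by the old map byte, flips the bits, then adds the new runs back. The real routine uses the fragtbl/around/inside lookup tables; the sketch below (frag_acct(), a direct run scan) only illustrates the bookkeeping it performs:

#include <assert.h>

#define	MAXFRAG	8

/*
 * Add "cnt" to fraglist[s] for every maximal run of s free (set) bits in
 * the fs_frag-wide fragment map "blkmap".  Runs of fs_frag fragments are
 * whole blocks and are accounted separately, as in ffs_blkfree() above.
 */
static void
frag_acct(int fs_frag, int blkmap, int fraglist[MAXFRAG], int cnt)
{
	int pos, run = 0;

	for (pos = 0; pos < fs_frag; pos++) {
		if (blkmap & (1 << pos)) {
			run++;
			continue;
		}
		if (run > 0 && run < fs_frag)
			fraglist[run] += cnt;
		run = 0;
	}
	if (run > 0 && run < fs_frag)
		fraglist[run] += cnt;
}

int
main(void)
{
	int frsum[MAXFRAG] = { 0 };

	/* 0x6d = 0b01101101: free runs of sizes 1, 2 and 2 among 8 fragments. */
	frag_acct(8, 0x6d, frsum, 1);
	assert(frsum[1] == 1 && frsum[2] == 2);
	frag_acct(8, 0x6d, frsum, -1);		/* and the matching decrement */
	assert(frsum[1] == 0 && frsum[2] == 0);
	return (0);
}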
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp, 0);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available;
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = ffs_numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be 
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp, 0);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}
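cg_frsum[] shows up in ffs_alloccg() the other way around: carving frags fragments out of a free run of allocsiz fragments removes one run of that size and, if anything is left over, adds a run of allocsiz - frags. A tiny illustration of just that arithmetic; frsum_alloc() is a hypothetical helper, not an FFS routine:

#include <assert.h>

#define	MAXFRAG	8

/*
 * Illustration of the cg_frsum bookkeeping in ffs_alloccg(): taking "frags"
 * fragments out of a free run of "allocsiz" fragments consumes that run and,
 * if anything is left over, creates a shorter one.
 */
static void
frsum_alloc(int frsum[MAXFRAG], int allocsiz, int frags)
{
	frsum[allocsiz]--;			/* the run we carved from is gone */
	if (frags != allocsiz)
		frsum[allocsiz - frags]++;	/* the leftover becomes a new run */
}

int
main(void)
{
	int frsum[MAXFRAG] = { 0 };

	frsum[3] = 1;			/* one free run of 3 fragments */
	frsum_alloc(frsum, 3, 2);	/* allocate 2 of them */
	assert(frsum[3] == 0 && frsum[1] == 1);
	return (0);
}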
Example #13
0
static int32_t
ext2fs_alloccg(struct inode *ip, int cg, int32_t bpref, int size)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, bno, start, end, loc;

	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, &bp);
	if (error || fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		brelse(bp);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 * first try to get 8 contiguous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs.e2fs_fpg, NBBY) - start;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}
	for (loc = 0; loc < start; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}

	bno = ext2fs_mapsearch(fs, bbp, bpref);
	if (bno < 0)
		return (0);
gotit:
#ifdef DIAGNOSTIC
	if (isset(bbp, (long)bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%d fs=%s\n",
			cg, bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, (long)bno);
	fs->e2fs.e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_fpg + fs->e2fs.e2fs_first_dblock + bno);
}
Example #14
0
/*
 * Determine whether a cluster can be allocated.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
    struct m_ext2fs *fs;
    struct ext2mount *ump;
    struct buf *bp;
    char *bbp;
    int bit, error, got, i, loc, run;
    int32_t *lp;
    daddr_t bno;

    fs = ip->i_e2fs;
    ump = ip->i_ump;

    if (fs->e2fs_maxcluster[cg] < len)
        return (0);

    EXT2_UNLOCK(ump);
    error = bread(ip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error)
        goto fail_lock;

    bbp = (char *)bp->b_data;
    EXT2_LOCK(ump);
    /*
     * Check to see if a cluster of the needed size (or bigger) is
     * available in this cylinder group.
     */
    lp = &fs->e2fs_clustersum[cg].cs_sum[len];
    for (i = len; i <= fs->e2fs_contigsumsize; i++)
        if (*lp++ > 0)
            break;
    if (i > fs->e2fs_contigsumsize) {
        /*
         * Update the cluster summary information to reflect
         * the true maximum-sized cluster so that future cluster
         * allocation requests can avoid reading the bitmap only
         * to find no cluster.
         */
        lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
        for (i = len - 1; i > 0; i--)
            if (*lp-- > 0)
                break;
        fs->e2fs_maxcluster[cg] = i;
        goto fail;
    }
    EXT2_UNLOCK(ump);

    /* Search the bitmap to find a big enough cluster like in FFS. */
    if (dtog(fs, bpref) != cg)
        bpref = 0;
    if (bpref != 0)
        bpref = dtogd(fs, bpref);
    loc = bpref / NBBY;
    bit = 1 << (bpref % NBBY);
    for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
        if ((bbp[loc] & bit) != 0)
            run = 0;
        else {
            run++;
            if (run == len)
                break;
        }
        if ((got & (NBBY - 1)) != (NBBY - 1))
            bit <<= 1;
        else {
            loc++;
            bit = 1;
        }
    }

    if (got >= fs->e2fs->e2fs_fpg)
        goto fail_lock;

    /* Allocate the cluster that we found. */
    for (i = 1; i < len; i++)
        if (!isclr(bbp, got - run + i))
            panic("ext2_clusteralloc: map mismatch");

    bno = got - run + 1;
    if (bno >= fs->e2fs->e2fs_fpg)
        panic("ext2_clusteralloc: allocated out of group");

    EXT2_LOCK(ump);
    for (i = 0; i < len; i += fs->e2fs_fpb) {
        setbit(bbp, bno + i);
        ext2_clusteracct(fs, bbp, cg, bno + i, -1);
        fs->e2fs->e2fs_fbcount--;
        fs->e2fs_gd[cg].ext2bgd_nbfree--;
    }
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);

    bdwrite(bp);
    return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
    EXT2_LOCK(ump);
fail:
    brelse(bp);
    return (0);
}
Example #15
0
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    daddr_t bno, runstart, runlen;
    int bit, loc, end, error, start;
    char *bbp;
    /* XXX ondisk32 */
    fs = ip->i_e2fs;
    ump = ip->i_ump;
    if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
        return (0);
    EXT2_UNLOCK(ump);
    error = bread(ip->i_devvp, fsbtodb(fs,
                                       fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
        /*
         * Another thread allocated the last block in this
         * group while we were waiting for the buffer.
         */
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    bbp = (char *)bp->b_data;

    if (dtog(fs, bpref) != cg)
        bpref = 0;
    if (bpref != 0) {
        bpref = dtogd(fs, bpref);
        /*
         * if the requested block is available, use it
         */
        if (isclr(bbp, bpref)) {
            bno = bpref;
            goto gotit;
        }
    }
    /*
     * no blocks in the requested cylinder, so take next
     * available one in this cylinder group.
     * first try to get 8 contiguous blocks, then fall back to a single
     * block.
     */
    if (bpref)
        start = dtogd(fs, bpref) / NBBY;
    else
        start = 0;
    end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
    runlen = 0;
    runstart = 0;
    for (loc = start; loc < end; loc++) {
        if (bbp[loc] == (char)0xff) {
            runlen = 0;
            continue;
        }

        /* Start of a run, find the number of high clear bits. */
        if (runlen == 0) {
            bit = fls(bbp[loc]);
            runlen = NBBY - bit;
            runstart = loc * NBBY + bit;
        } else if (bbp[loc] == 0) {
            /* Continue a run. */
            runlen += NBBY;
        } else {
            /*
             * Finish the current run.  If it isn't long
             * enough, start a new one.
             */
            bit = ffs(bbp[loc]) - 1;
            runlen += bit;
            if (runlen >= 8) {
                bno = runstart;
                goto gotit;
            }

            /* Run was too short, start a new one. */
            bit = fls(bbp[loc]);
            runlen = NBBY - bit;
            runstart = loc * NBBY + bit;
        }

        /* If the current run is long enough, use it. */
        if (runlen >= 8) {
            bno = runstart;
            goto gotit;
        }
    }
    if (start != 0) {
        end = start;
        start = 0;
        goto retry;
    }

    bno = ext2_mapsearch(fs, bbp, bpref);
    if (bno < 0) {
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
gotit:
#ifdef INVARIANTS
    if (isset(bbp, bno)) {
        printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
               cg, (intmax_t)bno, fs->e2fs_fsmnt);
        panic("ext2fs_alloccg: dup alloc");
    }
#endif
    setbit(bbp, bno);
    EXT2_LOCK(ump);
    ext2_clusteracct(fs, bbp, cg, bno, -1);
    fs->e2fs->e2fs_fbcount--;
    fs->e2fs_gd[cg].ext2bgd_nbfree--;
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
    return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
}
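The run bookkeeping in ext2_alloccg() leans on two bit-scan idioms: NBBY - fls(byte) is the number of clear bits above the highest set bit (a run starting at the top of the byte), and ffs(byte) - 1 is the number of clear bits below the lowest set bit (a run ending in this byte). A small, illustrative check of both identities; note that fls() is a BSD extension declared in <strings.h> there:

#include <assert.h>
#include <strings.h>		/* ffs(); fls() is in <strings.h> on the BSDs */

#define	NBBY	8

int
main(void)
{
	/* 0x1f = 0b00011111: bits 0..4 used, bits 5..7 free. */
	unsigned char b = 0x1f;

	assert(fls(b) == 5);		/* highest set bit is bit 4 (1-based 5) */
	assert(NBBY - fls(b) == 3);	/* three clear bits at the top: 5, 6, 7 */

	/* 0xf8 = 0b11111000: bits 0..2 free, bits 3..7 used. */
	b = 0xf8;
	assert(ffs(b) - 1 == 3);	/* three clear bits at the bottom: 0, 1, 2 */
	return (0);
}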