Example No. 1
File: bio.c Project: vnea/UPMC
/**
 * @fn struct buf *breada(int dev, bc_daddr_t blkno, bc_daddr_t rablkno)
 * Reads the requested block and starts I/O on the next block of the device
 *
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller)
 * @param dev : device (major+minor)
 * @param blkno : block number
 * @param rablkno : read-ahead block
 * @return the buffer associated with device dev and block number blkno
 */
struct buf *breada(int dev, bc_daddr_t blkno, bc_daddr_t rablkno) {
  struct buf *bp, *rabp;
  
  bp = NULL;

  if (!incore(dev, blkno)) {
    bp = getblk(dev, blkno);
    if ((bp->b_flags&B_DONE) == 0) {
      bp->b_flags |= B_READ;
      bp->b_count = BSIZE;
      (*bdevsw[major(dev)].d_strategy)(bp);
    }
  }
  if (rablkno && !incore(dev, rablkno)) {
    rabp = getblk(dev, rablkno);
    if (rabp->b_flags & B_DONE)
      brelse(rabp);
    else {
      rabp->b_flags |= B_READ|B_ASYNC;
      rabp->b_count = BSIZE;
      (*bdevsw[major(dev)].d_strategy)(rabp);
    }
  }
  if(bp == NULL)
    return(bread(dev, blkno));
  iowait(bp);
  return(bp);
}
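This first example, like most of those that follow, hinges on incore(), the buffer-cache lookup that returns the cached buffer for a (device, block) pair, or NULL, without starting any I/O. Below is a minimal, self-contained sketch of such a lookup; the structure, table, and macro names are illustrative assumptions, not the declarations used by the project above.

/*
 * Minimal sketch of a buffer-cache lookup in the spirit of incore():
 * scan the hash chain for a buffer matching (dev, blkno) and return it,
 * or NULL when the block is not cached.  All names here are assumptions
 * made for the sketch only.
 */
#include <stddef.h>

#define BUFHSZ 64
#define BUFHASH(dev, blkno) (((dev) + (blkno)) % BUFHSZ)

struct buf {
    int         b_dev;       /* device the block belongs to        */
    long        b_blkno;     /* logical block number               */
    struct buf *b_hashnext;  /* next buffer on the same hash chain */
};

static struct buf *bufhash[BUFHSZ];  /* hash chains of cached buffers */

struct buf *
incore_sketch(int dev, long blkno)
{
    struct buf *bp;

    for (bp = bufhash[BUFHASH(dev, blkno)]; bp != NULL; bp = bp->b_hashnext)
        if (bp->b_dev == dev && bp->b_blkno == blkno)
            return bp;          /* block is already cached */
    return NULL;                /* caller must allocate and read it */
}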
Example No. 2
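/*
 * Historical, early-UNIX-style form of breada().  The archaic "=|"
 * spelling is the old assignment operator later renamed |=, and
 * b_wcount holds a negative word count (-256 words = 512 bytes),
 * a convention of the early UNIX buffer code.
 */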
breada(adev, blkno, rablkno)
{
	register struct buf *rbp, *rabp;
	register int dev;

	dev = adev;
	rbp = 0;
	if (!incore(dev, blkno)) {
		rbp = getblk(dev, blkno);
		if ((rbp->b_flags&B_DONE) == 0) {
			rbp->b_flags =| B_READ;
			rbp->b_wcount = -256;
			(*bdevsw[adev.d_major].d_strategy)(rbp);
		}
	}
	if (rablkno && !incore(dev, rablkno) && raflag) {
		rabp = getblk(dev, rablkno);
		if (rabp->b_flags & B_DONE)
			brelse(rabp);
		else {
			rabp->b_flags =| B_READ|B_ASYNC;
			rabp->b_wcount = -256;
			(*bdevsw[adev.d_major].d_strategy)(rabp);
		}
	}
	if (rbp==0)
		return(bread(dev, blkno));
	iowait(rbp);
	return(rbp);
}
Example No. 3
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	for (;;) {
		if (bp = incore(vp, blkno)) {
			x = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep (bp, PRIBIO);
				splx(x);
				continue;
			}
			bp->b_flags |= B_BUSY | B_CACHE;
			bremfree(bp);
			if (size > bp->b_bufsize)
				panic("now what do we do?");
			/* if (bp->b_bufsize != size) allocbuf(bp, size); */
		} else {

			if((bp = getnewbuf(size)) == 0) continue;
			bp->b_blkno = bp->b_lblkno = blkno;
			bgetvp(vp, bp);
			x = splbio();
			bh = BUFHASH(vp, blkno);
			binshash(bp, bh);
			bp->b_flags = B_BUSY;
		}
		splx(x);
		return (bp);
	}
}
Example No. 4
    static void GenerateWindow(int widgets, const std::string& filename){

        std::cout << "Loading base files" << std::endl;

        ///
        /// load base files

        // ---------------------------------------------------------------------
        QFile basepre("../window_pre.txt");
        if (!basepre.open(QFile::ReadOnly | QFile::Text)){
            std::cout << "ERROR Loading basepre" << std::endl;
            return;
        }
        QTextStream inpre(&basepre);
        QString basepre_string = inpre.readAll();
        std::cout << "basepre content = " << basepre_string.toStdString() << std::endl;

        // ---------------------------------------------------------------------
        QFile basecore("../window_core.txt");
        if (!basecore.open(QFile::ReadOnly | QFile::Text)){
            std::cout << "ERROR Loading basecore" << std::endl;
            return;
        }
        QTextStream incore(&basecore);
        QString basecore_string = incore.readAll();
        std::cout << "basecore content = " << basecore_string.toStdString() << std::endl;

        // ---------------------------------------------------------------------
        QFile basepost("../window_post.txt");
        if (!basepost.open(QFile::ReadOnly | QFile::Text)){
            std::cout << "ERROR Loading basepost" << std::endl;
            return;
        }
        QTextStream inpost(&basepost);
        QString basepost_string = inpost.readAll();
        std::cout << "basepost content = " << basepost_string.toStdString() << std::endl;

        ///
        /// create window file
        QFile file(filename.c_str());
        if ( file.open(QIODevice::ReadWrite))
        {
            QTextStream stream( &file );
            //
            stream << QString(basepre_string);
            //
            for (int i = 0; i < widgets; i++){
                stream << QString(basecore_string).replace("##widget##",QString::number(i)) << "\n";
            }
            //
            stream << QString(basepost_string);
        }
        file.close();

        std::cout << "done." << std::endl;
    }
Example No. 5
/*
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(struct vnode *vp, struct buf *last_bp, long size,
    daddr64_t start_lbn, int len, daddr64_t lbn)
{
	struct buf *bp;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != filesize %ld",
			size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Get more memory for current buffer */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			/*
			 * The buffer could have already been flushed out of
			 * the cache. If that has happened, we'll get a new
			 * buffer here with random data, just drop it.
			 */
			if ((bp->b_flags & B_DELWRI) == 0)
				brelse(bp);
			else
				bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

	++start_lbn;
	--len;
	bawrite(bp);
	goto redo;
}
Example No. 6
/*
 * Assign a buffer for the given block.
 *
 * The block is selected from the buffer list with LRU
 * algorithm.  If the appropriate block already exists in the
 * block list, return it.  Otherwise, the least recently used
 * block is used.
 */
struct buf *
getblk(dev_t dev, int blkno)
{
    struct buf *bp;

    DPRINTF(VFSDB_BIO, ("getblk: dev=%llx blkno=%d\n", (long long)dev, blkno));
start:
    BIO_LOCK();
    bp = incore(dev, blkno);
    if (bp != NULL) {
        /* Block found in cache. */
        if (ISSET(bp->b_flags, B_BUSY)) {
            /*
             * Wait until the buffer is ready.
             */
            BIO_UNLOCK();
            BUF_LOCK(bp);
            BUF_UNLOCK(bp);
            /* Scan again if it's busy */
            goto start;
        }
        bio_remove(bp);
        SET(bp->b_flags, B_BUSY);
    } else {
        bp = bio_remove_head();
        if (ISSET(bp->b_flags, B_DELWRI)) {
            BIO_UNLOCK();
            bwrite(bp);
            goto start;
        }
        bp->b_flags = B_BUSY;
        bp->b_dev = dev;
        bp->b_blkno = blkno;
    }
    BUF_LOCK(bp);
    BIO_UNLOCK();
    DPRINTF(VFSDB_BIO, ("getblk: done bp=%p\n", bp));
    return bp;
}
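Both getblk() variants shown so far share the same retry pattern: if the cached buffer is busy, wait until its holder releases it, then rescan. A minimal userspace sketch of that pattern with POSIX threads follows; every name in it is an illustrative assumption, not part of any of the quoted projects.

#include <pthread.h>

struct cached_block {
    pthread_mutex_t lock;
    pthread_cond_t  released;   /* signalled when 'busy' is cleared */
    int             busy;       /* analogue of the B_BUSY flag      */
};

/* One block of the hypothetical cache, statically initialised. */
static struct cached_block blk0 = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* Acquire exclusive use of the block, sleeping while another holder
 * has it busy; this mirrors the sleep/wakeup (or BUF_LOCK) retry in
 * the getblk() variants above. */
static void
block_acquire(struct cached_block *cb)
{
    pthread_mutex_lock(&cb->lock);
    while (cb->busy)
        pthread_cond_wait(&cb->released, &cb->lock);
    cb->busy = 1;
    pthread_mutex_unlock(&cb->lock);
}

/* Release the block and wake any waiters (the brelse() side). */
static void
block_release(struct cached_block *cb)
{
    pthread_mutex_lock(&cb->lock);
    cb->busy = 0;
    pthread_cond_broadcast(&cb->released);
    pthread_mutex_unlock(&cb->lock);
}

int
main(void)
{
    block_acquire(&blk0);
    /* ... use the buffer exclusively ... */
    block_release(&blk0);
    return 0;
}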
Example No. 7
/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[],
    int rasizes[], int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}
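A userspace analogue of breadn()'s read-ahead pattern, offered only as an illustration: read the wanted block synchronously, then advise the kernel that the following blocks will be needed so it can start read-ahead in the background. The file path and block size are placeholders.

#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLKSIZE 4096    /* assumed block size for the sketch */

int
main(void)
{
    unsigned char blk[BLKSIZE];
    int err;
    int fd = open("/tmp/example.dat", O_RDONLY);   /* placeholder path */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Synchronous read of the block the caller actually wants,
     * playing the role of the first bio_doread() in breadn(). */
    if (pread(fd, blk, sizeof blk, 0) < 0)
        perror("pread");

    /* Advisory read-ahead for the next two blocks, in the spirit of
     * the B_ASYNC reads breadn() issues for rablks[]. */
    err = posix_fadvise(fd, BLKSIZE, 2 * BLKSIZE, POSIX_FADV_WILLNEED);
    if (err != 0)
        fprintf(stderr, "posix_fadvise failed: %d\n", err);

    close(fd);
    return 0;
}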
Example No. 8
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2mount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *ap;
	daddr_t daddr;
	e2fs_lbn_t metalbn;
	int error, num, maxrun = 0, bsize;
	int *nump;

	ap = NULL;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	devvp = ump->um_devvp;

	bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

	if (runp) {
		maxrun = mp->mnt_iosize_max / bsize - 1;
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}


	ap = a;
	nump = &num;
	error = ext2_getlbns(vp, bn, ap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0) {
			*bnp = -1;
		} else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn + 1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[ap->in_off];

	for (bp = NULL, ++ap; --num; ++ap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = ap->in_lbn;
		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		bp = getblk(vp, metalbn, bsize, 0, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef INVARIANTS
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_iocmd = BIO_READ;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
			vfs_busy_pages(bp, 0);
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);
			curthread->td_ru.ru_inblock++;
			error = bufwait(bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = ap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((e2fs_daddr_t *)bp->b_data)[bn - 1],
			    ((e2fs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = ap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump,
					((e2fs_daddr_t *)bp->b_data)[bn],
					((e2fs_daddr_t *)bp->b_data)[bn + 1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 && daddr < ump->um_seqinc){
		*bnp = -1;
		return (0);
	}
	*bnp = blkptrtodb(ump, daddr);
	if (*bnp == 0) {
		*bnp = -1;
	}
	return (0);
}
Example No. 9
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ufs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
int
ufs_bmaparray(struct vnode *vp, daddr64_t bn, daddr64_t *bnp, struct indir *ap,
    int *nump, int *runp)
{
	struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *xap;
	daddr64_t daddr, metalbn;
	int error, maxrun = 0, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ufs_bmaparray: invalid arguments");
#endif

	if (runp) {
		/*
		 * XXX
		 * If MAXBSIZE is the largest transfer the disks can handle,
		 * we probably want maxrun to be 1 block less so that we
		 * don't create a block larger than the device can handle.
		 */
		*runp = 0;
		maxrun = MAXBSIZE / mp->mnt_stat.f_iosize - 1;
	}

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, DIP(ip, db[bn]));
		if (*bnp == 0)
			*bnp = -1;
		else if (runp)
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, DIP(ip, db[bn - 1]),
			        DIP(ip, db[bn]));
			    ++bn, ++*runp);
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = DIP(ip, ib[xap->in_off]);

	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
	for (bp = NULL, ++xap; --num; ++xap) {
		/* 
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			;
		}
#ifdef DIAGNOSTIC
		else if (!daddr)
			panic("ufs_bmaparray: indirect block not in cache");
#endif
		else {
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			bcstats.pendingreads++;
			bcstats.numreads++;
			VOP_STRATEGY(bp);
			curproc->p_ru.ru_inblock++;		/* XXX */
			if ((error = biowait(bp)) != 0) {
				brelse(bp);
				return (error);
			}
		}

#ifdef FFS2
		if (ip->i_ump->um_fstype == UM_UFS2) {
			daddr = ((int64_t *)bp->b_data)[xap->in_off];
			if (num == 1 && daddr && runp)
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(ump) && *runp < maxrun &&
				    is_sequential(ump,
					((int64_t *)bp->b_data)[bn - 1],
					((int64_t *)bp->b_data)[bn]);
				    ++bn, ++*runp);

			continue;
		}

#endif /* FFS2 */

		daddr = ((int32_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp)
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
				((int32_t *)bp->b_data)[bn - 1],
				((int32_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
	}
	if (bp)
		brelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
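The comment at the top of this example describes how indirect blocks are addressed; what ufs_getlbns() ultimately hands back to this routine is the chain of per-level offsets needed to walk from the inode to a given logical block. A self-contained sketch of that decomposition is below, with assumed values for the number of direct pointers and of pointers per indirect block.

/*
 * Illustrative sketch of the logical-block decomposition that the bmap
 * routines above rely on: report the indirection level of a logical
 * block and the offset within each indirect block on the path to it.
 * NDADDR_EX and NINDIR_EX are assumed values, not the constants of any
 * particular filesystem.
 */
#include <stdio.h>

#define NDADDR_EX 12L    /* direct block pointers in the inode (assumed) */
#define NINDIR_EX 1024L  /* block pointers per indirect block (assumed)  */

static void
print_indir_path(long bn)
{
    long off, span;
    int level;

    if (bn < NDADDR_EX) {
        printf("lbn %ld: direct pointer %ld\n", bn, bn);
        return;
    }

    /* Find which indirection level covers this block. */
    off = bn - NDADDR_EX;
    span = NINDIR_EX;                 /* blocks mapped by a 1-level tree */
    for (level = 1; level <= 3 && off >= span; level++) {
        off -= span;
        span *= NINDIR_EX;
    }
    if (level > 3) {
        printf("lbn %ld: beyond the triple-indirect range\n", bn);
        return;
    }

    /* Peel per-level offsets from the outermost indirect block inward. */
    printf("lbn %ld: level-%d indirect, offsets:", bn, level);
    for (span /= NINDIR_EX; level > 0; level--, span /= NINDIR_EX)
        printf(" %ld", (off / span) % NINDIR_EX);
    printf("\n");
}

int
main(void)
{
    print_indir_path(5);                              /* direct          */
    print_indir_path(NDADDR_EX + 3);                  /* single indirect */
    print_indir_path(NDADDR_EX + NINDIR_EX + 7);      /* double indirect */
    return 0;
}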
Example No. 10
int
ffs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	struct buf *bp;
	int num, error, i;
	struct indir ia[NIADDR + 1];
	int bsize;
	daddr_t blk_high;
	struct vnode *vp;
	struct mount *mp;

	vp = ap->a_vp;
	mp = vp->v_mount;

	fstrans_start(mp, FSTRANS_LAZY);
	if ((ap->a_offlo == 0 && ap->a_offhi == 0) || (vp->v_type != VREG)) {
		error = ffs_full_fsync(vp, ap->a_flags);
		goto out;
	}

	bsize = mp->mnt_stat.f_iosize;
	blk_high = ap->a_offhi / bsize;
	if (ap->a_offhi % bsize != 0)
		blk_high++;

	/*
	 * First, flush all pages in range.
	 */

	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
	    round_page(ap->a_offhi), PGO_CLEANIT |
	    ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0));
	if (error) {
		goto out;
	}

#ifdef WAPBL
	KASSERT(vp->v_type == VREG);
	if (mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request.  We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((ap->a_flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0) {
			fstrans_done(mp);
			return 0;
		}
		error = 0;
		if (vp->v_tag == VT_UFS && VTOI(vp)->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY |
				 IN_MODIFIED | IN_ACCESSED)) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error) {
				fstrans_done(mp);
				return error;
			}
			error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
			    ((ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0));
			UFS_WAPBL_END(mp);
		}
		if (error || (ap->a_flags & FSYNC_NOLOG) != 0) {
			fstrans_done(mp);
			return error;
		}
		error = wapbl_flush(mp->mnt_wapbl, 0);
		fstrans_done(mp);
		return error;
	}
#endif /* WAPBL */

	/*
	 * Then, flush indirect blocks.
	 */

	if (blk_high >= NDADDR) {
		error = ufs_getlbns(vp, blk_high, ia, &num);
		if (error)
			goto out;

		mutex_enter(&bufcache_lock);
		for (i = 0; i < num; i++) {
			if ((bp = incore(vp, ia[i].in_lbn)) == NULL)
				continue;
			if ((bp->b_cflags & BC_BUSY) != 0 ||
			    (bp->b_oflags & BO_DELWRI) == 0)
				continue;
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			bawrite(bp);
			mutex_enter(&bufcache_lock);
		}
		mutex_exit(&bufcache_lock);
	}

	if (ap->a_flags & FSYNC_WAIT) {
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0)
			cv_wait(&vp->v_cv, vp->v_interlock);
		mutex_exit(vp->v_interlock);
	}

	error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
	    (((ap->a_flags & (FSYNC_WAIT | FSYNC_DATAONLY)) == FSYNC_WAIT)
	    ? UPDATE_WAIT : 0));

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
			curlwp->l_cred);
	}

out:
	fstrans_done(mp);
	return error;
}
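ffs_fsync() above distinguishes a data-only flush (FSYNC_DATAONLY) from a full sync that also writes inode metadata and, with FSYNC_CACHE, flushes the drive cache. A rough userspace counterpart of that distinction is fdatasync() versus fsync(); the sketch below uses a placeholder file path.

#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("/tmp/journal.dat", O_WRONLY | O_CREAT, 0644);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (write(fd, "record\n", 7) != 7)
        perror("write");

    /* Data-only flush: file data plus the metadata needed to read it
     * back, loosely analogous to FSYNC_DATAONLY above. */
    if (fdatasync(fd) != 0)
        perror("fdatasync");

    /* Full flush, including the remaining inode metadata. */
    if (fsync(fd) != 0)
        perror("fsync");

    close(fd);
    return 0;
}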
Example No. 11
/*
 * Allocate a buffer.
 */
struct buf *
buf_get(struct vnode *vp, daddr_t blkno, size_t size)
{
	struct buf *bp;
	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
	int npages;
	int s;

	s = splbio();
	if (size) {
		/*
		 * Wake up the cleaner if we have lots of dirty pages,
		 * or if we are getting low on buffer cache kva.
		 */
		if (UNCLEAN_PAGES >= hidirtypages ||
			bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
			wakeup(&bd_req);

		npages = atop(round_page(size));

		/*
		 * if our cache has been previously shrunk,
		 * allow it to grow again with use up to
		 * bufhighpages (cachepercent)
		 */
		if (bufpages < bufhighpages)
			bufadjust(bufhighpages);

		/*
		 * If would go over the page target with our
		 * new allocation, free enough buffers first
		 * to stay at the target with our new allocation.
		 */
		while ((bcstats.numbufpages + npages > targetpages) &&
		    (bp = bufcache_getcleanbuf())) {
			bufcache_take(bp);
			if (bp->b_vp) {
				RB_REMOVE(buf_rb_bufs,
				    &bp->b_vp->v_bufs_tree, bp);
				brelvp(bp);
			}
			buf_put(bp);
		}

		/*
		 * If we get here, we tried to free the world down
		 * above, and couldn't get down - Wake the cleaner
		 * and wait for it to push some buffers out.
		 */
		if ((bcstats.numbufpages + npages > targetpages ||
		    bcstats.kvaslots_avail <= RESERVE_SLOTS) &&
		    curproc != syncerproc && curproc != cleanerproc) {
			wakeup(&bd_req);
			needbuffer++;
			tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
			splx(s);
			return (NULL);
		}
		if (bcstats.numbufpages + npages > bufpages) {
			/* cleaner or syncer */
			nobuffers = 1;
			tsleep(&nobuffers, PRIBIO, "nobuffers", 0);
			splx(s);
			return (NULL);
		}
	}

	bp = pool_get(&bufpool, poolwait|PR_ZERO);

	if (bp == NULL) {
		splx(s);
		return (NULL);
	}

	bp->b_freelist.tqe_next = NOLIST;
	bp->b_dev = NODEV;
	LIST_INIT(&bp->b_dep);
	bp->b_bcount = size;

	buf_acquire_nomap(bp);

	if (vp != NULL) {
		/*
		 * We insert the buffer into the hash with B_BUSY set
		 * while we allocate pages for it. This way any getblk
		 * that happens while we allocate pages will wait for
		 * this buffer instead of starting its own buf_get.
		 *
		 * But first, we check if someone beat us to it.
		 */
		if (incore(vp, blkno)) {
			pool_put(&bufpool, bp);
			splx(s);
			return (NULL);
		}

		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
	} else {
		bp->b_vnbufs.le_next = NOLIST;
		SET(bp->b_flags, B_INVAL);
		bp->b_vp = NULL;
	}

	LIST_INSERT_HEAD(&bufhead, bp, b_list);
	bcstats.numbufs++;

	if (size) {
		buf_alloc_pages(bp, round_page(size));
		buf_map(bp);
	}

	splx(s);

	return (bp);
}
Example No. 12
int
ext2fs_bmaparray(struct vnode *vp,
#undef struct
                 daddr_t bn, daddr_t *bnp, struct indir *ap,
		int *nump, int *runp)
{
	struct inode *ip;
	struct buf *bp, *cbp;
#define struct
//	struct ufsmount *ump;
	struct mount *mp;
#undef struct
	struct indir a[NIADDR+1], *xap;
	daddr_t daddr;
	daddr_t metalbn;
	int error, maxrun = 0, num;

	ip = VTOI(vp);
	mp = EXT2_SIMPLE_FILE_SYSTEM_PRIVATE_DATA_FROM_THIS(vp->Filesystem);
//	mp = vp->v_mount; !!!! need to fix this badly!
//	ump = ip->i_ump; NEED TO DO SOMETHING ABOUT ufsmount
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2fs_bmaparray: invalid arguments");
#endif

	if (runp) {
		/*
		 * XXX
		 * If MAXBSIZE is the largest transfer the disks can handle,
		 * we probably want maxrun to be 1 block less so that we
		 * don't create a block larger than the device can handle.
		 */
		*runp = 0;
		maxrun = MAXBSIZE / //mp->mnt_stat.f_iosize - 1; NEEDS FIX!!!
				      mp->fs->e2fs_bsize - 1;
	}

	if (bn >= 0 && bn < NDADDR) {
		/* XXX ondisk32 */
		*bnp = blkptrtodb(ump, fs2h32(ip->i_e2fs_blocks[bn]));
		if (*bnp == 0)
			*bnp = -1;
		else if (runp)
			/* XXX ondisk32 */
			for (++bn; bn < NDADDR && *runp < maxrun &&
				is_sequential(ump, (daddr_t)fs2h32(ip->i_e2fs_blocks[bn - 1]),
							  (daddr_t)fs2h32(ip->i_e2fs_blocks[bn]));
				++bn, ++*runp);
		return (0);
	}

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;

	/* Get disk address out of indirect block array */
	/* XXX ondisk32 */
	daddr = fs2h32(ip->i_e2fs_blocks[NDADDR + xap->in_off]);

#ifdef DIAGNOSTIC
    if (num > NIADDR + 1 || num < 1) {
		printf("ext2fs_bmaparray: num=%d\n", num);
		panic("ext2fs_bmaparray: num");
	}
#endif
	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if (metalbn == bn)
			break;
		if (daddr == 0) {
			mutex_enter(&bufcache_lock);
			cbp = incore(vp, metalbn);
			mutex_exit(&bufcache_lock);
			if (cbp == NULL)
				break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp, 0);

		xap->in_exists = 1;
		//!!!!!!!!!!!!!!replaced 3rd param with 1 ftw
		bp = getblk(vp, metalbn, 1, 0, 0);
		if (bp == NULL) {

			/*
			 * getblk() above returns NULL only iff we are
			 * pagedaemon.  See the implementation of getblk
			 * for detail.
			 */

			 return (ENOMEM);
		}
		if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
			trace(TR_BREADHIT, pack(vp, size), metalbn);
		}
#ifdef DIAGNOSTIC
		else if (!daddr)
			panic("ext2fs_bmaparry: indirect block not in cache");
#endif
		else {
			trace(TR_BREADMISS, pack(vp, size), metalbn);
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
//			curlwp->l_ru.ru_inblock++;	*//* XXX */
			if ((error = biowait(bp)) != 0) {
				brelse(bp, 0);
				return (error);
			}
		}

		/* XXX ondisk32 */
		daddr = fs2h32(((int32_t *)bp->b_data)[xap->in_off]);
		if (num == 1 && daddr && runp)
			/* XXX ondisk32 */
			for (bn = xap->in_off + 1;
				bn < MNINDIR(ump) && *runp < maxrun &&
				is_sequential(ump, ((int32_t *)bp->b_data)[bn - 1],
				((int32_t *)bp->b_data)[bn]);
				++bn, ++*runp);
	}
	if (bp)
		brelse(bp, 0);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}