Example #1
/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
void
bawrite(struct buf *bp)
{
	ASSERT(SEMA_HELD(&bp->b_sem));

	/* Use bfreelist.b_bcount as a weird-ass heuristic */
	if (bfreelist.b_bcount > 4)
		bp->b_flags |= B_ASYNC;
	BWRITE(bp);
}
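The only subtlety in bawrite() is the heuristic itself: fsflush() in Example #4 below recomputes bfreelist.b_bcount once a second from the per-bucket b_length counts, and when that total is above four the write is tagged B_ASYNC so BWRITE() does not wait for completion. Below is a minimal user-space sketch of the same decision; mock_buf, freelist_count and write_buf() are invented stand-ins rather than kernel interfaces, and the B_ASYNC value is illustrative.

#include <stdio.h>

#define B_ASYNC	0x1			/* illustrative value, not the kernel's */

struct mock_buf {
	int	b_flags;
};

static int freelist_count = 8;		/* stand-in for bfreelist.b_bcount */

static void
write_buf(struct mock_buf *bp)
{
	printf("%s write\n", (bp->b_flags & B_ASYNC) ? "async" : "sync");
}

static void
mock_bawrite(struct mock_buf *bp)
{
	/* Plenty of cached buffers: let this write complete asynchronously. */
	if (freelist_count > 4)
		bp->b_flags |= B_ASYNC;
	write_buf(bp);
}

int
main(void)
{
	struct mock_buf b = { 0 };

	mock_bawrite(&b);
	return (0);
}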
Example #2
/*
 * Ensure that a specified block is up-to-date on disk.
 */
void
blkflush(dev_t dev, daddr_t blkno)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *sbp = NULL;
	uint_t index;
	kmutex_t *hmp;

	index = bio_bhash(dev, blkno);
	hp    = &hbuf[index];
	dp    = (struct buf *)hp;
	hmp   = &hp->b_lock;

	/*
	 * Identify the buffer in the cache belonging to
	 * this device and blkno (if any).
	 */
	mutex_enter(hmp);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		sbp = bp;
		break;
	}
	mutex_exit(hmp);
	if (sbp == NULL)
		return;
	/*
	 * Now check the buffer we have identified and
	 * make sure it still belongs to the device and is B_DELWRI
	 */
	sema_p(&sbp->b_sem);
	if (sbp->b_blkno == blkno && sbp->b_edev == dev &&
	    (sbp->b_flags & (B_DELWRI|B_STALE)) == B_DELWRI) {
		mutex_enter(hmp);
		hp->b_length--;
		notavail(sbp);
		mutex_exit(hmp);
		/*
		 * XXX - There is nothing to guarantee a synchronous
		 * write here if the B_ASYNC flag is set.  This needs
		 * some investigation.
		 */
		if (sbp->b_vp == NULL) {		/* !ufs */
			BWRITE(sbp);	/* synchronous write */
		} else {				/* ufs */
			UFS_BWRITE(VTOI(sbp->b_vp)->i_ufsvfs, sbp);
		}
	} else {
		sema_v(&sbp->b_sem);
	}
}
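Note the two-step locking in blkflush(): the buffer is located while only the hash-chain lock is held, that lock is dropped, the buffer's own b_sem is taken (which may block), and only then are b_blkno, b_edev and the flags re-checked, since the buffer could have been reassigned in the window. This respects the b_sem > hash lock ordering noted in Example #5. The sketch below restates that look-up / re-validate pattern with POSIX threads; every name in it is hypothetical, and entries are assumed never to be freed while a lookup may still hold a pointer to them.

#include <pthread.h>
#include <stdio.h>

struct entry {
	pthread_mutex_t	lock;
	int		dev;
	long		blkno;
	int		dirty;
	struct entry	*next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache_head;

static void
write_out(struct entry *e)		/* stand-in for the synchronous write */
{
	printf("flushing dev %d blkno %ld\n", e->dev, e->blkno);
	e->dirty = 0;
}

void
flush_block(int dev, long blkno)
{
	struct entry *e, *found = NULL;

	/* Phase 1: identify the candidate under the cache lock only. */
	pthread_mutex_lock(&cache_lock);
	for (e = cache_head; e != NULL; e = e->next) {
		if (e->dev == dev && e->blkno == blkno) {
			found = e;
			break;
		}
	}
	pthread_mutex_unlock(&cache_lock);
	if (found == NULL)
		return;

	/*
	 * Phase 2: lock the entry itself and re-check its identity;
	 * it may have been reassigned to another block between dropping
	 * cache_lock and acquiring the entry lock.
	 */
	pthread_mutex_lock(&found->lock);
	if (found->dev == dev && found->blkno == blkno && found->dirty)
		write_out(found);
	pthread_mutex_unlock(&found->lock);
}

int
main(void)
{
	struct entry e = { PTHREAD_MUTEX_INITIALIZER, 1, 42L, 1, NULL };

	cache_head = &e;
	flush_block(1, 42L);
	return (0);
}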
Example #3
/* Serialize the base-class locus fields first, then rsName. */
int hapsLocalLocus::write(FILE *fp)
{
	if (localLocus::write(fp)==0)
		return 0;
	BWRITE(rsName,fp);
	return 1;	/* assumed success return value */
}
Example #4
/*
 * As part of file system hardening, this daemon is awakened
 * every second to flush cached data which includes the
 * buffer cache, the inode cache and mapped pages.
 */
void
fsflush()
{
	struct buf *bp, *dwp;
	struct hbuf *hp;
	int autoup;
	unsigned int ix, icount, count = 0;
	callb_cpr_t cprinfo;
	uint_t		bcount;
	kmutex_t	*hmp;
	struct vfssw *vswp;

	proc_fsflush = ttoproc(curthread);
	proc_fsflush->p_cstime = 0;
	proc_fsflush->p_stime =  0;
	proc_fsflush->p_cutime =  0;
	proc_fsflush->p_utime = 0;
	bcopy("fsflush", curproc->p_user.u_psargs, 8);
	bcopy("fsflush", curproc->p_user.u_comm, 7);

	mutex_init(&fsflush_lock, NULL, MUTEX_DEFAULT, NULL);
	sema_init(&fsflush_sema, 0, NULL, SEMA_DEFAULT, NULL);

	/*
	 * Setup page coalescing.
	 */
	fsf_npgsz = page_num_pagesizes();
	ASSERT(fsf_npgsz < MAX_PAGESIZES);
	for (ix = 0; ix < fsf_npgsz - 1; ++ix) {
		fsf_pgcnt[ix] =
		    page_get_pagesize(ix + 1) / page_get_pagesize(ix);
		fsf_mask[ix] = page_get_pagecnt(ix + 1) - 1;
	}

	autoup = v.v_autoup * hz;
	icount = v.v_autoup / tune.t_fsflushr;
	CALLB_CPR_INIT(&cprinfo, &fsflush_lock, callb_generic_cpr, "fsflush");
loop:
	sema_v(&fsflush_sema);
	mutex_enter(&fsflush_lock);
	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	cv_wait(&fsflush_cv, &fsflush_lock);		/* wait for clock */
	CALLB_CPR_SAFE_END(&cprinfo, &fsflush_lock);
	mutex_exit(&fsflush_lock);
	sema_p(&fsflush_sema);

	/*
	 * Write back all old B_DELWRI buffers on the freelist.
	 */
	bcount = 0;
	for (ix = 0; ix < v.v_hbuf; ix++) {

		hp = &hbuf[ix];
		dwp = (struct buf *)&dwbuf[ix];

		bcount += (hp->b_length);

		if (dwp->av_forw == dwp) {
			continue;
		}

		hmp = &hbuf[ix].b_lock;
		mutex_enter(hmp);
		bp = dwp->av_forw;

		/*
		 * Go down only on the delayed write lists.
		 */
		while (bp != dwp) {

			ASSERT(bp->b_flags & B_DELWRI);

			if ((bp->b_flags & B_DELWRI) &&
			    (ddi_get_lbolt() - bp->b_start >= autoup) &&
			    sema_tryp(&bp->b_sem)) {
				bp->b_flags |= B_ASYNC;
				hp->b_length--;
				notavail(bp);
				mutex_exit(hmp);
				if (bp->b_vp == NULL) {
					BWRITE(bp);
				} else {
					UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs,
					    bp);
				}
				mutex_enter(hmp);
				bp = dwp->av_forw;
			} else {
				bp = bp->av_forw;
			}
		}
		mutex_exit(hmp);
	}

	/*
	 * There is no need to wakeup any thread waiting on bio_mem_cv
	 * since brelse will wake them up as soon as IO is complete.
	 */
	bfreelist.b_bcount = bcount;

	if (dopageflush)
		fsflush_do_pages();

	if (!doiflush)
		goto loop;

	/*
	 * If the system was not booted to single user mode, skip the
	 * inode flushing until after fsflush_iflush_delay secs have elapsed.
	 */
	if ((boothowto & RB_SINGLE) == 0 &&
	    (ddi_get_lbolt64() / hz) < fsflush_iflush_delay)
		goto loop;

	/*
	 * Flush cached attribute information (e.g. inodes).
	 */
	if (++count >= icount) {
		count = 0;

		/*
		 * Sync back cached data.
		 */
		RLOCK_VFSSW();
		for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
			if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
				vfs_refvfssw(vswp);
				RUNLOCK_VFSSW();
				(void) fsop_sync_by_kind(vswp - vfssw,
				    SYNC_ATTR, kcred);
				vfs_unrefvfssw(vswp);
				RLOCK_VFSSW();
			}
		}
		RUNLOCK_VFSSW();
	}
	goto loop;
}
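The selection criterion in the delayed-write loop above is purely an age test in clock ticks: v.v_autoup (seconds) is converted to ticks once as autoup = v.v_autoup * hz, and a B_DELWRI buffer is pushed out only when at least that many ticks have passed since its b_start timestamp. A tiny stand-alone restatement of that arithmetic, with HZ and AUTOUP_SECS as assumed example values:

#include <stdio.h>

#define HZ		100	/* assumed ticks per second, standing in for hz */
#define AUTOUP_SECS	30	/* assumed value of v.v_autoup */

/* Mirror of the age test: dirty for at least AUTOUP_SECS * HZ ticks? */
static int
old_enough(long now_ticks, long start_ticks)
{
	return (now_ticks - start_ticks >= (long)AUTOUP_SECS * HZ);
}

int
main(void)
{
	printf("%d\n", old_enough(5000, 1000));	/* dirty for 40 s -> 1 */
	printf("%d\n", old_enough(5000, 4000));	/* dirty for 10 s -> 0 */
	return (0);
}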
Example #5
/*
 * Make sure all write-behind blocks on dev (or NODEV for all)
 * are flushed out.
 */
void
bflush(dev_t dev)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *delwri_list = EMPTY_LIST;
	int i, index;
	kmutex_t *hmp;

	mutex_enter(&blist_lock);
	/*
	 * Wait for any invalidates or flushes ahead of us to finish.
	 * We really could split blist_lock up per device for better
	 * parallelism here.
	 */
	while (bio_doinginval || bio_doingflush) {
		bio_flinv_cv_wanted = 1;
		cv_wait(&bio_flushinval_cv, &blist_lock);
	}
	bio_doingflush++;
	/*
	 * Gather all B_DELWRI buffer for device.
	 * Lock ordering is b_sem > hash lock (brelse).
	 * Since we are finding the buffer via the delayed write list,
	 * it may be busy and we would block trying to get the
	 * b_sem lock while holding hash lock. So transfer all the
	 * candidates on the delwri_list and then drop the hash locks.
	 */
	for (i = 0; i < v.v_hbuf; i++) {
		vfs_syncprogress();
		hmp = &hbuf[i].b_lock;
		dp = (struct buf *)&dwbuf[i];
		mutex_enter(hmp);
		for (bp = dp->av_forw; bp != dp; bp = bp->av_forw) {
			if (dev == NODEV || bp->b_edev == dev) {
				if (bp->b_list == NULL) {
					bp->b_list = delwri_list;
					delwri_list = bp;
				}
			}
		}
		mutex_exit(hmp);
	}
	mutex_exit(&blist_lock);

	/*
	 * Now that the hash locks have been dropped grab the semaphores
	 * and write back all the buffers that have B_DELWRI set.
	 */
	while (delwri_list != EMPTY_LIST) {
		vfs_syncprogress();
		bp = delwri_list;

		sema_p(&bp->b_sem);	/* may block */
		if ((dev != bp->b_edev && dev != NODEV) ||
		    (panicstr && bp->b_flags & B_BUSY)) {
			sema_v(&bp->b_sem);
			delwri_list = bp->b_list;
			bp->b_list = NULL;
			continue;	/* No longer a candidate */
		}
		if (bp->b_flags & B_DELWRI) {
			index = bio_bhash(bp->b_edev, bp->b_blkno);
			hp = &hbuf[index];
			hmp = &hp->b_lock;
			dp = (struct buf *)hp;

			bp->b_flags |= B_ASYNC;
			mutex_enter(hmp);
			hp->b_length--;
			notavail(bp);
			mutex_exit(hmp);
			if (bp->b_vp == NULL) {		/* !ufs */
				BWRITE(bp);
			} else {			/* ufs */
				UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs, bp);
			}
		} else {
			sema_v(&bp->b_sem);
		}
		delwri_list = bp->b_list;
		bp->b_list = NULL;
	}
	mutex_enter(&blist_lock);
	bio_doingflush--;
	if (bio_flinv_cv_wanted) {
		bio_flinv_cv_wanted = 0;
		cv_broadcast(&bio_flushinval_cv);
	}
	mutex_exit(&blist_lock);
}
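The comment in bflush() spells out why it works in two phases: sema_p() can block, and the lock ordering is b_sem > hash lock, so candidates are first threaded onto a private delwri_list through the b_list pointer while the hash locks are held, and only after those locks are dropped does the blocking write-back start. The POSIX-threads sketch below shows just that gather-then-process shape; node, table_lock, flush_dev() and writeback() are invented names, and the per-entry locking and re-validation (compare the sketch after Example #2) are left out to keep it short.

#include <pthread.h>
#include <stdio.h>

struct node {
	int		dev;
	int		dirty;
	struct node	*next;		/* hash-chain link */
	struct node	*work;		/* private work-list link, like b_list */
	int		queued;		/* already on the work list? */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table_head;

static void
writeback(struct node *n)		/* may block; called with no locks held */
{
	printf("flushing node for dev %d\n", n->dev);
	n->dirty = 0;
}

void
flush_dev(int dev)
{
	struct node *n, *work = NULL;

	/* Phase 1: collect candidates while the table lock is held. */
	pthread_mutex_lock(&table_lock);
	for (n = table_head; n != NULL; n = n->next) {
		if (n->dev == dev && !n->queued) {
			n->queued = 1;
			n->work = work;
			work = n;
		}
	}
	pthread_mutex_unlock(&table_lock);

	/* Phase 2: do the blocking work with no table lock held. */
	while (work != NULL) {
		n = work;
		work = n->work;
		n->work = NULL;
		n->queued = 0;
		if (n->dirty)
			writeback(n);
	}
}

int
main(void)
{
	struct node a = { 1, 1, NULL, NULL, 0 };
	struct node b = { 2, 1, &a, NULL, 0 };

	table_head = &b;	/* chain: b -> a */
	flush_dev(1);		/* writes back only node a */
	return (0);
}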
Example #6
/*
 * Write the buffer, waiting for completion (unless B_ASYNC is set).
 * Then release the buffer.
 * This interface is provided for binary compatibility.  Using
 * BWRITE() directly avoids the extra function call overhead invoked
 * by calling this routine.
 */
void
bwrite(struct buf *bp)
{
	BWRITE(bp);
}