/* ARGSUSED */
/*
 * Filesystem server loop for an MFS mount.  The mounting process stays
 * in this routine servicing queued buffer I/O until mfs_close poisons
 * the buffer list with the (struct buf *)-1 shutdown sentinel.
 *
 * Returns 0 once asked to shut down.
 */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	caddr_t base;
	int sleepreturn = 0;

	base = mfsp->mfs_baseoff;
	/* (struct buf *)-1 is the shutdown sentinel set by mfs_close. */
	while (mfsp->mfs_buflist != (struct buf *)-1) {
		/* Drain every queued request, waking each waiter in turn. */
		while ((bp = mfsp->mfs_buflist) != NULL) {
			mfsp->mfs_buflist = bp->b_actf;
			mfs_doio(bp, base);
			wakeup((caddr_t)bp);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp, 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		/* Sleep until mfs_strategy queues work and wakes us. */
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
/* * Pass I/O requests to the memory filesystem process. */ int mfs_strategy(void *v) { struct vop_strategy_args *ap = v; struct buf *bp = ap->a_bp; struct mfsnode *mfsp; struct vnode *vp; struct proc *p = curproc; int s; if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0) panic("mfs_strategy: bad dev"); mfsp = VTOMFS(vp); if (p != NULL && mfsp->mfs_pid == p->p_pid) { mfs_doio(mfsp, bp); } else { s = splbio(); bp->b_actf = mfsp->mfs_buflist; mfsp->mfs_buflist = bp; splx(s); wakeup((caddr_t)vp); } return (0); }
/*
 * Device strategy routine for the MFS backing device.  Validates and
 * clips the request against the filesystem size, then either performs
 * the I/O directly (when called from the MFS thread itself) or queues
 * the bio for that thread and wakes it.  Always returns 0; errors are
 * reported through the buf's b_error/b_flags via biodone().
 */
int
mfsstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	off_t boff = bio->bio_offset;
	off_t eoff = boff + bp->b_bcount;
	struct mfsnode *mfsp;

	/* The device must still be attached to an mfsnode. */
	if ((mfsp = dev->si_drv1) == NULL) {
		bp->b_error = ENXIO;
		goto error;
	}
	if (boff < 0)
		goto bad;
	if (eoff > mfsp->mfs_size) {
		/* Wholly past EOF, or the caller forbids clipping: fail. */
		if (boff > mfsp->mfs_size || (bp->b_flags & B_BNOCLIP))
			goto bad;
		/*
		 * Return EOF by completing the I/O with 0 bytes transferred.
		 * Set B_INVAL to indicate that any data in the buffer is not
		 * valid.
		 */
		if (boff == mfsp->mfs_size) {
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_INVAL;
			goto done;
		}
		/* Straddles EOF: clip the transfer length. */
		bp->b_bcount = mfsp->mfs_size - boff;
	}

	/*
	 * Initiate I/O
	 */
	if (mfsp->mfs_td == curthread) {
		/* We are the MFS thread: perform the I/O synchronously. */
		mfs_doio(bio, mfsp);
	} else {
		/* Hand off to the MFS thread and wake it. */
		bioq_insert_tail(&mfsp->bio_queue, bio);
		wakeup((caddr_t)mfsp);
	}
	return(0);

	/*
	 * Failure conditions on bio
	 */
bad:
	bp->b_error = EINVAL;
	/* FALLTHROUGH */
error:
	bp->b_flags |= B_ERROR | B_INVAL;
	/* FALLTHROUGH */
done:
	biodone(bio);
	return(0);
}
/* ARGSUSED */
/*
 * Close the MFS device vnode: drain any pending I/O, flush in-core
 * blocks, then tell the server loop (mfs_start) to exit by poisoning
 * the buffer list with the (struct buf *)-1 sentinel.
 *
 * Returns 0 on success or the error from vinvalbuf().
 */
int
mfs_close(void *v)
{
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int error, s;

	/*
	 * Finish any pending I/O requests.
	 */
	while (1) {
		/* splbio protects the list from interrupt-level queuing. */
		s = splbio();
		bp = mfsp->mfs_buflist;
		if (bp == NULL) {
			splx(s);
			break;
		}
		mfsp->mfs_buflist = bp->b_actf;
		splx(s);
		/* Do the I/O at base spl and wake the waiting requester. */
		mfs_doio(mfsp, bp);
		wakeup((caddr_t)bp);
	}
	/*
	 * On last close of a memory filesystem
	 * we must invalidate any in core blocks, so that
	 * we can free up its vnode.
	 */
	if ((error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0)) != 0)
		return (error);
#ifdef DIAGNOSTIC
	/*
	 * There should be no way to have any more uses of this
	 * vnode, so if we find any other uses, it is a panic.
	 */
	if (vp->v_usecount > 1)
		printf("mfs_close: ref count %d > 1\n", vp->v_usecount);
	if (mfsp->mfs_buflist)
		printf("mfs_close: dirty buffers\n");
	if (vp->v_usecount > 1 || mfsp->mfs_buflist)
		panic("mfs_close");
#endif
	/*
	 * Send a request to the filesystem server to exit.
	 */
	mfsp->mfs_buflist = (struct buf *)(-1);
	wakeup((caddr_t)vp);
	return (0);
}
/* ARGSUSED */
/*
 * Close the MFS device vnode: drain the buffer queue, flush in-core
 * blocks, then signal the server loop to exit via mfs_shutdown and the
 * mfsnode's condition variable.
 *
 * Returns 0 on success or the error from vinvalbuf().
 */
int
mfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int error;

	/*
	 * Finish any pending I/O requests.
	 */
	mutex_enter(&mfs_lock);
	while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
		/* Drop the lock around the actual I/O. */
		mutex_exit(&mfs_lock);
		mfs_doio(bp, mfsp->mfs_baseoff);
		mutex_enter(&mfs_lock);
	}
	mutex_exit(&mfs_lock);
	/*
	 * On last close of a memory filesystem
	 * we must invalidate any in core blocks, so that
	 * we can free up its vnode.
	 */
	if ((error = vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 0, 0)) != 0)
		return (error);
	/*
	 * There should be no way to have any more uses of this
	 * vnode, so if we find any other uses, it is a panic.
	 */
	if (bufq_peek(mfsp->mfs_buflist) != NULL)
		panic("mfs_close");
	/*
	 * Send a request to the filesystem server to exit.
	 */
	mutex_enter(&mfs_lock);
	mfsp->mfs_shutdown = 1;
	cv_broadcast(&mfsp->mfs_cv);
	mutex_exit(&mfs_lock);
	return (0);
}
/* * Pass I/O requests to the memory filesystem process. */ int mfs_strategy(void *v) { struct vop_strategy_args /* { struct vnode *a_vp; struct buf *a_bp; } */ *ap = v; struct vnode *vp = ap->a_vp; struct buf *bp = ap->a_bp; struct mfsnode *mfsp; if (vp->v_type != VBLK || vp->v_usecount == 0) panic("mfs_strategy: bad dev"); mfsp = VTOMFS(vp); /* check for mini-root access */ if (mfsp->mfs_proc == NULL) { void *base; base = (char *)mfsp->mfs_baseoff + (bp->b_blkno << DEV_BSHIFT); if (bp->b_flags & B_READ) memcpy(bp->b_data, base, bp->b_bcount); else memcpy(base, bp->b_data, bp->b_bcount); bp->b_resid = 0; biodone(bp); } else if (mfsp->mfs_proc == curproc) { mfs_doio(bp, mfsp->mfs_baseoff); } else if (doing_shutdown) { /* * bitbucket I/O during shutdown. * Note that reads should *not* happen here, but.. */ if (bp->b_flags & B_READ) printf("warning: mfs read during shutdown\n"); bp->b_resid = 0; biodone(bp); } else { mutex_enter(&mfs_lock); bufq_put(mfsp->mfs_buflist, bp); cv_broadcast(&mfsp->mfs_cv); mutex_exit(&mfs_lock); } return (0); }
/* ARGSUSED */ int mfs_close(void *v) { struct vop_close_args *ap = v; struct vnode *vp = ap->a_vp; struct mfsnode *mfsp = VTOMFS(vp); struct buf *bp; int error; /* * Finish any pending I/O requests. */ while (1) { bp = bufq_dequeue(&mfsp->mfs_bufq); if (bp == NULL) break; mfs_doio(mfsp, bp); wakeup(bp); } /* * On last close of a memory filesystem we must invalidate any in * core blocks, so that we can free up its vnode. */ if ((error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0)) != 0) return (error); #ifdef DIAGNOSTIC /* * There should be no way to have any more buffers on this vnode. */ if (bufq_peek(&mfsp->mfs_bufq)) printf("mfs_close: dirty buffers\n"); #endif /* * Send a request to the filesystem server to exit. */ mfsp->mfs_shutdown = 1; wakeup(vp); return (0); }
/* * Pass I/O requests to the memory filesystem process. */ int mfs_strategy(void *v) { struct vop_strategy_args *ap = v; struct buf *bp = ap->a_bp; struct mfsnode *mfsp; struct vnode *vp; struct proc *p = curproc; if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0) panic("mfs_strategy: bad dev"); mfsp = VTOMFS(vp); if (p != NULL && mfsp->mfs_pid == p->p_pid) { mfs_doio(mfsp, bp); } else { bufq_queue(&mfsp->mfs_bufq, bp); wakeup(vp); } return (0); }
/* ARGSUSED */
/*
 * Filesystem server loop for an MFS mount.  The mounting process stays
 * here servicing queued buffers until mfs_close sets mfs_shutdown.
 * A signal delivered while idle triggers an unmount attempt (forced
 * when the signal is SIGKILL).
 *
 * Returns 0 once asked to shut down.
 */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int sleepreturn = 0;

	while (1) {
		/* Drain queued requests, re-checking shutdown each pass. */
		while (1) {
			if (mfsp->mfs_shutdown == 1)
				break;
			bp = bufq_dequeue(&mfsp->mfs_bufq);
			if (bp == NULL)
				break;
			mfs_doio(mfsp, bp);
			wakeup(bp);
		}
		if (mfsp->mfs_shutdown == 1)
			break;

		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			/* SIGKILL forces the unmount. */
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp,
			    (CURSIG(p) == SIGKILL) ? MNT_FORCE : 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		/* Sleep until mfs_strategy queues work and wakes us. */
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
/* ARGSUSED */
/*
 * Filesystem server loop for an MFS mount.  Takes a reference on the
 * mfsnode so it cannot disappear under us, services queued buffers
 * until mfs_shutdown is set, then drops the reference and, if it was
 * the last one, frees the mfsnode and its resources.
 *
 * Returns the final value of sleepreturn (0, or the error from
 * cv_wait_sig when interrupted by a signal).
 */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		/* Drain queued requests, dropping the lock around I/O. */
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				/* Unmount failed: flush pending signals. */
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}
		/* Interruptible wait for more work or shutdown. */
		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);
	/* Drop our reference; last one out frees the mfsnode. */
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);
	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}
	return (sleepreturn);
}