/*
 * Disable fastpath mode.
 *
 * Waits until no thread on either end (for pipes, the peer as well) holds
 * FIFOSTAYFAST, then reverts this node — and for a pipe, its peer — to
 * STREAMS mode via fifo_fastturnoff().  Caller must hold flk_lock; the
 * lock may be dropped and re-acquired inside cv_wait().
 */
void
fifo_fastoff(fifonode_t *fnp)
{
	ASSERT(MUTEX_HELD(&fnp->fn_lock->flk_lock));
	ASSERT(FTOV(fnp)->v_stream);

	/* FIFOSTAYFAST is set => FIFOFAST is set */
	while ((fnp->fn_flag & FIFOSTAYFAST) || ((fnp->fn_flag & ISPIPE) &&
	    (fnp->fn_dest->fn_flag & FIFOSTAYFAST))) {
		ASSERT(fnp->fn_flag & FIFOFAST);
		/* indicate someone is waiting to turn into stream mode */
		fnp->fn_flag |= FIFOWAITMODE;
		/* drops and re-acquires flk_lock while blocked */
		cv_wait(&fnp->fn_wait_cv, &fnp->fn_lock->flk_lock);
		fnp->fn_flag &= ~FIFOWAITMODE;
	}

	/*
	 * As we may have released the lock, another thread may already
	 * have turned fast mode off; test the FIFOFAST flag here.
	 */
	if (!(fnp->fn_flag & FIFOFAST))
		return;

#if FIFODEBUG
	if (Fifo_verbose)
		cmn_err(CE_NOTE, "Fifo reverting to streams mode\n");
#endif
	fifo_fastturnoff(fnp);
	if (fnp->fn_flag & ISPIPE) {
		/* a pipe's two ends always change mode together */
		fifo_fastturnoff(fnp->fn_dest);
	}
}
/*
 * Cache destructor for pipe_cache: sanity-check the two fifo vnodes in
 * DEBUG builds, then delegate the real teardown to fnode_destructor().
 */
static void
pipe_destructor(void *buf, void *cdrarg)
{
#ifdef DEBUG
	fifodata_t *fdp = buf;
	vnode_t *vp1 = FTOV(&fdp->fifo_fnode[0]);
	vnode_t *vp2 = FTOV(&fdp->fifo_fnode[1]);

	/* both ends must still belong to the fifo pseudo-filesystem */
	ASSERT(vp1->v_vfsp == fifovfsp);
	ASSERT(vp2->v_vfsp == fifovfsp);
	ASSERT(vp1->v_rdev == fifodev);
	ASSERT(vp2->v_rdev == fifodev);
#endif
	fnode_destructor(buf, cdrarg);
}
/*
 * Wake up the reader side of a fifo: broadcast to sleepers waiting for
 * data, and — if that end is open — re-issue poll and SIGPOLL
 * notifications as requested by its flags.  Caller holds flk_lock.
 */
void
fifo_wakereader(fifonode_t *fn_dest, fifolock_t *fn_lock)
{
	int flags = fn_dest->fn_flag;

	ASSERT(MUTEX_HELD(&fn_lock->flk_lock));

	if (flags & FIFOWANTR)
		cv_broadcast(&fn_dest->fn_wait_cv);

	if (flags & FIFOISOPEN) {
		if (flags & FIFOPOLLR)
			strpollwakeup(FTOV(fn_dest), POLLIN | POLLRDNORM);
		if (flags & FIFOSETSIG)
			str_sendsig(FTOV(fn_dest), S_INPUT | S_RDNORM, 0, 0);
	}

	/* both wait and poll requests have now been serviced */
	fn_dest->fn_flag = flags & ~(FIFOWANTR | FIFOPOLLR);
}
/*
 * Create a pipe end by...
 * allocating a vnode-fifonode pair and initializing the fifonode.
 * Returns the two vnodes through *vpp1 / *vpp2.
 */
void
makepipe(vnode_t **vpp1, vnode_t **vpp2)
{
	fifodata_t *fdp;
	fifonode_t *fna;
	fifonode_t *fnb;
	vnode_t *va;
	vnode_t *vb;
	time_t now;

	fdp = kmem_cache_alloc(pipe_cache, KM_SLEEP);
	fdp->fifo_lock.flk_ref = 2;	/* one reference per end */
	fna = &fdp->fifo_fnode[0];
	fnb = &fdp->fifo_fnode[1];

	/* a fresh pipe starts with one reader and one writer on each end */
	fna->fn_wcnt = fnb->fn_wcnt = 1;
	fna->fn_rcnt = fnb->fn_rcnt = 1;

#if FIFODEBUG
	if (!Fifo_fastmode)
		fna->fn_flag = fnb->fn_flag = ISPIPE;
	else
		fna->fn_flag = fnb->fn_flag = ISPIPE | FIFOFAST;
#else /* FIFODEBUG */
	fna->fn_flag = fnb->fn_flag = ISPIPE | FIFOFAST;
#endif /* FIFODEBUG */

	now = gethrestime_sec();
	fna->fn_atime = fnb->fn_atime = now;
	fna->fn_mtime = fnb->fn_mtime = now;
	fna->fn_ctime = fnb->fn_ctime = now;

	*vpp1 = va = FTOV(fna);
	*vpp2 = vb = FTOV(fnb);

	fifo_reinit_vp(va);		/* Reinitialize vnodes for reuse... */
	fifo_reinit_vp(vb);
	va->v_vfsp = fifovfsp;		/* Need to re-establish VFS & device */
	vb->v_vfsp = fifovfsp;		/* before we can reuse this vnode. */
	va->v_rdev = fifodev;
	vb->v_rdev = fifodev;
}
static int pipe_constructor(void *buf, void *cdrarg, int kmflags) { fifodata_t *fdp = buf; fifonode_t *fnp1 = &fdp->fifo_fnode[0]; fifonode_t *fnp2 = &fdp->fifo_fnode[1]; vnode_t *vp1; vnode_t *vp2; (void) fnode_constructor(buf, cdrarg, kmflags); vp1 = FTOV(fnp1); vp2 = FTOV(fnp2); vp1->v_vfsp = vp2->v_vfsp = fifovfsp; vp1->v_rdev = vp2->v_rdev = fifodev; fnp1->fn_realvp = fnp2->fn_realvp = NULL; fnp1->fn_dest = fnp2; fnp2->fn_dest = fnp1; return (0); }
/*
 * Find a fifonode-vnode pair on the fifoalloc hash list.
 * vp is a vnode to be shadowed. If it's on the hash list,
 * it already has a shadow, therefore return its corresponding
 * fifonode (with an extra hold on its vnode); otherwise NULL.
 * Caller must hold ftable_lock.
 */
static fifonode_t *
fifofind(vnode_t *vp)
{
	fifonode_t *fnp;

	ASSERT(MUTEX_HELD(&ftable_lock));

	for (fnp = fifoalloc[FIFOHASH(vp)]; fnp != NULL; fnp = fnp->fn_nextp) {
		if (fnp->fn_realvp != vp)
			continue;
		/* found an existing shadow; hold it for the caller */
		VN_HOLD(FTOV(fnp));
		return (fnp);
	}
	return (NULL);
}
/*
 * Wake up the writer side of a fifo: broadcast to sleepers waiting for
 * room, and — if that end is open and was blocked at the high-water
 * mark — re-issue poll and SIGPOLL notifications.  Caller holds flk_lock.
 */
void
fifo_wakewriter(fifonode_t *fn_dest, fifolock_t *fn_lock)
{
	int flags = fn_dest->fn_flag;

	ASSERT(MUTEX_HELD(&fn_lock->flk_lock));
	/* there must actually be room before waking writers */
	ASSERT(fn_dest->fn_dest->fn_count < Fifohiwat);

	if (flags & FIFOWANTW)
		cv_broadcast(&fn_dest->fn_wait_cv);

	if ((flags & (FIFOHIWATW | FIFOISOPEN)) ==
	    (FIFOHIWATW | FIFOISOPEN)) {
		if (flags & FIFOPOLLW)
			strpollwakeup(FTOV(fn_dest), POLLWRNORM);
		if (flags & FIFOSETSIG)
			str_sendsig(FTOV(fn_dest), S_WRNORM, 0, 0);
	}

	/*
	 * FIFOPOLLW can't be set without setting FIFOHIWAT
	 * This allows us to clear both here.
	 */
	fn_dest->fn_flag = flags & ~(FIFOWANTW | FIFOHIWATW | FIFOPOLLW);
}
/*
 * Revert one fifonode from fast mode to STREAMS mode: move any message
 * buffered on the node down to its STREAM head, re-issue pending poll
 * events so the STREAMS framework can see them, and wake all sleepers
 * so they notice the mode change.  Caller must hold flk_lock, which is
 * held across the entire operation (including the put()).
 */
static void
fifo_fastturnoff(fifonode_t *fnp)
{
	fifonode_t *fn_dest = fnp->fn_dest;
	mblk_t *fn_mp;
	int fn_flag;

	ASSERT(MUTEX_HELD(&fnp->fn_lock->flk_lock));

	/*
	 * Note: This end can't be closed if there
	 * is stuff in fn_mp
	 */
	if ((fn_mp = fnp->fn_mp) != NULL) {
		ASSERT(fnp->fn_flag & FIFOISOPEN);
		ASSERT(FTOV(fnp)->v_stream != NULL);
		ASSERT(FTOV(fnp)->v_stream->sd_wrq != NULL);
		ASSERT(RD(FTOV(fnp)->v_stream->sd_wrq) != NULL);
		ASSERT(strvp2wq(FTOV(fnp)) != NULL);

		/* the buffered data now lives at the STREAM head instead */
		fnp->fn_mp = NULL;
		fnp->fn_count = 0;

		/*
		 * Don't need to drop flk_lock across the put()
		 * since we're just moving the message from the fifo
		 * node to the STREAM head...
		 */
		put(RD(strvp2wq(FTOV(fnp))), fn_mp);
	}

	/*
	 * Need to re-issue any pending poll requests
	 * so that the STREAMS framework sees them
	 * Writers would be waiting on fnp and readers on fn_dest
	 */
	if ((fnp->fn_flag & (FIFOISOPEN | FIFOPOLLW)) ==
	    (FIFOISOPEN | FIFOPOLLW)) {
		strpollwakeup(FTOV(fnp), POLLWRNORM);
	}
	fn_flag = fn_dest->fn_flag;
	if ((fn_flag & FIFOISOPEN) == FIFOISOPEN) {
		if ((fn_flag & (FIFOPOLLR | FIFOPOLLRBAND))) {
			strpollwakeup(FTOV(fn_dest), POLLIN|POLLRDNORM);
		}
	}

	/*
	 * wake up any sleeping processes so they can notice we went
	 * to streams mode
	 */
	fnp->fn_flag &= ~(FIFOFAST|FIFOWANTW|FIFOWANTR);
	cv_broadcast(&fnp->fn_wait_cv);
}
static void fnode_destructor(void *buf, void *cdrarg) { fifodata_t *fdp = buf; fifolock_t *flp = &fdp->fifo_lock; fifonode_t *fnp = &fdp->fifo_fnode[0]; size_t size = (uintptr_t)cdrarg; mutex_destroy(&flp->flk_lock); cv_destroy(&flp->flk_wait_cv); ASSERT(flp->flk_ocsync == 0); while ((char *)fnp < (char *)buf + size) { vnode_t *vp = FTOV(fnp); if (vp == NULL) { return; /* constructor failed here */ } ASSERT(fnp->fn_mp == NULL); ASSERT(fnp->fn_count == 0); ASSERT(fnp->fn_lock == flp); ASSERT(fnp->fn_open == 0); ASSERT(fnp->fn_insync == 0); ASSERT(fnp->fn_rsynccnt == 0 && fnp->fn_wsynccnt == 0); ASSERT(fnp->fn_wwaitcnt == 0); ASSERT(fnp->fn_pcredp == NULL); ASSERT(vn_matchops(vp, fifo_vnodeops)); ASSERT(vp->v_stream == NULL); ASSERT(vp->v_type == VFIFO); ASSERT(vp->v_data == (caddr_t)fnp); ASSERT((vp->v_flag & (VNOMAP|VNOSWAP)) == (VNOMAP|VNOSWAP)); cv_destroy(&fnp->fn_wait_cv); vn_invalid(vp); vn_free(vp); fnp++; } }
/*
 * Get a vnode for the NTFS attribute (attrtype:attrname) of inode ino on
 * mount mp, creating one if it does not already exist.  On success the
 * vnode is returned locked in *vpp with an extra reference.
 *
 * NOTE(review): when VG_DONTVALIDFN is set or the fnode is already valid,
 * f_type stays VINT and is stored into vp->v_type for a new vnode —
 * presumably intentional (caller fixes the type later); confirm.
 */
int
ntfs_vgetex(struct mount *mp, ino_t ino, u_int32_t attrtype, char *attrname,
    u_long lkflags, u_long flags, struct thread *td, struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type;

	dprintf(("ntfs_vgetex: ino: %ju, attr: 0x%x:%s, lkf: 0x%lx, f: 0x%lx\n",
	    (uintmax_t) ino, attrtype, attrname?attrname:"", lkflags, flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		kprintf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}

	/* It may be not initialized fully, so force load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if (error) {
			kprintf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO: %"PRId64"\n",
			    ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	/* look up (or create) the per-attribute fnode on this ntnode */
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		kprintf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	f_type = VINT;
	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA && fp->f_attrname == NULL)) {
			/* unnamed $DATA of a directory record => directory */
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VINT;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;
			error = ntfs_filesize(ntmp, fp, &fp->f_size,
			    &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}
		fp->f_flag |= FN_VALID;
	}

	/* an existing vnode for this fnode: just get a fresh reference */
	if (FTOV(fp)) {
		VGET(FTOV(fp), lkflags);
		*vpp = FTOV(fp);
		ntfs_ntput(ip);
		return (0);
	}

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, &vp, VLKTIMEOUT, 0);
	if (error) {
		ntfs_frele(fp);
		ntfs_ntput(ip);
		return (error);
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %ju\n", vp, (uintmax_t)ino));

	/* bind the new vnode and the fnode together */
	fp->f_vp = vp;
	vp->v_data = fp;
	vp->v_type = f_type;

	if (ino == NTFS_ROOTINO)
		vsetflags(vp, VROOT);

	/*
	 * Normal files use the buffer cache
	 */
	if (f_type == VREG)
		vinitvmio(vp, fp->f_size, PAGE_SIZE, -1);

	ntfs_ntput(ip);

	KKASSERT(lkflags & LK_TYPE_MASK);
	/* XXX leave vnode locked exclusively from getnewvnode */
	*vpp = vp;
	return (0);
}
/*
 * Provide a shadow for a vnode. We create a new shadow before checking for an
 * existing one, to minimize the amount of time we need to hold ftable_lock.
 * If a vp already has a shadow in the hash list, return its shadow. If not,
 * we hash the new vnode and return its pointer to the caller.
 */
vnode_t *
fifovp(vnode_t *vp, cred_t *crp)
{
	fifonode_t *fnp;
	fifonode_t *spec_fnp;	/* Speculative fnode ptr. */
	fifodata_t *fdp;
	vnode_t *newvp;
	struct vattr va;
	vnode_t *rvp;

	ASSERT(vp != NULL);

	/* allocate the shadow before taking ftable_lock (KM_SLEEP may block) */
	fdp = kmem_cache_alloc(fnode_cache, KM_SLEEP);
	fdp->fifo_lock.flk_ref = 1;
	fnp = &fdp->fifo_fnode[0];

	/*
	 * It's possible that fifo nodes on different lofs mountpoints
	 * shadow the same real filesystem fifo node.
	 * In this case it's necessary to get and store the realvp.
	 * This way different fifo nodes sharing the same real vnode
	 * can use realvp for communication.
	 */
	if (VOP_REALVP(vp, &rvp, NULL) == 0)
		vp = rvp;

	fnp->fn_realvp = vp;
	fnp->fn_wcnt = 0;
	fnp->fn_rcnt = 0;

#if FIFODEBUG
	if (! Fifo_fastmode) {
		fnp->fn_flag = 0;
	} else {
		fnp->fn_flag = FIFOFAST;
	}
#else /* FIFODEBUG */
	fnp->fn_flag = FIFOFAST;
#endif /* FIFODEBUG */

	/*
	 * initialize the times from vp.
	 */
	va.va_mask = AT_TIMES;
	if (VOP_GETATTR(vp, &va, 0, crp, NULL) == 0) {
		fnp->fn_atime = va.va_atime.tv_sec;
		fnp->fn_mtime = va.va_mtime.tv_sec;
		fnp->fn_ctime = va.va_ctime.tv_sec;
	} else {
		/* getattr failed; fall back to zeroed timestamps */
		fnp->fn_atime = 0;
		fnp->fn_mtime = 0;
		fnp->fn_ctime = 0;
	}

	/*
	 * Grab the VP here to avoid holding locks
	 * whilst trying to acquire others.
	 */
	VN_HOLD(vp);

	mutex_enter(&ftable_lock);

	if ((spec_fnp = fifofind(vp)) != NULL) {
		mutex_exit(&ftable_lock);

		/*
		 * Release the vnode and free up our pre-prepared fnode.
		 * Zero the lock reference just to explicitly signal
		 * this is unused.
		 */
		VN_RELE(vp);
		fdp->fifo_lock.flk_ref = 0;
		kmem_cache_free(fnode_cache, fdp);

		/* fifofind() already put a hold on the existing shadow */
		return (FTOV(spec_fnp));
	}

	newvp = FTOV(fnp);
	fifo_reinit_vp(newvp);

	/*
	 * Since the fifo vnode's v_vfsp needs to point to the
	 * underlying filesystem's vfsp we need to bump up the
	 * underlying filesystem's vfs reference count.
	 * The count is decremented when the fifo node is
	 * inactivated.
	 */
	VFS_HOLD(vp->v_vfsp);
	newvp->v_vfsp = vp->v_vfsp;
	newvp->v_rdev = vp->v_rdev;
	newvp->v_flag |= (vp->v_flag & VROOT);

	/* publish the new shadow on the hash list while still locked */
	fifoinsert(fnp);
	mutex_exit(&ftable_lock);

	return (newvp);
}
/*
 * Get a vnode for the NTFS attribute (attrtype:attrname) of inode ino on
 * mount mp, creating one if it does not already exist.  On success *vpp
 * holds a referenced vnode, locked according to lkflags.  Races with
 * concurrent vnode creation/recycling are handled by retrying from "loop".
 */
int
ntfs_vgetex(
	struct mount *mp,
	ino_t ino,
	u_int32_t attrtype,
	char *attrname,
	u_long lkflags,
	u_long flags,
	struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type = VBAD;	/* VBAD == "don't change v_type" */

	dprintf(("ntfs_vgetex: ino: %llu, attr: 0x%x:%s, lkf: 0x%lx, f:"
	    " 0x%lx\n", (unsigned long long)ino, attrtype,
	    attrname ? attrname : "", (u_long)lkflags, (u_long)flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

loop:
	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		printf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}

	/* It may be not initialized fully, so force load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if (error) {
			printf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO:"
			    " %llu\n", (unsigned long long)ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	/* look up (or create) the per-attribute fnode on this ntnode */
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA &&
		    fp->f_attrname == NULL)) {
			/* unnamed $DATA of a directory record => directory */
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VNON;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;
			error = ntfs_filesize(ntmp, fp, &fp->f_size,
			    &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}
		fp->f_flag |= FN_VALID;
	}

	/*
	 * We may be calling vget() now. To avoid potential deadlock, we need
	 * to release ntnode lock, since due to locking order vnode
	 * lock has to be acquired first.
	 * ntfs_fget() bumped ntnode usecount, so ntnode won't be recycled
	 * prematurely.
	 * Take v_interlock before releasing ntnode lock to avoid races.
	 */
	vp = FTOV(fp);
	if (vp) {
		mutex_enter(vp->v_interlock);
		ntfs_ntput(ip);
		/* vget failure => vnode was reclaimed under us; retry */
		if (vget(vp, lkflags) != 0)
			goto loop;
		*vpp = vp;
		return 0;
	}
	ntfs_ntput(ip);

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, ntfs_vnodeop_p,
	    NULL, &vp);
	if (error) {
		ntfs_frele(fp);
		return (error);
	}

	/* re-lock the ntnode and re-fetch the fnode to check for a race */
	ntfs_ntget(ip);
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}
	if (FTOV(fp)) {
		/*
		 * Another thread beat us, put back freshly allocated
		 * vnode and retry.
		 */
		ntfs_ntput(ip);
		ungetnewvnode(vp);
		goto loop;
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %llu\n", vp,
	    (unsigned long long)ino));

	/* bind the new vnode and the fnode together */
	fp->f_vp = vp;
	vp->v_data = fp;
	if (f_type != VBAD)
		vp->v_type = f_type;
	genfs_node_init(vp, &ntfs_genfsops);

	if (ino == NTFS_ROOTINO)
		vp->v_vflag |= VV_ROOT;

	ntfs_ntput(ip);

	if (lkflags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, lkflags);
		if (error) {
			vput(vp);
			return (error);
		}
	}

	uvm_vnp_setsize(vp, fp->f_size); /* XXX: mess, cf. ntfs_lookupfile() */
	vref(ip->i_devvp);
	*vpp = vp;
	return (0);
}