/*
 * Create a vnode and in-memory inode.  EAGAIN is returned if we race the
 * creation of the in-memory inode.
 *
 * A locked, referenced vnode is returned in *vpp if no error occurs.
 */
int
user_getnewvnode(struct mount *mp, struct vnode **vpp, ino_t ino,
		 enum vtype vtype)
{
	struct user_inode *ip;
	struct user_mount *ump;
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_USERFS, mp, vpp, 0, LK_CANRECURSE);
	if (error)
		return (error);
	vp = *vpp;
	ip = kmalloc(sizeof(*ip), M_USERFSINODE, M_WAITOK | M_ZERO);
	vinitvmio(vp, 0, PAGE_SIZE, 0);

	ump = (void *)mp->mnt_data;

	/*
	 * Snap together the new vnode/inode and determine if we have
	 * raced the tree.
	 */
	if (userfs_ino_rb_tree_RB_LOOKUP(&ump->rb_root, ino)) {
		kfree(ip, M_USERFSINODE);
		vp->v_type = VBAD;
		vput(*vpp);
		return (EAGAIN);
	}

	ip->inum = ino;
	ip->vp = vp;
	ip->ump = ump;
	vp->v_data = ip;
	vp->v_type = vtype;
	userfs_ino_rb_tree_RB_INSERT(&ump->rb_root, ip);
	return (0);
}
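/*
 * Sketch of the intended caller-side retry (hypothetical helper, not part
 * of the excerpt above): on EAGAIN the racing creator's inode is already
 * in the RB tree, so the caller loops, finds the winner, and acquires its
 * vnode instead.  user_example_vget() and its locking are illustrative
 * only; tree locking is elided for brevity.
 */
static int
user_example_vget(struct mount *mp, ino_t ino, enum vtype vtype,
		  struct vnode **vpp)
{
	struct user_mount *ump = (void *)mp->mnt_data;
	struct user_inode *ip;
	int error;

	for (;;) {
		/* Reuse an inode already snapped into the tree. */
		ip = userfs_ino_rb_tree_RB_LOOKUP(&ump->rb_root, ino);
		if (ip) {
			if (vget(ip->vp, LK_EXCLUSIVE) != 0)
				continue;	/* lost a reclaim race */
			*vpp = ip->vp;
			return (0);
		}
		error = user_getnewvnode(mp, vpp, ino, vtype);
		if (error != EAGAIN)
			return (error);	/* 0: new locked vnode, else hard error */
		/* Raced another creator; loop and find the winner. */
	}
}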
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_inode_data_t *ipdata;
	hammer2_pfsmount_t *pmp;
	struct vnode *vp;
	ccms_state_t ostate;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;
	ipdata = &ip->chain->data->ipdata;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		ostate = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, ostate);
			continue;
		}

		switch (ipdata->type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			vinitvmio(vp, ipdata->size,
				  HAMMER2_LBUFSIZE,
				  (int)ipdata->size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			vinitvmio(vp, ipdata->size,
				  HAMMER2_LBUFSIZE,
				  (int)ipdata->size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ipdata->type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ipdata->rmajor, ipdata->rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		default:
			panic("hammer2: unhandled objtype %d", ipdata->type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, ostate);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
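/*
 * Illustrative caller sketch (not part of the excerpt above): a typical
 * VFS path locks the inode, calls hammer2_igetv(), then unlocks.  The
 * hammer2_inode_lock_sh()/hammer2_inode_unlock_sh() helper names and the
 * wrapper itself are assumptions for illustration; on success the
 * returned vnode is already exclusively locked and referenced.
 */
static int
hammer2_example_vget(hammer2_inode_t *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	hammer2_inode_lock_sh(ip);	/* igetv requires the inode locked */
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock_sh(ip);

	if (vp == NULL)			/* NULL vp iff *errorp != 0 */
		return (error);
	*vpp = vp;			/* exclusively locked + referenced */
	return (0);
}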
int
ntfs_vgetex(struct mount *mp, ino_t ino, u_int32_t attrtype, char *attrname,
	    u_long lkflags, u_long flags, struct thread *td,
	    struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type;

	dprintf(("ntfs_vgetex: ino: %ju, attr: 0x%x:%s, lkf: 0x%lx, f: 0x%lx\n",
		(uintmax_t) ino, attrtype, attrname ? attrname : "",
		lkflags, flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		kprintf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}

	/* It may not be fully initialized, so force-load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if (error) {
			kprintf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO: %"PRId64"\n",
				ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		kprintf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	f_type = VINT;
	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA &&
		     fp->f_attrname == NULL)) {
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VINT;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;
			error = ntfs_filesize(ntmp, fp,
					      &fp->f_size, &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}
		fp->f_flag |= FN_VALID;
	}

	if (FTOV(fp)) {
		VGET(FTOV(fp), lkflags);
		*vpp = FTOV(fp);
		ntfs_ntput(ip);
		return (0);
	}

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, &vp, VLKTIMEOUT, 0);
	if (error) {
		ntfs_frele(fp);
		ntfs_ntput(ip);
		return (error);
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %ju\n",
		vp, (uintmax_t)ino));

	fp->f_vp = vp;
	vp->v_data = fp;
	vp->v_type = f_type;

	if (ino == NTFS_ROOTINO)
		vsetflags(vp, VROOT);

	/*
	 * Normal files use the buffer cache
	 */
	if (f_type == VREG)
		vinitvmio(vp, fp->f_size, PAGE_SIZE, -1);

	ntfs_ntput(ip);

	KKASSERT(lkflags & LK_TYPE_MASK);
	/* XXX leave vnode locked exclusively from getnewvnode */
	*vpp = vp;
	return (0);
}
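/*
 * Sketch of a plausible entry-point wrapper (assumed, not part of the
 * excerpt above): resolving an inode's default unnamed $DATA attribute
 * with an exclusive, retried lock.  This mirrors the conventional
 * ntfs_vget() shape; treat the exact flag choices as illustrative.
 */
static int
ntfs_example_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	return (ntfs_vgetex(mp, ino, NTFS_A_DATA, NULL,
			    LK_EXCLUSIVE | LK_RETRY, 0, curthread, vpp));
}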
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
				DEVFS_MNTDATA(vp->v_mount)->root_node,
				ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Nroot) ?
					    "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s!\n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS, 64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open the underlying device
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will only be allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {
		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow opening for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;

			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}
	return 0;
}
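/*
 * Context sketch (assumed wiring, abbreviated): devfs_spec_open() is a
 * VOP and is reached through a devfs vnode ops vector along these lines.
 * The real vector registers many more operations; the vector name and the
 * field subset here are illustrative only.
 */
static struct vop_ops devfs_example_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_open =		devfs_spec_open,
};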
/*
 * Allocates a new vnode for the node, or returns a new reference to an
 * existing one if the node already has a vnode referencing it.  The
 * resulting locked vnode is returned in *vpp.
 *
 * Returns zero on success or an appropriate error code on failure.
 *
 * The caller must ensure that node cannot go away (usually by holding
 * the related directory entry).
 *
 * If dnode is non-NULL this routine avoids deadlocking against it but
 * can return EAGAIN.  The caller must try again.  The dnode lock will
 * cycle in this case; it remains locked on return in all cases.  dnode
 * must be shared-locked.
 */
int
tmpfs_alloc_vp(struct mount *mp,
	       struct tmpfs_node *dnode, struct tmpfs_node *node, int lkflag,
	       struct vnode **vpp)
{
	int error = 0;
	struct vnode *vp;

loop:
	/*
	 * Interlocked extraction from node.  This can race many things.
	 * We have to get a soft reference on the vnode while we hold
	 * the node locked, then acquire it properly and check for races.
	 */
	TMPFS_NODE_LOCK(node);
	if ((vp = node->tn_vnode) != NULL) {
		KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
		vhold(vp);
		TMPFS_NODE_UNLOCK(node);

		if (dnode) {
			/*
			 * Special-case handling to avoid deadlocking against
			 * dnode.  This case has been validated and occurs
			 * every so often during synth builds.
			 */
			if (vget(vp, (lkflag & ~LK_RETRY) |
				     LK_NOWAIT |
				     LK_EXCLUSIVE) != 0) {
				TMPFS_NODE_UNLOCK(dnode);
				if (vget(vp, (lkflag & ~LK_RETRY) |
					     LK_SLEEPFAIL |
					     LK_EXCLUSIVE) == 0) {
					vn_unlock(vp);
				}
				vdrop(vp);
				TMPFS_NODE_LOCK_SH(dnode);

				return EAGAIN;
			}
		} else {
			/*
			 * Normal path
			 */
			if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
				vdrop(vp);
				goto loop;
			}
		}
		if (node->tn_vnode != vp) {
			vput(vp);
			vdrop(vp);
			goto loop;
		}
		vdrop(vp);
		goto out;
	}
	/* vp is NULL */

	/*
	 * This should never happen.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
		TMPFS_NODE_UNLOCK(node);
		error = ENOENT;
		goto out;
	}

	/*
	 * Interlock against other calls to tmpfs_alloc_vp() trying to
	 * allocate and assign a vp to node.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
		node->tn_vpstate |= TMPFS_VNODE_WANT;
		error = tsleep(&node->tn_vpstate, PINTERLOCKED | PCATCH,
			       "tmpfs_alloc_vp", 0);
		TMPFS_NODE_UNLOCK(node);
		if (error)
			return error;
		goto loop;
	}
	node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
	TMPFS_NODE_UNLOCK(node);

	/*
	 * Allocate a new vnode (may block).  The ALLOCATING flag should
	 * prevent a race against someone else assigning node->tn_vnode.
	 */
	error = getnewvnode(VT_TMPFS, mp, &vp, VLKTIMEOUT, LK_CANRECURSE);
	if (error != 0)
		goto unlock;
	KKASSERT(node->tn_vnode == NULL);
	KKASSERT(vp != NULL);
	vp->v_data = node;
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		break;
	case VREG:
		/*
		 * VMIO is mandatory.  Tmpfs also supports KVABIO
		 * for its tmpfs_strategy().
		 */
		vsetflags(vp, VKVABIO);
		vinitvmio(vp, node->tn_size, TMPFS_BLKSIZE, -1);
		break;
	case VLNK:
		break;
	case VFIFO:
		vp->v_ops = &mp->mnt_vn_fifo_ops;
		break;
	case VDIR:
		break;
	default:
		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
	}

unlock:
	TMPFS_NODE_LOCK(node);

	KKASSERT(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
	node->tn_vnode = vp;

	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
		TMPFS_NODE_UNLOCK(node);
		wakeup(&node->tn_vpstate);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

out:
	*vpp = vp;
	KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));

	return error;
}
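/*
 * Caller-side sketch of the EAGAIN contract (hypothetical helper, not
 * from the tree): with a shared-locked dnode, EAGAIN means the dnode
 * lock was cycled, so any state derived under that lock may be stale.
 * A real caller (e.g. a resolve path) would revalidate the directory
 * entry before retrying; this loop only shows the retry shape.
 */
static int
tmpfs_example_get_vp(struct mount *mp, struct tmpfs_node *dnode,
		     struct tmpfs_node *node, struct vnode **vpp)
{
	int error;

	do {
		/* dnode remains locked on return in all cases */
		error = tmpfs_alloc_vp(mp, dnode, node, LK_EXCLUSIVE, vpp);
	} while (error == EAGAIN);

	return error;
}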