/*
 * Sets *vpp to the root devfs vnode, referenced and exclusively locked.
 *
 * Returns 0 on success or the error from devfs_allocv().
 */
int
devfs_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int ret;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_root() called!\n");
	/* devfs_lock guards the node tree while the vnode is allocated */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = devfs_allocv(vpp, DEVFS_MNTDATA(mp)->root_node);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}
/*
 * VFS_ROOT() implementation: return the devfs root directory vnode,
 * exclusively locked and with VV_ROOT set, in *vpp.
 *
 * NOTE(review): dm_lock is acquired here but no sx_xunlock() appears on
 * either return path; presumably devfs_allocv() drops dm_lock on both
 * success and failure -- TODO confirm against devfs_allocv()'s contract,
 * otherwise this leaks the lock.
 */
static int
devfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;
	struct vnode *vp;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(mp);
	sx_xlock(&dmp->dm_lock);
	error = devfs_allocv(dmp->dm_rootdir, mp, LK_EXCLUSIVE, &vp);
	if (error)
		return (error);
	vp->v_vflag |= VV_ROOT;
	*vpp = vp;
	return (0);
}
static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap) { struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); *ap->a_vpp = NULL; if (!devfs_node_is_accessible(dnode)) return ENOENT; lockmgr(&devfs_lock, LK_EXCLUSIVE); if (dnode->parent != NULL) { devfs_allocv(ap->a_vpp, dnode->parent); vn_unlock(*ap->a_vpp); } lockmgr(&devfs_lock, LK_RELEASE); return ((*ap->a_vpp == NULL) ? ENOENT : 0); }
/*
 * Open a device special file.  Handles on-the-fly device cloning
 * (e.g. pty-style devices): if the driver's clone handler produces a
 * new cdev, a fresh devfs node and vnode are created and substituted
 * into ap->a_vp before the underlying driver ->d_open is called, with
 * the original vnode remembered in orig_vp for cleanup.
 *
 * Returns 0 on success or an errno (ENXIO, ENOENT, EPERM, EBUSY, or
 * the error returned by dev_dopen()).
 */
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (node && ap->a_fp) {
		/* Give the driver a chance to clone a per-open device. */
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
			    DEVFS_MNTDATA(vp->v_mount)->root_node,
			    ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "parent here is: %s, node is: |%s|\n",
				    ((node->parent->node_type == Nroot) ?
				    "ROOT!" : node->parent->d_dir.d_name),
				    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "test: %s\n",
				    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS,64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open underlying device.  The vnode lock is dropped across the
	 * driver call and reacquired afterwards.
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will be only allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {
		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	/* Ensure ttys have a working stop routine before use. */
	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	/* Disk vnodes get a VM object for buffered I/O. */
	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs. So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}

	return 0;
}
static int devfs_vop_nresolve(struct vop_nresolve_args *ap) { struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); struct devfs_node *node, *found = NULL; struct namecache *ncp; struct vnode *vp = NULL; int error = 0; int len; int depth; ncp = ap->a_nch->ncp; len = ncp->nc_nlen; if (!devfs_node_is_accessible(dnode)) return ENOENT; lockmgr(&devfs_lock, LK_EXCLUSIVE); if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) { error = ENOENT; cache_setvp(ap->a_nch, NULL); goto out; } TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { if (len == node->d_dir.d_namlen) { if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) { found = node; break; } } } if (found) { depth = 0; while ((found->node_type == Nlink) && (found->link_target)) { if (depth >= 8) { devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8"); break; } found = found->link_target; ++depth; } if (!(found->flags & DEVFS_HIDDEN)) devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found); } if (vp == NULL) { error = ENOENT; cache_setvp(ap->a_nch, NULL); goto out; } KKASSERT(vp); vn_unlock(vp); cache_setvp(ap->a_nch, vp); vrele(vp); out: lockmgr(&devfs_lock, LK_RELEASE); return error; }