/*
 * Sets *vpp to the root devfs vnode, referenced and exclusively locked.
 */
int
devfs_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int ret;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_root() called!\n");
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = devfs_allocv(vpp, DEVFS_MNTDATA(mp)->root_node);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}
/*
 * Get file system statistics.
 */
static int
devfs_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_stat() called!\n");
	sbp->f_bsize = DEV_BSIZE;
	sbp->f_iosize = DEV_BSIZE;
	sbp->f_blocks = 2;	/* avoid divide by zero in some df's */
	sbp->f_bfree = 0;
	sbp->f_bavail = 0;
	sbp->f_files = (DEVFS_MNTDATA(mp)) ? (DEVFS_MNTDATA(mp)->file_count) : 0;
	sbp->f_ffree = 0;

	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}

	return (0);
}
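/*
 * Illustrative userland sketch (not part of the devfs sources): how the
 * fields filled in above surface through statfs(2).  The mount point
 * "/dev" is an assumption about where devfs is mounted.  Note that
 * f_blocks is a dummy value (2) while f_files mirrors the mount's
 * file_count.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	struct statfs st;

	if (statfs("/dev", &st) != 0) {
		perror("statfs");
		return 1;
	}
	printf("bsize=%ld blocks=%ld files=%ld\n",
	    (long)st.f_bsize, (long)st.f_blocks, (long)st.f_files);
	return 0;
}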
/*
 * unmount system call
 */
static int
devfs_vfs_unmount(struct mount *mp, int mntflags)
{
	int error = 0;
	int flags = 0;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_unmount() called!\n");

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	error = vflush(mp, 0, flags);

	if (error)
		return (error);
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_tracer_orphan_count(mp, 1);
	lockmgr(&devfs_lock, LK_RELEASE);
	devfs_mount_del(DEVFS_MNTDATA(mp));
	kfree(mp->mnt_data, M_DEVFS);
	mp->mnt_data = NULL;

	return (0);
}
/*
 * VFS Operations.
 *
 * mount system call
 */
static int
devfs_vfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct devfs_mount_info info;
	struct devfs_mnt_data *mnt;
	size_t size;
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_mount() called!\n");

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	if (data == NULL) {
		bzero(&info, sizeof(info));
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOSTKMNT | MNTK_ALL_MPSAFE;
	mp->mnt_data = NULL;
	vfs_getnewfsid(mp);

	size = sizeof("devfs") - 1;
	bcopy("devfs", mp->mnt_stat.f_mntfromname, size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	copyinstr(path, mp->mnt_stat.f_mntonname,
	    sizeof(mp->mnt_stat.f_mntonname) - 1, &size);
	devfs_vfs_statfs(mp, &mp->mnt_stat, cred);

	/*
	 * XXX: save other mount info passed from userland or so.
	 */
	mnt = kmalloc(sizeof(*mnt), M_DEVFS, M_WAITOK | M_ZERO);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	mp->mnt_data = (qaddr_t)mnt;

	if (info.flags & DEVFS_MNT_JAIL)
		mnt->jailed = 1;
	else
		mnt->jailed = jailed(cred);

	mnt->leak_count = 0;
	mnt->file_count = 0;
	mnt->mp = mp;
	TAILQ_INIT(&mnt->orphan_list);
	mnt->root_node = devfs_allocp(Nroot, "", NULL, mp, NULL);
	KKASSERT(mnt->root_node);
	lockmgr(&devfs_lock, LK_RELEASE);

	vfs_add_vnodeops(mp, &devfs_vnode_norm_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &devfs_vnode_dev_vops, &mp->mnt_vn_spec_ops);

	devfs_debug(DEVFS_DEBUG_DEBUG, "calling devfs_mount_add\n");
	devfs_mount_add(mnt);

	return (0);
}
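/*
 * Illustrative userland sketch (not part of the devfs sources): mounting
 * an additional devfs instance via mount(2).  Passing data == NULL takes
 * the bzero'd-info path in devfs_vfs_mount() above, so no DEVFS_MNT_JAIL
 * flag is set.  The target directory "/mnt/dev2" is an assumption for
 * the example; the call requires appropriate privilege.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	if (mount("devfs", "/mnt/dev2", 0, NULL) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}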
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
		    node->d_dir.d_namlen, ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
			    DEVFS_MNTDATA(vp->v_mount)->root_node,
			    ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "parent here is: %s, node is: |%s|\n",
				    ((node->parent->node_type == Nroot) ?
				    "ROOT!" : node->parent->d_dir.d_name),
				    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "test: %s\n",
				    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent),
				    devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
	    "devfs_spec_open() called on %s! \n", dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS, 64 * 1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open the underlying device.
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks whether the disk device is being opened for writing.
	 * That is only allowed when securelevel permits it and the device
	 * is not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {
		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow opening it for writing.
		 * If it is mounted read-only but securelevel is >= 1, do
		 * not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;

			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}

	return 0;
}
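/*
 * Illustrative standalone sketch (a hypothetical helper, not part of the
 * devfs sources): the disk write-open policy from devfs_spec_open()
 * above, reduced to a pure decision function.  'mounted' and 'rdonly'
 * stand in for vfs_mountedon() and the MNT_RDONLY mount flag.
 */
#include <errno.h>
#include <stdio.h>

static int
disk_write_open_policy(int securelevel, int mounted, int rdonly)
{
	if (securelevel >= 2)
		return (EPERM);		/* very secure: no disk writes */
	if (mounted) {
		if (!rdonly)
			return (EBUSY);	/* mounted R/W: refuse */
		if (securelevel >= 1)
			return (EPERM);	/* mounted R/O but secure */
	}
	return (0);			/* allowed */
}

int
main(void)
{
	printf("secure=2:      %d\n", disk_write_open_policy(2, 0, 0)); /* EPERM */
	printf("mounted R/W:   %d\n", disk_write_open_policy(0, 1, 0)); /* EBUSY */
	printf("not mounted:   %d\n", disk_write_open_policy(0, 0, 0)); /* 0 */
	return 0;
}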
int
vfs_mountroot_devfs(void)
{
	struct vnode *vp;
	struct nchandle nch;
	struct nlookupdata nd;
	struct mount *mp;
	struct vfsconf *vfsp;
	int error;
	struct ucred *cred = proc0.p_ucred;
	const char *devfs_path, *init_chroot;
	char *dev_malloced = NULL;

	if ((init_chroot = kgetenv("init_chroot")) != NULL) {
		size_t l;

		l = strlen(init_chroot) + sizeof("/dev");
		dev_malloced = kmalloc(l, M_MOUNT, M_WAITOK);
		ksnprintf(dev_malloced, l, "%s/dev", init_chroot);
		devfs_path = dev_malloced;
	} else {
		devfs_path = "/dev";
	}

	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init_raw(&nd, devfs_path, UIO_SYSSPACE,
	    NLC_FOLLOW, cred, &rootnch);

	if (error == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
		    "vfs_mountroot_devfs: nlookup_init is ok...\n");
		if ((error = nlookup(&nd)) == 0) {
			devfs_debug(DEVFS_DEBUG_DEBUG,
			    "vfs_mountroot_devfs: nlookup is ok...\n");
			if (nd.nl_nch.ncp->nc_vp == NULL) {
				devfs_debug(DEVFS_DEBUG_SHOW,
				    "vfs_mountroot_devfs: nlookup: simply not found\n");
				error = ENOENT;
			}
		}
	}
	if (dev_malloced != NULL) {
		kfree(dev_malloced, M_MOUNT);
		dev_malloced = NULL;
	}
	devfs_path = NULL;
	if (error) {
		nlookup_done(&nd);
		devfs_debug(DEVFS_DEBUG_SHOW,
		    "vfs_mountroot_devfs: nlookup failed, error: %d\n", error);
		return (error);
	}

	/*
	 * Extract the locked+refd ncp and cleanup the nd structure
	 */
	nch = nd.nl_nch;
	cache_zero(&nd.nl_nch);
	nlookup_done(&nd);

	/*
	 * now we have the locked ref'd nch and unreferenced vnode.
	 */
	vp = nch.ncp->nc_vp;
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {
		cache_put(&nch);
		devfs_debug(DEVFS_DEBUG_SHOW,
		    "vfs_mountroot_devfs: vget failed\n");
		return (error);
	}
	cache_unlock(&nch);

	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW,
		    "vfs_mountroot_devfs: vinvalbuf failed\n");
		return (error);
	}
	if (vp->v_type != VDIR) {
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW,
		    "vfs_mountroot_devfs: vp is not VDIR\n");
		return (ENOTDIR);
	}

	vfsp = vfsconf_find_by_name("devfs");
	vsetflags(vp, VMOUNT);

	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO | M_WAITOK);
	mount_init(mp);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_vfc = vfsp;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	vn_unlock(vp);

	/*
	 * Mount the filesystem.
	 */
	error = VFS_MOUNT(mp, "/dev", NULL, cred);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 * namecache record.
	 */
	if (!error) {
		if (mp->mnt_ncmountpt.ncp == NULL) {
			/*
			 * allocate, then unlock, but leave the ref intact
			 */
			cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
			cache_unlock(&mp->mnt_ncmountpt);
		}
		mp->mnt_ncmounton = nch;		/* inherits ref */
		nch.ncp->nc_flag |= NCF_ISMOUNTPT;

		/* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
		vclrflags(vp, VMOUNT);
		mountlist_insert(mp, MNTINS_LAST);
		vn_unlock(vp);
		/* checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt); */
		error = vfs_allocate_syncvnode(mp);
		if (error) {
			devfs_debug(DEVFS_DEBUG_SHOW,
			    "vfs_mountroot_devfs: vfs_allocate_syncvnode failed\n");
		}
		vfs_unbusy(mp);
		error = VFS_START(mp, 0);
		vrele(vp);
	} else {
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
		vclrflags(vp, VMOUNT);
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp);
		kfree(mp, M_MOUNT);
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW,
		    "vfs_mountroot_devfs: mount failed\n");
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
	    "rootmount_devfs done with error: %d\n", error);
	return (error);
}
static int
devfs_vop_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);
	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
		    DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
			    dnode->parent->d_dir.d_ino, DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
			    dnode->d_dir.d_ino, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Nlink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio,
		    node->d_dir.d_ino, node->d_dir.d_type,
		    node->d_dir.d_namlen, node->d_dir.d_name);
		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
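/*
 * Illustrative standalone sketch (not part of the devfs sources): the
 * cookie sizing heuristic from devfs_vop_readdir() above.  The caller's
 * residual buffer space is assumed to hold roughly one dirent per 16
 * bytes, capped at 256 cookies (the kmalloc above always allocates the
 * full 256-entry array).  The buffer size is an assumption for the
 * example.
 */
#include <stdio.h>

int
main(void)
{
	long uio_resid = 1024;	/* assumed residual buffer space */
	int ncookies = uio_resid / 16 + 1;

	if (ncookies > 256)
		ncookies = 256;
	printf("ncookies = %d\n", ncookies);	/* 65 for a 1 KB buffer */
	return 0;
}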
static int
devfs_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Nlink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
				    "Recursive link or depth >= 8");
				break;
			}
			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do a sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device, i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf_kva(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bsetrunningbufspace(bp, size);

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_REFERENCED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
		    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}
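/*
 * Illustrative standalone sketch (not part of the devfs sources): the
 * page classification performed above after a device read returns
 * 'nread' bytes into 'pcount' pages.  A page is fully valid when it is
 * completely covered, partially valid when the read ends inside it, and
 * left invalid past EOF.  The values below are assumptions for a worked
 * example.
 */
#include <stdio.h>

#define PAGE_SIZE 4096

int
main(void)
{
	int pcount = 4;		/* assumed number of pages */
	int nread = 9000;	/* assumed bytes actually read */
	int i, toff, nextoff;

	for (i = 0; i < pcount; i++) {
		toff = i * PAGE_SIZE;
		nextoff = toff + PAGE_SIZE;
		if (nextoff <= nread)
			printf("page %d: fully valid\n", i);
		else if (toff < nread)
			printf("page %d: valid bytes [0, %d)\n",
			    i, nread - toff);
		else
			printf("page %d: invalid (past EOF)\n", i);
	}
	return 0;
}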
/*
 * Chunked up transfer completion routine - chain transfers until done
 *
 * NOTE: MPSAFE callback.
 */
static void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif
		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
		return;
	}

	/*
	 * Fall through to here on termination.  biodone(bp) and
	 * clean up and free nbp.
	 */
	biodone(bio);
	BUF_UNLOCK(nbp);
	uninitbufbio(nbp);
	kfree(nbp, M_DEVBUF);
}
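/*
 * Illustrative standalone sketch (not part of the devfs sources): the
 * chunk bookkeeping driving the chain above.  A request of 'bcount'
 * bytes is issued 'chunksize' bytes at a time; each completion advances
 * boffset and clamps the next transfer, exactly like the "Continue the
 * chain" branch.  The sizes are assumptions for the example.
 */
#include <stdio.h>

int
main(void)
{
	int bcount = 10000;	/* assumed original request size */
	int chunksize = 4096;	/* assumed per-transfer limit */
	int boffset = 0;
	int n;

	while (boffset < bcount) {
		n = bcount - boffset;
		if (n > chunksize)
			n = chunksize;	/* clamp, as the chain does */
		printf("chunk at offset %5d, %4d bytes\n", boffset, n);
		boffset += n;		/* matches boffset += nbp->b_bcount */
	}
	return 0;
}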
/*
 * Convert a vnode strategy call into a device strategy call.  Vnode
 * strategy calls are not limited to device DMA limits, so we have to
 * handle the case where a request exceeds them.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT | M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n", chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif
	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
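/*
 * Illustrative standalone sketch (not part of the devfs sources): the
 * chunksize derivation above.  maxiosize is rounded down to a multiple
 * of the device's physical block size so every chunk stays block
 * aligned.  The values are assumptions chosen so the rounding is
 * visible.
 */
#include <stdio.h>

int
main(void)
{
	int maxiosize = 130000;	/* e.g. an si_iosize_max value */
	int blksiz = 4096;	/* e.g. an si_bsize_phys value */
	int chunksize = maxiosize / blksiz * blksiz;

	/* 130000 / 4096 = 31, so chunksize becomes 126976 */
	printf("chunksize = %d\n", chunksize);
	return 0;
}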
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
	       struct ucred *ucred, struct sysmsg *msg)
{
#if 0
	struct devfs_node *node;
#endif
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	reference_dev(dev);

#if 0
	node = DEVFS_NODE(vp);
#endif

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n", dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif
	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}
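/*
 * Illustrative userland sketch (not part of the devfs sources): hitting
 * the FIODTYPE fast path handled directly above, which returns the
 * device's dev_dflags() & D_TYPEMASK without calling the driver.
 * "/dev/null" is an assumed example device.
 */
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	int type;

	if (fd < 0 || ioctl(fd, FIODTYPE, &type) != 0) {
		perror("FIODTYPE");
		return 1;
	}
	printf("D_TYPEMASK flags: %#x\n", type);
	return 0;
}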
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vnode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
				    LK_RETRY |
				    LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return (error);
}
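/*
 * Illustrative standalone sketch (a hypothetical helper, not part of the
 * devfs sources): the "really close the device?" test from
 * devfs_spec_close() above, reduced to a pure predicate.  'reclaimed',
 * 'trackclose' and 'opencount' stand in for VRECLAIMED, D_TRACKCLOSE
 * and v_opencount.
 */
#include <stdio.h>

static int
device_needs_real_close(int reclaimed, int trackclose, int opencount)
{
	/* forced reclaim, close-tracking device, or last vnode close */
	return (reclaimed || trackclose || opencount == 1);
}

int
main(void)
{
	printf("%d %d %d\n",
	    device_needs_real_close(0, 0, 3),	/* 0: not the last close */
	    device_needs_real_close(0, 1, 3),	/* 1: D_TRACKCLOSE device */
	    device_needs_real_close(0, 0, 1));	/* 1: last close */
	return 0;
}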