/*
 * vnopen - open entry point for the vn pseudo-device.
 *
 * Validates the unit number encoded in the dev_t and refuses write
 * opens of a unit whose backing store was attached read-only.
 * Returns 0 on success, ENXIO for a bad unit, EACCES for a write
 * open of a read-only unit.
 */
static int
vnopen(dev_t dev, int flags, __unused int devtype, __unused proc_t p)
{
	struct vn_softc *sc;
	int unit;

	/* Reject minors outside the configured unit table. */
	unit = vnunit(dev);
	if (unit >= NVNDEVICE)
		return (ENXIO);

	sc = &vn_table[unit];

	/* A read-only backing vnode cannot be opened for writing. */
	if ((flags & FWRITE) != 0 && (sc->sc_flags & VNF_READONLY) != 0)
		return (EACCES);

	return (0);
}
/*
 * vnsize - report the sector size of a vn unit.
 *
 * Returns the unit's configured sector size, or -1 when the unit
 * number is out of range or the unit has not been attached
 * (VNF_INITED clear).
 */
static int
vnsize(dev_t dev)
{
	struct vn_softc *sc;
	int unit;

	/* Out-of-range unit: size is unknown. */
	unit = vnunit(dev);
	if (unit >= NVNDEVICE)
		return (-1);

	sc = &vn_table[unit];

	/* An unattached unit has no sector size to report. */
	return ((sc->sc_flags & VNF_INITED) == 0) ? -1 : sc->sc_secsize;
}
/*
 * NOTE(review): this is a SECOND definition of vnsize() in this file
 * (see the lock-free variant above) — as written, both cannot be
 * compiled into the same translation unit (duplicate symbol).  This
 * one wraps the lookup in the legacy kernel funnel
 * (thread_funnel_set/kernel_flock), the pre-Tiger XNU coarse-grained
 * lock; presumably it is the older revision kept for reference —
 * confirm which variant should survive and remove the other.
 *
 * Returns the unit's sector size, or -1 if the unit number is out of
 * range or the unit is not attached (VNF_INITED clear).
 */
static int
vnsize(dev_t dev)
{
	int secsize;
	struct vn_softc *vn;
	int unit;
	boolean_t funnel_state;

	unit = vnunit(dev);
	if (vnunit(dev) >= NVNDEVICE) {
		return (-1);
	}

	/* Take the kernel funnel; restore its previous state on exit. */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0)
		secsize = -1;
	else
		secsize = vn->sc_secsize;
	(void) thread_funnel_set(kernel_flock, funnel_state);
	return (secsize);
}
/*
 * vnioctl - ioctl dispatcher for the vn pseudo-device.
 *
 * Handles both the vn-private VNIOC* controls (attach/detach a backing
 * file, attach a shadow file, get/set option bits) and the generic
 * DKIOC* disk controls (block size, block count, transfer limits).
 * `is_char` distinguishes the character-device entry from the
 * block-device entry; attach, detach, and set-block-size are only
 * permitted on the block device.
 *
 * The ioctl argument buffer `data` is reinterpreted three ways
 * depending on the command: as a struct vn_ioctl_64 (viop), as a
 * 32-bit word (f), or as a 64-bit word (o).
 *
 * Returns 0 on success or an errno (ENXIO, EBUSY, EINVAL, ENODEV,
 * ENOTTY, or whatever proc_suser/attach helpers return).
 */
/* ARGSUSED */
static int
vnioctl(dev_t dev, u_long cmd, caddr_t data, __unused int flag,
	proc_t p, int is_char)
{
	struct vn_softc *vn;
	struct vn_ioctl_64 *viop;
	int error;
	u_int32_t *f;
	u_int64_t * o;
	int unit;
	struct vfsioattr ioattr;
	struct vn_ioctl_64 user_vnio;
	struct vfs_context context;

	unit = vnunit(dev);
	if (vnunit(dev) >= NVNDEVICE) {
		return (ENXIO);
	}
	vn = vn_table + unit;

	/* every vn ioctl requires superuser credentials */
	error = proc_suser(p);
	if (error) {
		goto done;
	}
	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	/* overlay the three views of the ioctl argument buffer */
	viop = (struct vn_ioctl_64 *)data;
	f = (u_int32_t *)data;
	o = (u_int64_t *)data;

	/*
	 * First pass: commands in this list are only meaningful once a
	 * backing file has been attached; fail them early with ENXIO.
	 * (The 32/64-bit VNIOCDETACH variants differ by kernel word
	 * size, hence the #ifdef pairs.)
	 */
	switch (cmd) {
#ifdef __LP64__
	case VNIOCDETACH32:
	case VNIOCDETACH:
#else
	case VNIOCDETACH:
	case VNIOCDETACH64:
#endif
	case DKIOCGETBLOCKSIZE:
	case DKIOCSETBLOCKSIZE:
	case DKIOCGETMAXBLOCKCOUNTREAD:
	case DKIOCGETMAXBLOCKCOUNTWRITE:
	case DKIOCGETMAXSEGMENTCOUNTREAD:
	case DKIOCGETMAXSEGMENTCOUNTWRITE:
	case DKIOCGETMAXSEGMENTBYTECOUNTREAD:
	case DKIOCGETMAXSEGMENTBYTECOUNTWRITE:
	case DKIOCGETBLOCKCOUNT:
	case DKIOCGETBLOCKCOUNT32:
		if ((vn->sc_flags & VNF_INITED) == 0) {
			error = ENXIO;
			goto done;
		}
		break;
	default:
		break;
	}

	/*
	 * Pull the I/O attributes of the backing file's mount so the
	 * DKIOCGETMAX* queries below can report its transfer limits;
	 * zeroed if nothing is attached.
	 */
	if (vn->sc_vp != NULL)
		vfs_ioattr(vnode_mount(vn->sc_vp), &ioattr);
	else
		bzero(&ioattr, sizeof(ioattr));

	switch (cmd) {
	case DKIOCISVIRTUAL:
		*f = 1;
		break;
	case DKIOCGETMAXBLOCKCOUNTREAD:
		*o = ioattr.io_maxreadcnt / vn->sc_secsize;
		break;
	case DKIOCGETMAXBLOCKCOUNTWRITE:
		*o = ioattr.io_maxwritecnt / vn->sc_secsize;
		break;
	case DKIOCGETMAXBYTECOUNTREAD:
		*o = ioattr.io_maxreadcnt;
		break;
	case DKIOCGETMAXBYTECOUNTWRITE:
		*o = ioattr.io_maxwritecnt;
		break;
	case DKIOCGETMAXSEGMENTCOUNTREAD:
		*o = ioattr.io_segreadcnt;
		break;
	case DKIOCGETMAXSEGMENTCOUNTWRITE:
		*o = ioattr.io_segwritecnt;
		break;
	case DKIOCGETMAXSEGMENTBYTECOUNTREAD:
		*o = ioattr.io_maxsegreadsize;
		break;
	case DKIOCGETMAXSEGMENTBYTECOUNTWRITE:
		*o = ioattr.io_maxsegwritesize;
		break;
	case DKIOCGETBLOCKSIZE:
		*f = vn->sc_secsize;
		break;
	case DKIOCSETBLOCKSIZE:
		if (is_char) {
			/* can only set block size on block device */
			error = ENODEV;
			break;
		}
		if (*f < DEV_BSIZE) {
			error = EINVAL;
			break;
		}
		if (vn->sc_shadow_vp != NULL) {
			if (*f == (unsigned)vn->sc_secsize) {
				/* same size as current: harmless no-op */
				break;
			}
			/* can't change the block size if already shadowing */
			error = EBUSY;
			break;
		}
		vn->sc_secsize = *f;
		/* recompute the size in terms of the new blocksize */
		vn->sc_size = vn->sc_fsize / vn->sc_secsize;
		break;
	case DKIOCISWRITABLE:
		*f = 1;
		break;
	case DKIOCGETBLOCKCOUNT32:
		*f = vn->sc_size;
		break;
	case DKIOCGETBLOCKCOUNT:
		*o = vn->sc_size;
		break;
#ifdef __LP64__
	case VNIOCSHADOW32:
	case VNIOCSHADOW:
#else
	case VNIOCSHADOW:
	case VNIOCSHADOW64:
#endif
		if (vn->sc_shadow_vp != NULL) {
			error = EBUSY;
			break;
		}
		if (vn->sc_vp == NULL) {
			/* must be attached before we can shadow */
			error = EINVAL;
			break;
		}
		if (!proc_is64bit(p)) {
			/* downstream code expects LP64 version of vn_ioctl structure */
			vn_ioctl_to_64((struct vn_ioctl_32 *)viop, &user_vnio);
			viop = &user_vnio;
		}
		if (viop->vn_file == USER_ADDR_NULL) {
			error = EINVAL;
			break;
		}
		error = vniocattach_shadow(vn, viop, dev, 0, p);
		break;
#ifdef __LP64__
	case VNIOCATTACH32:
	case VNIOCATTACH:
#else
	case VNIOCATTACH:
	case VNIOCATTACH64:
#endif
		if (is_char) {
			/* attach only on block device */
			error = ENODEV;
			break;
		}
		if (vn->sc_flags & VNF_INITED) {
			error = EBUSY;
			break;
		}
		if (!proc_is64bit(p)) {
			/* downstream code expects LP64 version of vn_ioctl structure */
			vn_ioctl_to_64((struct vn_ioctl_32 *)viop, &user_vnio);
			viop = &user_vnio;
		}
		if (viop->vn_file == USER_ADDR_NULL) {
			error = EINVAL;
			break;
		}
		error = vniocattach_file(vn, viop, dev, 0, p);
		break;
#ifdef __LP64__
	case VNIOCDETACH32:
	case VNIOCDETACH:
#else
	case VNIOCDETACH:
	case VNIOCDETACH64:
#endif
		if (is_char) {
			/* detach only on block device */
			error = ENODEV;
			break;
		}
		/* Note: spec_open won't open a mounted block device */

		/*
		 * XXX handle i/o in progress. Return EBUSY, or wait, or
		 * flush the i/o.
		 * XXX handle multiple opens of the device. Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */
		vnclear(vn, &context);
		break;
	case VNIOCGSET:
		/* global option bits: set, then echo back the result */
		vn_options |= *f;
		*f = vn_options;
		break;
	case VNIOCGCLEAR:
		vn_options &= ~(*f);
		*f = vn_options;
		break;
	case VNIOCUSET:
		/* per-unit option bits: set, then echo back the result */
		vn->sc_options |= *f;
		*f = vn->sc_options;
		break;
	case VNIOCUCLEAR:
		vn->sc_options &= ~(*f);
		*f = vn->sc_options;
		break;
	default:
		error = ENOTTY;
		break;
	}
done:
	return(error);
}
/*
 * vnstrategy - block I/O entry point for the vn pseudo-device.
 *
 * Validates the buf's alignment and bounds against the attached
 * backing store, takes iocounts on the backing vnode (and shadow
 * vnode, if any), performs the transfer via vn_readwrite_io(), and
 * completes the buf with buf_biodone().  Errors are reported through
 * buf_seterror(); the function itself returns nothing.
 */
static void
vnstrategy(struct buf *bp)
{
	struct vn_softc *vn;
	int error = 0;
	long sz;	/* in sc_secsize chunks */
	daddr64_t blk_num;
	struct vnode * shadow_vp = NULL;
	struct vnode * vp = NULL;
	struct vfs_context context;

	vn = vn_table + vnunit(buf_device(bp));
	if ((vn->sc_flags & VNF_INITED) == 0) {
		error = ENXIO;
		goto done;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	/* start with the whole request outstanding */
	buf_setresid(bp, buf_count(bp));

	/*
	 * Check for required alignment. Transfers must be a valid
	 * multiple of the sector size.
	 */
	blk_num = buf_blkno(bp);
	if (buf_count(bp) % vn->sc_secsize != 0) {
		error = EINVAL;
		goto done;
	}
	sz = howmany(buf_count(bp), vn->sc_secsize);

	/*
	 * If out of bounds return an error. If at the EOF point,
	 * simply read or write less.
	 */
	if (blk_num >= 0 && (u_int64_t)blk_num >= vn->sc_size) {
		/* exactly at EOF is a zero-length success, past it is EINVAL */
		if (blk_num > 0 && (u_int64_t)blk_num > vn->sc_size) {
			error = EINVAL;
		}
		goto done;
	}
	/*
	 * If the request crosses EOF, truncate the request.
	 */
	if ((blk_num + sz) > 0 && ((u_int64_t)(blk_num + sz)) > vn->sc_size) {
		buf_setcount(bp, (vn->sc_size - blk_num) * vn->sc_secsize);
		buf_setresid(bp, buf_count(bp));
	}
	vp = vn->sc_vp;
	if (vp == NULL) {
		error = ENXIO;
		goto done;
	}

	/* take an iocount on the backing vnode, checking the cached vid */
	error = vnode_getwithvid(vp, vn->sc_vid);
	if (error != 0) {
		/* the vnode is no longer available, abort */
		error = ENXIO;
		vnclear(vn, &context);
		goto done;
	}
	shadow_vp = vn->sc_shadow_vp;
	if (shadow_vp != NULL) {
		error = vnode_getwithvid(shadow_vp, vn->sc_shadow_vid);
		if (error != 0) {
			/* the vnode is no longer available, abort */
			error = ENXIO;
			/* drop the iocount taken on sc_vp above */
			vnode_put(vn->sc_vp);
			vnclear(vn, &context);
			goto done;
		}
	}

	error = vn_readwrite_io(vn, bp, &context);

	/* release the iocounts taken above */
	vnode_put(vp);
	if (shadow_vp != NULL) {
		vnode_put(shadow_vp);
	}
done:
	if (error) {
		buf_seterror(bp, error);
	}
	buf_biodone(bp);
	return;
}
/*
 * vnwrite - character-device write entry point for the vn pseudo-device.
 *
 * Bounds-checks the uio against the size of the attached backing file,
 * truncating a request that crosses EOF, then forwards the write either
 * through the shadow path (vnwrite_shadow) when a shadow file is
 * attached or directly via VNOP_WRITE.
 *
 * Returns 0 on success; ENXIO for a bad/unattached unit or a stale
 * backing vnode, EROFS for a read-only unit, EINVAL for a write
 * starting past EOF, or the error from the underlying write.
 */
static int
vnwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct vfs_context context;
	int error;
	off_t offset;
	user_ssize_t resid;
	struct vn_softc * vn;
	int unit;

	unit = vnunit(dev);
	if (vnunit(dev) >= NVNDEVICE) {
		return (ENXIO);
	}
	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0) {
		error = ENXIO;
		goto done;
	}
	if (vn->sc_flags & VNF_READONLY) {
		error = EROFS;
		goto done;
	}
	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	/* take an iocount on the backing vnode, checking the cached vid */
	error = vnode_getwithvid(vn->sc_vp, vn->sc_vid);
	if (error != 0) {
		/* the vnode is no longer available, abort */
		error = ENXIO;
		vnclear(vn, &context);
		goto done;
	}
	resid = uio_resid(uio);
	offset = uio_offset(uio);

	/*
	 * If out of bounds return an error. If at the EOF point,
	 * simply write less.
	 */
	if (offset >= (off_t)vn->sc_fsize) {
		if (offset > (off_t)vn->sc_fsize) {
			error = EINVAL;
		}
		/*
		 * Fix: the iocount taken by vnode_getwithvid() above was
		 * previously leaked on this early-exit path; release it
		 * before bailing out.
		 */
		vnode_put(vn->sc_vp);
		goto done;
	}
	/*
	 * If the request crosses EOF, truncate the request.
	 */
	if ((offset + resid) > (off_t)vn->sc_fsize) {
		resid = (off_t)vn->sc_fsize - offset;
		uio_setresid(uio, resid);
	}

	if (vn->sc_shadow_vp != NULL) {
		error = vnode_getwithvid(vn->sc_shadow_vp, vn->sc_shadow_vid);
		if (error != 0) {
			/* the vnode is no longer available, abort */
			error = ENXIO;
			vnode_put(vn->sc_vp);
			vnclear(vn, &context);
			goto done;
		}
		error = vnwrite_shadow(vn, uio, ioflag, &context);
		vnode_put(vn->sc_shadow_vp);
	} else {
		error = VNOP_WRITE(vn->sc_vp, uio, ioflag, &context);
	}
	vnode_put(vn->sc_vp);
done:
	return (error);
}