/* ARGSUSED */
/*
 * autofs VOP_OPEN: trigger the automount (if any) for the node, then
 * forward the open to the newly mounted-on vnode.
 *
 * *vpp may be replaced: when auto_trigger_mount() reports a covering
 * vnode, the original reference is released and *vpp becomes the root
 * of the freshly mounted filesystem before access-check and open.
 */
static int
auto_open(vnode_t **vpp, int flag, cred_t *cred, caller_context_t *ct)
{
	vnode_t *newvp;
	int error;

	AUTOFS_DPRINT((4, "auto_open: *vpp=%p\n", (void *)*vpp));

	/* Kick off (or wait for) the mount covering this trigger node. */
	error = auto_trigger_mount(*vpp, cred, &newvp);
	if (error)
		goto done;

	if (newvp != NULL) {
		/*
		 * Node is now mounted on.
		 */
		VN_RELE(*vpp);
		*vpp = newvp;
		/* Re-check read access against the real filesystem. */
		error = VOP_ACCESS(*vpp, VREAD, 0, cred, ct);
		if (!error)
			error = VOP_OPEN(vpp, flag, cred, ct);
	}

done:
	AUTOFS_DPRINT((5, "auto_open: *vpp=%p error=%d\n", (void *)*vpp,
	    error));
	return (error);
}
/*
 * Enable an EA using the passed filesystem, backing vnode, attribute name,
 * namespace, and proc.  Will perform a VOP_OPEN() on the vp, so expects vp
 * to be locked when passed in.  The vnode will be returned unlocked,
 * regardless of success/failure of the function.  As a result, the caller
 * will always need to vrele(), but not vput().
 */
static int
ufs_extattr_enable_with_open(struct ufsmount *ump, struct vnode *vp,
    int attrnamespace, const char *attrname, struct thread *td)
{
	int error;

	error = VOP_OPEN(vp, FREAD|FWRITE, td->td_ucred, td, NULL);
	if (error) {
		printf("ufs_extattr_enable_with_open.VOP_OPEN(): failed "
		    "with %d\n", error);
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	/* Account for the writer; back out the open if this fails. */
	error = VOP_ADD_WRITECOUNT(vp, 1);
	if (error != 0) {
		VOP_CLOSE(vp, FREAD | FWRITE, td->td_ucred, td);
		VOP_UNLOCK(vp, 0);
		return (error);
	}
	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
	    __func__, vp, vp->v_writecount);

	/* Hold a reference for the attribute backing store. */
	vref(vp);
	VOP_UNLOCK(vp, 0);

	error = ufs_extattr_enable(ump, attrnamespace, attrname, vp, td);
	if (error != 0)
		/* Undo the open (and writecount) on failure. */
		vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
	return (error);
}
/*ARGSUSED*/
/*
 * Open /dev/tty: resolve the caller's controlling terminal and forward
 * the open to it.  Returns ENXIO when the process has no controlling tty.
 */
int
cttyopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct vnode *ttyvp = cttyvp(p);
	int error;

	if (ttyvp == NULL)
		return (ENXIO);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
#ifdef PARANOID
	/*
	 * Since group is tty and mode is 620 on most terminal lines
	 * and since sessions protect terminals from processes outside
	 * your session, this check is probably no longer necessary.
	 * Since it inhibits setuid root programs that later switch
	 * to another user from accessing /dev/tty, we have decided
	 * to delete this test. (mckusick 5/93)
	 */
	error = VOP_ACCESS(ttyvp,
	  (flag&FREAD ? VREAD : 0) | (flag&FWRITE ? VWRITE : 0),
	  p->p_ucred, p);
	if (!error)
#endif /* PARANOID */
		/* NOCRED: credential checks intentionally bypassed here. */
		error = VOP_OPEN(ttyvp, flag, NOCRED, p);
	VOP_UNLOCK(ttyvp, 0, p);
	return (error);
}
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	/* Already opened via this layer; nothing to do. */
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		/* Controlling tty changed or was opened while we blocked. */
		kprintf("Warning: cttyopen: race avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	/* Flag set before the open; cleared again on failure below. */
	vsetflags(ttyvp, VCTTYISOPEN);
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);
	if (error)
		vclrflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
/*
 * Enable an EA using the passed filesystem, backing vnode, attribute name,
 * namespace, and proc.  Will perform a VOP_OPEN() on the vp, so expects vp
 * to be locked when passed in.  The vnode will be returned unlocked,
 * regardless of success/failure of the function.  As a result, the caller
 * will always need to vrele(), but not vput().
 */
static int
ufs_extattr_enable_with_open(struct myfs_ufsmount *ump, struct vnode *vp,
    int attrnamespace, const char *attrname, struct thread *td)
{
	int error;

	error = VOP_OPEN(vp, FREAD|FWRITE, td->td_ucred, td, NULL);
	if (error) {
		printf("ufs_extattr_enable_with_open.VOP_OPEN(): failed "
		    "with %d\n", error);
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	/*
	 * Bump the writer count directly (this variant predates the
	 * VOP_ADD_WRITECOUNT interface) and hold a reference for the
	 * attribute backing store.
	 */
	vp->v_writecount++;

	vref(vp);

	VOP_UNLOCK(vp, 0);

	error = ufs_extattr_enable(ump, attrnamespace, attrname, vp, td);
	if (error != 0)
		/* Undo the open on failure. */
		vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
	return (error);
}
/*
 * New Leaf driver open entry point.  We make a vnode and go through specfs
 * in order to obtain open close exclusions guarantees.  Note that we drop
 * OTYP_LYR if it was specified - we are going through specfs and it provides
 * last close semantics (FKLYR is provided to open(9E)).  Also, since
 * spec_open will drive attach via e_ddi_hold_devi_by_dev for a makespecvp
 * vnode with no SDIP_SET on the common snode, the dev_lopen caller no longer
 * needs to call ddi_hold_installed_driver.
 */
int
dev_lopen(dev_t *devp, int flag, int otype, struct cred *cred)
{
	struct vnode	*vp;
	int		error;
	struct vnode	*cvp;

	/* Shadow vnode for the device; released before return. */
	vp = makespecvp(*devp, (otype == OTYP_BLK) ? VBLK : VCHR);
	error = VOP_OPEN(&vp, flag | FKLYR, cred, NULL);
	if (error == 0) {
		/* Pick up the (possibly) new dev_t value. */
		*devp = vp->v_rdev;

		/*
		 * Place extra hold on the common vnode, which contains the
		 * open count, so that it is not destroyed by the VN_RELE of
		 * the shadow makespecvp vnode below.
		 */
		cvp = STOV(VTOCS(vp));
		VN_HOLD(cvp);
	}

	/* release the shadow makespecvp vnode. */
	VN_RELE(vp);
	return (error);
}
/*
 * Mount the root filesystem from a CD device.  Briefly opens the root
 * device read-only to probe the session start sector, closes it again,
 * then performs the real mount via iso_mountfs().
 */
static int
iso_mountroot(struct mount *mp)
{
	struct iso_args args;
	struct vnode *rootvp;
	int error;

	if ((error = bdevvp(rootdev, &rootvp))) {
		kprintf("iso_mountroot: can't find rootvp\n");
		return (error);
	}
	args.flags = ISOFSMNT_ROOT;

	/* Temporary read-only open just to query the session layout. */
	vn_lock(rootvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(rootvp, FREAD, FSCRED, NULL);
	vn_unlock(rootvp);
	if (error)
		return (error);

	args.ssector = iso_get_ssector(rootdev);

	vn_lock(rootvp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(rootvp, FREAD, NULL);
	vn_unlock(rootvp);

	if (bootverbose)
		kprintf("iso_mountroot(): using session at block %d\n",
		       args.ssector);
	/* iso_mountfs() reopens the device itself as part of the mount. */
	if ((error = iso_mountfs(rootvp, mp, &args)) != 0)
		return (error);

	cd9660_statfs(mp, &mp->mnt_stat, proc0.p_ucred);
	return (0);
}
/*
 * Prepare the backing block device for a v7fs mount: flush stale
 * buffers and open it FREAD (plus FWRITE unless mounting read-only).
 *
 * On success returns 0 with devvp still LOCKED (caller unlocks);
 * on failure the lock is dropped before returning the error.
 */
static int
v7fs_openfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	kauth_cred_t cred = l->l_cred;
	int oflags;
	int error;

	/* Flush buffer */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0)))
		goto unlock_exit;

	/* Open block device */
	oflags = FREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		oflags |= FWRITE;

	if ((error = VOP_OPEN(devvp, oflags, NOCRED)) != 0) {
		DPRINTF("VOP_OPEN=%d\n", error);
		goto unlock_exit;
	}

	return 0; /* lock held */

unlock_exit:
	VOP_UNLOCK(devvp);
	return error;
}
/* * This opens and closes the appropriate device with minor number - * hopefully, it will cause the driver to attach and register a controller * with us */ static vnode_t * rsmops_device_open(const char *major_name, const minor_t minor_num) { major_t maj; vnode_t *vp; int ret; if (minor_num == (minor_t)-1) { return (NULL); } maj = ddi_name_to_major((char *)major_name); if (maj == (major_t)-1) { return (NULL); } vp = makespecvp(makedevice(maj, minor_num), VCHR); ret = VOP_OPEN(&vp, FREAD|FWRITE, CRED(), NULL); if (ret == 0) { return (vp); } else { VN_RELE(vp); return (NULL); } }
/*
 *	union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open.  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 *
 *	union_open(struct vnode *a_vp, int a_mode,
 *		   struct ucred *a_cred, struct thread *a_td)
 */
static int
union_open(struct vop_open_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* O_TRUNC makes copying the old contents pointless. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			/* Open the lower vnode directly (read path). */
			un->un_openl++;
			vref(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.  Note
	 * that in DragonFly, VOP_OPEN is responsible for associating
	 * a VM object with the vnode if the vnode is mappable or the
	 * underlying filesystem uses buffer cache calls on it.
	 */
	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, NULL);

	/*
	 * Release any locks held
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}
/*
 * unionfs VOP_ADVLOCK: advisory locks must live on the upper vnode so
 * they persist across copy-up.  If the node only exists in the lower
 * layer, copy it up first and migrate any existing lower-layer open to
 * the new upper vnode before forwarding the lock request.
 */
static int
unionfs_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	int error;
	struct unionfs_node *unp;
	struct unionfs_node_status *unsp;
	struct vnode   *vp;
	struct vnode   *uvp;
	kauth_cred_t	cred;

	UNIONFS_INTERNAL_DEBUG("unionfs_advlock: enter\n");

	vp = ap->a_vp;
	cred = kauth_cred_get();

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	unp = VTOUNIONFS(ap->a_vp);
	uvp = unp->un_uppervp;

	if (uvp == NULLVP) {
		/* No upper vnode yet - create one via copy-up. */
		error = unionfs_copyfile(unp, 1, cred);
		if (error != 0)
			goto unionfs_advlock_abort;
		uvp = unp->un_uppervp;

		unionfs_get_node_status(unp, &unsp);
		if (unsp->uns_lower_opencnt > 0) {
			/* try reopen the vnode */
			error = VOP_OPEN(uvp, unsp->uns_lower_openmode, cred);
			if (error)
				goto unionfs_advlock_abort;
			unsp->uns_upper_opencnt++;
			/* Shift the open accounting from lower to upper. */
			VOP_CLOSE(unp->un_lowervp,
			    unsp->uns_lower_openmode, cred);
			unsp->uns_lower_opencnt--;
		} else
			unionfs_tryrem_node_status(unp, unsp);
	}

	VOP_UNLOCK(vp);

	error = VOP_ADVLOCK(uvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);

	UNIONFS_INTERNAL_DEBUG("unionfs_advlock: leave (%d)\n", error);

	return error;

unionfs_advlock_abort:
	VOP_UNLOCK(vp);

	UNIONFS_INTERNAL_DEBUG("unionfs_advlock: leave (%d)\n", error);

	return error;
}
/*
 * Open the console device.  Always opens the 'real' underlying console
 * (cn_tab->cn_dev) rather than the indirect /dev/console node, caching a
 * vnode reference per minor in cn_devvp[].
 *
 * Fix: the original code only printf()'d when cdevvp() failed and then
 * proceeded to vn_lock()/VOP_OPEN() an uninitialized cn_devvp[unit];
 * we now propagate the error immediately.
 */
int
cnopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	dev_t cndev;
	int unit, error;

	unit = minor(dev);
	if (unit > 1)
		return ENODEV;

	if (cn_tab == NULL)
		return (0);

	/*
	 * always open the 'real' console device, so we don't get nailed
	 * later.  This follows normal device semantics; they always get
	 * open() calls.
	 */
	cndev = cn_tab->cn_dev;
#if NNULLCONS > 0
	if (cndev == NODEV) {
		nullconsattach(0);
	}
#else /* NNULLCONS > 0 */
	if (cndev == NODEV) {
		/*
		 * This is most likely an error in the console attach
		 * code.  Panicking looks better than jumping into nowhere
		 * through cdevsw below....
		 */
		panic("cnopen: no console device");
	}
#endif /* NNULLCONS > 0 */
	if (dev == cndev) {
		/*
		 * This causes cnopen() to be called recursively, which
		 * is generally a bad thing.  It is often caused when
		 * dev == 0 and cn_dev has not been set, but was probably
		 * initialised to 0.
		 */
		panic("cnopen: cn_tab->cn_dev == dev");
	}

	/* Already have a cached vnode for this minor. */
	if (cn_devvp[unit] != NULLVP)
		return 0;

	if ((error = cdevvp(cndev, &cn_devvp[unit])) != 0) {
		printf("cnopen: unable to get vnode reference\n");
		/* Do NOT fall through: cn_devvp[unit] is not valid here. */
		return error;
	}

	error = vn_lock(cn_devvp[unit], LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_OPEN(cn_devvp[unit], flag, kauth_cred_get());
		VOP_UNLOCK(cn_devvp[unit]);
	}
	return error;
}
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	/* Already opened via this layer; nothing to do. */
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 *
	 * WARNING!  The device open (devfs_spec_open()) temporarily
	 *	     releases the vnode lock on ttyvp when issuing the
	 *	     dev_dopen(), which means that the VCTTYISOPEn flag
	 *	     can race during the VOP_OPEN().
	 *
	 *	     If something does race we have to undo our potentially
	 *	     extra open.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race-1 avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);

	/*
	 * Race against ctty close or change.  This case has been validated
	 * and occurs every so often during synth builds.
	 */
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		/* Lost the race: undo our (possibly extra) open. */
		if (error == 0)
			VOP_CLOSE(ttyvp, FREAD|FWRITE, NULL);
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	if (error == 0)
		vsetflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
/*
 * Rump public wrapper for VOP_OPEN: schedules the calling thread onto
 * the rump kernel CPU for the duration of the VOP call and returns the
 * operation's error code unchanged.
 */
int
RUMP_VOP_OPEN(struct vnode *vp, int mode, struct kauth_cred *cred)
{
	int rv;

	rump_schedule();
	rv = VOP_OPEN(vp, mode, cred);
	rump_unschedule();

	return rv;
}
/*
 * Open /dev/tty: forward the open to the caller's controlling terminal.
 * Returns ENXIO when the process has no controlling tty.
 */
int
cttyopen(dev_t dev, int flag, int mode, struct proc *p)
{
	int result;
	struct vnode *ttyvp;

	ttyvp = cttyvp(p);
	if (ttyvp == NULL)
		return (ENXIO);

	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
	/* NOCRED: the session already gates access to the terminal. */
	result = VOP_OPEN(ttyvp, flag, NOCRED);
	VOP_UNLOCK(ttyvp, 0);

	return (result);
}
STATIC int xfs_file_open( struct inode *inode, struct file *filp) { vnode_t *vp = vn_from_inode(inode); int error; if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EFBIG; VOP_OPEN(vp, NULL, error); return -error; }
/*
 * Linux file_operations open hook (older linvfs shim).  Forwards the
 * open to the vnode layer; VOP_OPEN here is a statement-style macro
 * that stores its result in 'error' and may hand back a replacement
 * vnode in 'newvp'.  Returns a negated errno for the Linux VFS.
 */
STATIC int
linvfs_open(
	struct inode	*inode,
	struct file	*filp)
{
	vnode_t		*vp = LINVFS_GET_VP(inode);
	vnode_t		*newvp;
	int		error;

	ASSERT(vp);
	VOP_OPEN(vp, &newvp, 0, get_current_cred(), error);
	return -error;
}
STATIC int linvfs_open( struct inode *inode, struct file *filp) { vnode_t *vp = LINVFS_GET_VP(inode); int error; if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EFBIG; ASSERT(vp); VOP_OPEN(vp, NULL, error); return -error; }
/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
{
	struct nameidata nd;
	struct vnode *vp;
	int     error;

	if (l == NULL)
		return ESRCH;	/* Is ESRCH the best choice? */

	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
		    ("%s: vn_open error = %d\n", __func__, error));
		return error;
	}

	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}

	/* Reopen as anonymous vnode to protect against forced unmount. */
	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
		goto out;
	/* Close the named vnode; from here on we use the anonymous one. */
	VOP_UNLOCK(vp);
	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	/* Writer accounting is protected by the vnode interlock. */
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);

	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));

	return 0;
out:
	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
	return error;
}
/*
 * Linux file_operations open hook bridging into the MVFS vnode layer.
 * Rejects shadow (non-MVFS) inodes, runs the generic Linux open checks,
 * then forwards to VOP_OPEN with call-data marshalled for the vnode
 * interface.  A vnode swap by VOP_OPEN is considered fatal here (BUG()).
 * Returns a negative Linux errno on failure.
 */
int
vnode_fop_open(
    INODE_T *ino_p,
    FILE_T *file_p
)
{
    int status = 0;
    VNODE_T *avp;
    VNODE_T *vp;
    CALL_DATA_T cd;

    /* No asserts on BKL; locking protocol is changing */

    ASSERT(MDKI_INOISOURS(ino_p));
    if (!MDKI_INOISMVFS(ino_p)) {
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s shouldn't be called on shadow?"
                     " (files swapped at open): vp %p fp %p\n",
                     __func__, ino_p, file_p);
        return -ENOSYS;
    }

    /* Standard Linux-side open validation (e.g. O_LARGEFILE). */
    if ((status = generic_file_open(ino_p, file_p))) {
        return status;
    }

    avp = ITOV(ino_p);
    vp = avp;
    mdki_linux_init_call_data(&cd);
    status = VOP_OPEN(&vp, vnlayer_filep_to_flags(file_p), &cd,
                      (file_ctx *)file_p);
    /* Translate the vnode-layer (UNIX) errno into a Linux errno. */
    status = mdki_errno_unix_to_linux(status);
    mdki_linux_destroy_call_data(&cd);
    MDKI_TRACE(TRACE_OPEN, "%s opened vp=%p fp=%p pvt=%p pcnt=%ld\n",
              __func__, vp, file_p, REALFILE(file_p),
              REALFILE(file_p) ? (long)F_COUNT(REALFILE(file_p)) : 0);
    if (avp != vp) {
        /* VOP_OPEN swapped vnodes out from under us - unsupported. */
        printk("switcheroo on open? %p became %p\n", avp, vp); /* XXX */
        BUG();
    }
    return status;
}
/*
 * Resolve and open a HAMMER volume's backing device.
 *
 * If *devvpp is NULL the device path is looked up first; the vnode is
 * then validated (must be a disk, not already mounted, not otherwise
 * referenced), flushed, and opened FREAD or FREAD|FWRITE depending on
 * ronly.  On any failure the vnode reference is dropped and *devvpp is
 * reset to NULL.
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		/* Must be a disk device and must not be mounted already. */
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	/* Refuse a device that anyone else holds open. */
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}
/*
 * Enable an EA using the passed file system, backing vnode, attribute name,
 * namespace, and proc.  Will perform a VOP_OPEN() on the vp, so expects vp
 * to be locked when passed in.  The vnode will be returned unlocked,
 * regardless of success/failure of the function.  As a result, the caller
 * will always need to vrele(), but not vput().
 */
static int
ufs_extattr_enable_with_open(struct ufsmount *ump, struct vnode *vp,
    int attrnamespace, const char *attrname, struct proc *p)
{
	int error;

	error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p);
	if (error) {
		printf("ufs_extattr_enable_with_open.VOP_OPEN(): failed "
		    "with %d\n", error);
		VOP_UNLOCK(vp, 0, p);
		return (error);
	}

#if 0 /* - XXX */
	/*
	 * XXX: Note, should VOP_CLOSE() if vfs_object_create() fails, but due
	 * to a similar piece of code in vn_open(), we don't.
	 */
	if (vn_canvmio(vp) == TRUE)
		if ((error = vfs_object_create(vp, p, p->p_ucred)) != 0) {
			/*
			 * XXX: bug replicated from vn_open(): should
			 * VOP_CLOSE() here.
			 */
			VOP_UNLOCK(vp, 0, p);
			return (error);
		}
#endif

	/* Writer accounting plus a reference for the backing store. */
	vp->v_writecount++;

	vref(vp);

	VOP_UNLOCK(vp, 0, p);

	error = ufs_extattr_enable(ump, attrnamespace, attrname, vp, p);
	if (error != 0)
		/* Undo the open on failure. */
		vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
	return (error);
}
/*
 * Hacked up version of vn_open. We _only_ handle ptys and only open
 * them with FREAD|FWRITE and never deal with creat or stuff like that.
 *
 * We need it because we have to fake up root credentials to open the pty.
 */
static int
ptm_vn_open(struct nameidata *ndp)
{
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred;
	struct vattr vattr;
	struct vnode *vp;
	int error;

	if ((error = namei(ndp)) != 0)
		return (error);
	vp = ndp->ni_vp;
	/* ptys are always character devices. */
	if (vp->v_type != VCHR) {
		error = EINVAL;
		goto bad;
	}
	/*
	 * Get us a fresh cred with root privileges.
	 */
	cred = crget();
	error = VOP_OPEN(vp, FREAD|FWRITE, cred, p);
	if (!error) {
		/* update atime/mtime */
		VATTR_NULL(&vattr);
		getnanotime(&vattr.va_atime);
		vattr.va_mtime = vattr.va_atime;
		vattr.va_vaflags |= VA_UTIMES_NULL;
		/* Best-effort: SETATTR result is intentionally ignored. */
		(void)VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	}
	crfree(cred);
	if (error)
		goto bad;
	vp->v_writecount++;
	return (0);
bad:
	/* Drop both the namei reference and the vnode lock. */
	vput(vp);
	return (error);
}
/* * Hacked up version of vn_open. We _only_ handle ptys and only open * them with FREAD|FWRITE and never deal with creat or stuff like that. * * We need it because we have to fake up root credentials to open the pty. */ int pty_vn_open(struct vnode *vp, struct lwp *l) { int error; if (vp->v_type != VCHR) { vput(vp); return EINVAL; } error = VOP_OPEN(vp, FREAD|FWRITE, lwp0.l_cred); if (error) { vput(vp); return error; } vp->v_writecount++; return 0; }
/*
 * Open a disk device by name (with or without a "/dev/" prefix) via the
 * synthesized-vnode path.  On success *vpp holds the opened, unlocked
 * vnode; on failure *vpp is set to NULL and an errno is returned.
 */
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* Accept both "da0" and "/dev/da0" forms. */
	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		/* getsynthvnode() returns the vnode locked; open then unlock. */
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}
/* ARGSUSED */
/*
 * Open an extended-attribute directory.  Writes are never allowed at
 * this layer.  When a real backing attribute directory exists the open
 * is forwarded to it; otherwise the open trivially succeeds.
 */
static int
xattr_dir_open(vnode_t **vpp, int flags, cred_t *cr, caller_context_t *ct)
{
	vnode_t *realvp;

	/* xattr directories are read-only through this layer. */
	if (flags & FWRITE)
		return (EACCES);

	/*
	 * If there is a real extended attribute directory,
	 * let the underlying FS see the VOP_OPEN call;
	 * otherwise just return zero.
	 */
	if (xattr_dir_realdir(*vpp, &realvp, LOOKUP_XATTR, cr, ct) == 0)
		return (VOP_OPEN(&realvp, flags, cr, ct));

	return (0);
}
/*
 * Do the actual work of mounting an MSDOS (FAT12/16/32) filesystem from
 * the block device devvp.  Reads and validates the boot sector, derives
 * the filesystem geometry, builds the in-core msdosfsmount, and fills
 * the cluster in-use bitmap.  On any failure after the device open, the
 * error_exit path closes the device and frees partial state.
 */
int
msdosfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p,
    struct msdosfs_args *argp)
{
	struct msdosfsmount *pmp;
	struct buf *bp;
	dev_t dev = devvp->v_rdev;
	union bootsector *bsp;
	struct byte_bpb33 *b33;
	struct byte_bpb50 *b50;
	struct byte_bpb710 *b710;
	extern struct vnode *rootvp;
	u_int8_t SecPerClust;
	int	ronly, error, bmapsiz;
	uint32_t fat_max_clusters;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	bp = NULL; /* both used in error_exit */
	pmp = NULL;

	/*
	 * Read the boot sector of the filesystem, and then check the
	 * boot signature.  If not a dos boot sector then error out.
	 */
	if ((error = bread(devvp, 0, 4096, NOCRED, &bp)) != 0)
		goto error_exit;
	bp->b_flags |= B_AGE;
	bsp = (union bootsector *)bp->b_data;
	b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
	b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
	b710 = (struct byte_bpb710 *)bsp->bs710.bsPBP;

	pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
	pmp->pm_mountp = mp;

	/*
	 * Compute several useful quantities from the bpb in the
	 * bootsector.  Copy in the dos 5 variant of the bpb then fix up
	 * the fields that are different between dos 5 and dos 3.3.
	 */
	SecPerClust = b50->bpbSecPerClust;
	pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
	pmp->pm_ResSectors = getushort(b50->bpbResSectors);
	pmp->pm_FATs = b50->bpbFATs;
	pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
	pmp->pm_Sectors = getushort(b50->bpbSectors);
	pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
	pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
	pmp->pm_Heads = getushort(b50->bpbHeads);
	pmp->pm_Media = b50->bpbMedia;

	/* Determine the number of DEV_BSIZE blocks in a MSDOSFS sector */
	pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;

	if (!pmp->pm_BytesPerSec || !SecPerClust ||
	    pmp->pm_SecPerTrack > 64) {
		error = EFTYPE;
		goto error_exit;
	}

	if (pmp->pm_Sectors == 0) {
		/* Large-volume form: 32-bit sector counts from the dos5 bpb. */
		pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
		pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
	} else {
		pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
		pmp->pm_HugeSectors = pmp->pm_Sectors;
	}

	if (pmp->pm_RootDirEnts == 0) {
		/* FAT32: no fixed root directory; extra sanity checks. */
		if (pmp->pm_Sectors || pmp->pm_FATsecs ||
		    getushort(b710->bpbFSVers)) {
			error = EINVAL;
			goto error_exit;
		}
		pmp->pm_fatmask = FAT32_MASK;
		pmp->pm_fatmult = 4;
		pmp->pm_fatdiv = 1;
		pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
		if (getushort(b710->bpbExtFlags) & FATMIRROR)
			pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
		else
			pmp->pm_flags |= MSDOSFS_FATMIRROR;
	} else
		pmp->pm_flags |= MSDOSFS_FATMIRROR;

	/*
	 * More sanity checks:
	 *	MSDOSFS sectors per cluster: >0 && power of 2
	 *	MSDOSFS sector size: >= DEV_BSIZE && power of 2
	 *	HUGE sector count: >0
	 *	FAT sectors: >0
	 */
	if ((SecPerClust == 0) || (SecPerClust & (SecPerClust - 1)) ||
	    (pmp->pm_BytesPerSec < DEV_BSIZE) ||
	    (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1)) ||
	    (pmp->pm_HugeSectors == 0) || (pmp->pm_FATsecs == 0)) {
		error = EINVAL;
		goto error_exit;
	}

	/* Convert filesystem-sector counts into DEV_BSIZE block counts. */
	pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
	pmp->pm_HiddenSects *= pmp->pm_BlkPerSec;
	pmp->pm_FATsecs *= pmp->pm_BlkPerSec;
	pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;
	SecPerClust *= pmp->pm_BlkPerSec;

	if (FAT32(pmp)) {
		pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
		pmp->pm_firstcluster = pmp->pm_fatblk
			+ (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_fsinfo = getushort(b710->bpbFSInfo) *
		    pmp->pm_BlkPerSec;
	} else {
		/* FAT12/16: fixed root directory follows the FATs. */
		pmp->pm_rootdirblk = pmp->pm_fatblk +
			(pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_rootdirsize = (pmp->pm_RootDirEnts *
		    sizeof(struct direntry) + DEV_BSIZE - 1) / DEV_BSIZE;
		pmp->pm_firstcluster = pmp->pm_rootdirblk +
		    pmp->pm_rootdirsize;
	}

	pmp->pm_nmbrofclusters = (pmp->pm_HugeSectors -
	    pmp->pm_firstcluster) / SecPerClust;
	pmp->pm_maxcluster = pmp->pm_nmbrofclusters + 1;
	pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE;

	if (pmp->pm_fatmask == 0) {
		/* FAT type not fixed above: decide FAT12 vs FAT16 by size. */
		if (pmp->pm_maxcluster
		    <= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
			/*
			 * This will usually be a floppy disk. This size makes
			 * sure that one fat entry will not be split across
			 * multiple blocks.
			 */
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	}
	if (FAT12(pmp))
		pmp->pm_fatblocksize = 3 * pmp->pm_BytesPerSec;
	else
		pmp->pm_fatblocksize = MAXBSIZE;

	/*
	 * We now have the number of sectors in each FAT, so can work
	 * out how many clusters can be represented in a FAT.  Let's
	 * make sure the file system doesn't claim to have more clusters
	 * than this.
	 *
	 * We perform the calculation like we do to avoid integer overflow.
	 *
	 * This will give us a count of clusters.  They are numbered
	 * from 0, so the max cluster value is one less than the value
	 * we end up with.
	 */
	fat_max_clusters = pmp->pm_fatsize / pmp->pm_fatmult;
	fat_max_clusters *= pmp->pm_fatdiv;
	if (pmp->pm_maxcluster >= fat_max_clusters) {
#ifndef SMALL_KERNEL
		printf("msdosfs: reducing max cluster to %d from %d "
		    "due to FAT size\n", fat_max_clusters - 1,
		    pmp->pm_maxcluster);
#endif
		pmp->pm_maxcluster = fat_max_clusters - 1;
	}

	pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
	pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;

	/*
	 * Compute mask and shift value for isolating cluster relative byte
	 * offsets and cluster numbers from a file offset.
	 */
	pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
	pmp->pm_crbomask = pmp->pm_bpcluster - 1;
	pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;

	/*
	 * Check for valid cluster size
	 * must be a power of 2
	 */
	if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
		error = EFTYPE;
		goto error_exit;
	}

	/*
	 * Release the bootsector buffer.
	 */
	brelse(bp);
	bp = NULL;

	/*
	 * Check FSInfo.
	 */
	if (pmp->pm_fsinfo) {
		struct fsinfo *fp;

		if ((error = bread(devvp, pmp->pm_fsinfo, fsi_size(pmp),
		    NOCRED, &bp)) != 0)
			goto error_exit;
		fp = (struct fsinfo *)bp->b_data;
		if (!bcmp(fp->fsisig1, "RRaA", 4)
		    && !bcmp(fp->fsisig2, "rrAa", 4)
		    && !bcmp(fp->fsisig3, "\0\0\125\252", 4)
		    && !bcmp(fp->fsisig4, "\0\0\125\252", 4))
			/* Valid FSInfo. */
			;
		else
			pmp->pm_fsinfo = 0;
		/* XXX make sure this tiny buf doesn't come back in fillinusemap! */
		SET(bp->b_flags, B_INVAL);
		brelse(bp);
		bp = NULL;
	}

	/*
	 * Check and validate (or perhaps invalidate?) the fsinfo structure? XXX
	 */

	/*
	 * Allocate memory for the bitmap of allocated clusters, and then
	 * fill it in.
	 */
	bmapsiz = (pmp->pm_maxcluster + N_INUSEBITS - 1) / N_INUSEBITS;
	if (bmapsiz == 0 || SIZE_MAX / bmapsiz < sizeof(*pmp->pm_inusemap)) {
		/* detect multiplicative integer overflow */
		error = EINVAL;
		goto error_exit;
	}
	pmp->pm_inusemap = malloc(bmapsiz * sizeof(*pmp->pm_inusemap),
	    M_MSDOSFSFAT, M_WAITOK | M_CANFAIL);
	if (pmp->pm_inusemap == NULL) {
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * fillinusemap() needs pm_devvp.
	 */
	pmp->pm_dev = dev;
	pmp->pm_devvp = devvp;

	/*
	 * Have the inuse map filled in.
	 */
	if ((error = fillinusemap(pmp)) != 0)
		goto error_exit;

	/*
	 * If they want fat updates to be synchronous then let them suffer
	 * the performance degradation in exchange for the on disk copy of
	 * the fat being correct just about all the time.  I suppose this
	 * would be a good thing to turn on if the kernel is still flakey.
	 */
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;

	/*
	 * Finish up.
	 */
	if (ronly)
		pmp->pm_flags |= MSDOSFSMNT_RONLY;
	else
		pmp->pm_fmod = 1;
	mp->mnt_data = (qaddr_t)pmp;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
#ifdef QUOTA
	/*
	 * If we ever do quotas for DOS filesystems this would be a place
	 * to fill in the info in the msdosfsmount structure. You dolt,
	 * quotas on dos filesystems make no sense because files have no
	 * owners on dos filesystems. of course there is some empty space
	 * in the directory entry where we could put uid's and gid's.
	 */
#endif
	devvp->v_specmountpoint = mp;

	return (0);

error_exit:
	/* Undo everything: clear the mount binding, close the device. */
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);

	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	(void) VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	VOP_UNLOCK(devvp, 0, p);

	if (pmp) {
		if (pmp->pm_inusemap)
			free(pmp->pm_inusemap, M_MSDOSFSFAT);
		free(pmp, M_MSDOSFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	int error;

	/* Must request at least one of read/write. */
	if ((fmode & (FREAD|FWRITE)) == 0)
		return (EINVAL);
	/* O_TRUNC without write access makes no sense. */
	if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
		return (EINVAL);

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			/* Target does not exist: create it. */
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			if (error)
				return (error);
			/* Freshly created file is already empty. */
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			/* Target exists: abort the pending create. */
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	/* A symlink here means O_NOFOLLOW hit a link. */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		/* Not freshly created: enforce access permissions. */
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & FWRITE) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
		/* Truncate via SETATTR before the open. */
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;

	if (vp->v_flag & VCLONED) {
		/* Device open returned a cloned vnode; swap it in. */
		struct cloneinfo *cip = (struct cloneinfo *) vp->v_data;

		vp->v_flag &= ~VCLONED;
		ndp->ni_vp = cip->ci_vp;	/* return cloned vnode */
		vp->v_data = cip->ci_data;	/* restore v_data */
		VOP_UNLOCK(vp, 0, p);		/* keep a reference */
		vp = ndp->ni_vp;		/* for the increment below */
		free(cip, M_TEMP);
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
static int coff_load_file(struct thread *td, char *name) { struct proc *p = td->td_proc; struct vmspace *vmspace = p->p_vmspace; int error; struct nameidata nd; struct vnode *vp; struct vattr attr; struct filehdr *fhdr; struct aouthdr *ahdr; struct scnhdr *scns; char *ptr = 0; int nscns; unsigned long text_offset = 0, text_address = 0, text_size = 0; unsigned long data_offset = 0, data_address = 0, data_size = 0; unsigned long bss_size = 0; int i; NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME, UIO_SYSSPACE, name, td); error = namei(&nd); if (error) return error; vp = nd.ni_vp; if (vp == NULL) return ENOEXEC; if (vp->v_writecount) { error = ETXTBSY; goto fail; } if ((error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) != 0) goto fail; if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) goto fail; if (attr.va_size == 0) { error = ENOEXEC; goto fail; } if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0) goto fail; if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0) goto fail; /* * Lose the lock on the vnode. It's no longer needed, and must not * exist for the pagefault paging to work below. */ VOP_UNLOCK(vp, 0, td); if ((error = vm_mmap(kernel_map, (vm_offset_t *) &ptr, PAGE_SIZE, VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0)) != 0) goto unlocked_fail; fhdr = (struct filehdr *)ptr; if (fhdr->f_magic != I386_COFF) { error = ENOEXEC; goto dealloc_and_fail; } nscns = fhdr->f_nscns; if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) { /* * XXX -- just fail. I'm so lazy. 
*/ error = ENOEXEC; goto dealloc_and_fail; } ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr)); scns = (struct scnhdr*)(ptr + sizeof(struct filehdr) + sizeof(struct aouthdr)); for (i = 0; i < nscns; i++) { if (scns[i].s_flags & STYP_NOLOAD) continue; else if (scns[i].s_flags & STYP_TEXT) { text_address = scns[i].s_vaddr; text_size = scns[i].s_size; text_offset = scns[i].s_scnptr; } else if (scns[i].s_flags & STYP_DATA) { data_address = scns[i].s_vaddr; data_size = scns[i].s_size; data_offset = scns[i].s_scnptr; } else if (scns[i].s_flags & STYP_BSS) { bss_size = scns[i].s_size; } } if ((error = load_coff_section(vmspace, vp, text_offset, (caddr_t)(void *)(uintptr_t)text_address, text_size, text_size, VM_PROT_READ | VM_PROT_EXECUTE)) != 0) { goto dealloc_and_fail; } if ((error = load_coff_section(vmspace, vp, data_offset, (caddr_t)(void *)(uintptr_t)data_address, data_size + bss_size, data_size, VM_PROT_ALL)) != 0) { goto dealloc_and_fail; } error = 0; dealloc_and_fail: if (vm_map_remove(kernel_map, (vm_offset_t) ptr, (vm_offset_t) ptr + PAGE_SIZE)) panic("%s vm_map_remove failed", __func__); fail: VOP_UNLOCK(vp, 0, td); unlocked_fail: NDFREE(&nd, NDF_ONLY_PNBUF); vrele(nd.ni_vp); return error; }
/*
 * Do the real work of mounting a UDF filesystem from the block device
 * `devvp': open the device, read the Anchor Volume Descriptor Pointer,
 * walk the Volume Descriptor Sequence for the Partition and Logical Volume
 * Descriptors, set up the metadata partition and VAT when present, and
 * finally locate the Fileset Descriptor and the root directory's file
 * entry.
 *
 * On success 0 is returned and devvp->v_specmountpoint is set; on failure
 * everything allocated here is released, the device is closed again, and
 * an errno value is returned.
 */
int
udf_mountfs(struct vnode *devvp, struct mount *mp, uint32_t lb, struct proc *p)
{
	struct buf *bp = NULL;
	struct anchor_vdp avdp;
	struct umount *ump = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct extfile_entry *xfentry;
	struct file_entry *fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	error = VOP_OPEN(devvp, FREAD, FSCRED, p);
	if (error)
		return (error);

	ump = malloc(sizeof(*ump), M_UDFMOUNT, M_WAITOK | M_ZERO);

	mp->mnt_data = (qaddr_t) ump;
	mp->mnt_stat.f_fsid.val[0] = devvp->v_rdev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_UDF);
	mp->mnt_flag |= MNT_LOCAL;

	ump->um_mountp = mp;
	ump->um_dev = devvp->v_rdev;
	ump->um_devvp = devvp;

	bsize = 2048;	/* Should probe the media for its size. */

	/*
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(bsize), bsize, NOCRED,
	    &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data,
	    TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * Should we care about the partition type right now?
	 * What about multiple partitions?
	 */
	mvds_start = letoh32(avdp.main_vds_ex.loc);
	mvds_end = mvds_start + (letoh32(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(bsize), bsize,
				   NOCRED, &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			ump->um_bsize = letoh32(lvd->lb_size);
			ump->um_bmask = ump->um_bsize - 1;
			ump->um_bshift = ffs(ump->um_bsize) - 1;
			fsd_part = letoh16(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = letoh32(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(ump, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = letoh16(pd->part_num);
			ump->um_len = ump->um_reallen = letoh32(pd->part_len);
			ump->um_start = ump->um_realstart =
			    letoh32(pd->start_loc);
		}

		brelse(bp);
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
		/* Read Metadata File 'File Entry' to find Metadata file. */
		struct long_ad *la;

		/* um_meta_start was set in udf_get_mpartmap(). */
		sector = ump->um_start + ump->um_meta_start;
		if ((error = RDSECTOR(devvp, sector, ump->um_bsize,
		    &bp)) != 0) {
			printf("Cannot read sector %d for Metadata File Entry\n",
			    sector);
			error = EINVAL;
			goto bail;
		}
		xfentry = (struct extfile_entry *)bp->b_data;
		fentry = (struct file_entry *)bp->b_data;
		if (udf_checktag(&xfentry->tag, TAGID_EXTFENTRY) == 0)
			la = (struct long_ad *)&xfentry->data[letoh32(xfentry->l_ea)];
		else if (udf_checktag(&fentry->tag, TAGID_FENTRY) == 0)
			la = (struct long_ad *)&fentry->data[letoh32(fentry->l_ea)];
		else {
			printf("Invalid Metadata File FE @ sector %d! "
			    "(tag.id %d)\n", sector, fentry->tag.id);
			error = EINVAL;
			goto bail;
		}
		ump->um_meta_start = letoh32(la->loc.lb_num);
		ump->um_meta_len = letoh32(la->len);
		if (bp != NULL) {
			brelse(bp);
			bp = NULL;
		}
	} else if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}

	mtx_init(&ump->um_hashmtx, IPL_NONE);
	ump->um_hashtbl = hashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, M_WAITOK,
	    &ump->um_hashsz);

	/* Get the VAT, if needed */
	if (ump->um_flags & UDF_MNT_FIND_VAT) {
		error = udf_vat_get(ump, lb);
		if (error)
			goto bail;
	}

	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */
	if (ISSET(ump->um_flags, UDF_MNT_USES_META))
		sector = ump->um_meta_start;
	else
		sector = fsd_offset;
	/* BUGFIX: call was mojibake-garbled to "udf_vat_map(ump, §or);". */
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &ump->um_root_icb,
		    sizeof(struct long_ad));
		if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
			ump->um_root_icb.loc.lb_num += ump->um_meta_start;
			ump->um_root_icb.loc.part_num = part_num;
		}
	}

	brelse(bp);
	bp = NULL;
	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = letoh32(ump->um_root_icb.loc.lb_num);
	size = letoh32(ump->um_root_icb.len);
	/* BUGFIX: call was mojibake-garbled to "udf_vat_map(ump, §or);". */
	udf_vat_map(ump, &sector);
	if ((error = udf_readlblks(ump, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	xfentry = (struct extfile_entry *)bp->b_data;
	fentry = (struct file_entry *)bp->b_data;
	error = udf_checktag(&xfentry->tag, TAGID_EXTFENTRY);
	if (error) {
		error = udf_checktag(&fentry->tag, TAGID_FENTRY);
		if (error) {
			printf("Invalid root file entry!\n");
			goto bail;
		}
	}

	brelse(bp);
	bp = NULL;

	devvp->v_specmountpoint = mp;

	return (0);

bail:
	/* Undo everything done above and close the device again. */
	if (ump->um_hashtbl != NULL)
		free(ump->um_hashtbl, M_UDFMOUNT);

	if (ump != NULL) {
		free(ump, M_UDFMOUNT);
		mp->mnt_data = NULL;
		mp->mnt_flag &= ~MNT_LOCAL;
	}
	if (bp != NULL)
		brelse(bp);

	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	VOP_CLOSE(devvp, FREAD, FSCRED, p);
	VOP_UNLOCK(devvp, 0, p);

	return (error);
}