/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 *
 * avc   - the AFS vcache whose backing vnode's pages are to be flushed.
 * acred - caller's credentials (unused here).
 * sync  - sync flag (unused here; cleaning is always OBJPC_SYNC below).
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;	/* vnode lock state on entry; restored on exit */

    vp = AFSTOV(avc);

    /* A doomed vnode is being reclaimed; nothing to flush. */
    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	return;
    }
    VI_UNLOCK(vp);

    /* Ensure we hold the vnode lock exclusively for the duration.
     * Remember how it was locked on entry so we can restore that state
     * (downgrade if we upgraded, unlock if we locked it ourselves). */
    islocked = islocked_vnode(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	lock_vnode(vp, LK_UPGRADE);
    } else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do. (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().) -GAW
	 */
	/*
	 * Dunno. We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now. And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems. Matt.
	 */
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
    }

    /* Invalidate the buffers, retrying a bounded number of times if the
     * call keeps failing (it can be interrupted: PCATCH is passed).
     * Worst case: one initial attempt plus five retries. */
    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }

    /* Restore the entry lock state. */
    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);
    else if (!islocked)
	unlock_vnode(vp);
}
/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
PUBLIC void put_vnode(struct vnode *vp)
{
/* Decrease vnode's usage counter and decrease inode's usage counter in the
 * corresponding FS process. Decreasing the fs_count each time we decrease the
 * ref count would lead to poor performance. Instead, only decrease fs_count
 * when the ref count hits zero. However, this could lead to fs_count to wrap.
 * To prevent this, we drop the counter to 1 when the counter hits 256.
 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
 * sync.
 */
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);	/* EBUSY here means we already hold it */

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;

	/* Periodically resynchronize fs_count with the FS to keep it from
	 * wrapping (see comment at top of function). */
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);

	/* Only release the lock if we actually took it above. */
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  tll_upgrade(&vp->v_lock);	/* Make sure nobody else accesses this vnode */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK) {
	/* Best effort: log and continue releasing the slot anyway. */
	printf("VFS: putnode failed: %d\n", r);
	util_stacktrace();
  }

  /* This inode could've been mapped. If so, tell mapped FS to close it as
   * well. If mapped onto same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  /* Mark the vnode slot free. */
  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}
/*===========================================================================*
 *				do_filp_gc				     *
 *===========================================================================*/
/* Garbage-collect filp slots that were marked FS_INVALIDATED: close any such
 * filp that is still open. Runs as a worker thread body; the argument is
 * unused and the return value is always NULL (thread-entry signature).
 */
void *do_filp_gc(void *UNUSED(arg))
{
  struct filp *f;
  struct vnode *vp;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (!(f->filp_state & FS_INVALIDATED)) continue;

	if (f->filp_mode == FILP_CLOSED || f->filp_vno == NULL) {
		/* File was already closed before gc could kick in */
		assert(f->filp_count <= 0);
		f->filp_state &= ~FS_INVALIDATED;
		f->filp_count = 0;
		continue;
	}

	assert(f->filp_vno != NULL);
	vp = f->filp_vno;

	/* Synchronize with worker thread that might hold a lock on the vp */
	lock_vnode(vp, VNODE_OPCL);
	unlock_vnode(vp);

	/* If garbage collection was invoked due to a failed device open
	 * request, then common_open has already cleaned up and we have
	 * nothing to do.
	 * NOTE: the flag is re-checked because another thread may have
	 * cleared it while we waited on the vnode lock above. */
	if (!(f->filp_state & FS_INVALIDATED)) {
		continue;
	}

	/* If garbage collection was invoked due to a failed device close
	 * request, the close_filp has already cleaned up and we have nothing
	 * to do.
	 */
	if (f->filp_mode != FILP_CLOSED) {
		assert(f->filp_count == 0);
		f->filp_count = 1;	/* So lock_filp and close_filp will do
					 * their job */
		lock_filp(f, VNODE_READ);
		close_filp(f);
	}

	f->filp_state &= ~FS_INVALIDATED;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks: ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
 * *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform. It may be that *slept is
 * therefore obsolescent.
 *
 * OSF/1 Locking: VN_LOCK has been called.
 * We do not lock the vnode here, but instead require that it be exclusive
 * locked by code calling osi_VM_StoreAllSegments directly, or scheduling it
 * from the bqueue - Matt
 * Maybe better to just call vnode_pager_setsize()?
 *
 * Returns EBUSY when the vcache cannot be recycled (extra references, open
 * files, or a held lock), 0 otherwise. *slept is never set by this
 * implementation.
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    /* Refuse to recycle a vcache that is still referenced... */
    if (VREFCOUNT(avc) > 1)
	return EBUSY;

    /* ...or that still has open files... */
    if (avc->opens)
	return EBUSY;

    /* ...or whose lock is held by someone. */
    if (CheckLock(&avc->lock))
	return EBUSY;

    /* The actual page-discarding code is intentionally disabled: the
     * unconditional return here predates this review, and everything after
     * it was unreachable dead code. It is kept below under #if 0 for
     * reference instead of being silently unreachable, so the compiler no
     * longer sees (and warns about) dead statements and unused locals.
     * NOTE(review): if page discarding should be re-enabled, remove this
     * return and the #if 0 guard — confirm against upstream first. */
    return 0;

#if 0
    {
	struct vm_object *obj;
	struct vnode *vp;

	AFS_GUNLOCK();
	vp = AFSTOV(avc);
#ifndef AFS_FBSD70_ENV
	lock_vnode(vp);
#endif
	if (VOP_GETVOBJECT(vp, &obj) == 0) {
	    VM_OBJECT_LOCK(obj);
	    vm_object_page_remove(obj, 0, 0, FALSE);
#if 1
	    if (obj->ref_count == 0) {
		simple_lock(&vp->v_interlock);
		vgonel(vp, curthread);
		vp->v_tag = VT_AFS;
		SetAfsVnode(vp);
	    }
#endif
	    VM_OBJECT_UNLOCK(obj);
	}
#ifndef AFS_FBSD70_ENV
	unlock_vnode(vp);
#endif
	AFS_GLOCK();
	return 0;
    }
#endif /* 0 */
}
/*===========================================================================*
 *				new_node				     *
 *===========================================================================*/
static struct vnode *new_node(struct lookup *resolve, int oflags, mode_t bits)
{
/* Try to create a new inode and return a pointer to it. If the inode already
   exists, return a pointer to it as well, but set err_code accordingly.
   NULL is returned if the path cannot be resolved up to the last
   directory, or when the inode cannot be created due to permissions or
   otherwise.
 *
 * resolve - lookup state; l_path names the file, l_vnode receives the result.
 * oflags  - open(2) flags (O_EXCL and O_TRUNC influence lookup behavior).
 * bits    - mode bits for the inode to be created.
 *
 * On failure the global err_code is set; the recursion below handles
 * symlink chains (absolute-path symlinks and dangling symlinks). */
  struct vnode *dirp, *vp;
  struct vmnt *dir_vmp, *vp_vmp;
  int r;
  struct node_details res;
  struct lookup findnode;
  char *path;

  path = resolve->l_path;	/* For easy access */

  lookup_init(&findnode, path, resolve->l_flags, &dir_vmp, &dirp);
  findnode.l_vmnt_lock = VMNT_WRITE;
  findnode.l_vnode_lock = VNODE_WRITE;	/* dir node */

  /* When O_CREAT and O_EXCL flags are set, the path may not be named by a
   * symbolic link. */
  if (oflags & O_EXCL) findnode.l_flags |= PATH_RET_SYMLINK;

  /* See if the path can be opened down to the last directory. */
  if ((dirp = last_dir(&findnode, fp)) == NULL) return(NULL);

  /* The final directory is accessible. Get final component of the path. */
  lookup_init(&findnode, findnode.l_path, findnode.l_flags, &vp_vmp, &vp);
  findnode.l_vmnt_lock = VMNT_WRITE;
  findnode.l_vnode_lock = (oflags & O_TRUNC) ? VNODE_WRITE : VNODE_OPCL;
  vp = advance(dirp, &findnode, fp);
  assert(vp_vmp == NULL);	/* Lookup to last dir should have yielded lock
				 * on vmp or final component does not exist.
				 * Either way, vp_vmp ought to be not set.
				 */

  /* The combination of a symlink with absolute path followed by a danglink
   * symlink results in a new path that needs to be re-resolved entirely. */
  if (path[0] == '/') {
	/* Release everything we hold, then restart from scratch with the
	 * rewritten (absolute) path. */
	unlock_vnode(dirp);
	unlock_vmnt(dir_vmp);
	put_vnode(dirp);
	if (vp != NULL) {
		unlock_vnode(vp);
		put_vnode(vp);
	}
	return new_node(resolve, oflags, bits);
  }

  if (vp == NULL && err_code == ENOENT) {
	/* Last path component does not exist. Make a new directory entry. */
	if ((vp = get_free_vnode()) == NULL) {
		/* Can't create new entry: out of vnodes. */
		unlock_vnode(dirp);
		unlock_vmnt(dir_vmp);
		put_vnode(dirp);
		return(NULL);
	}

	lock_vnode(vp, VNODE_OPCL);
	/* Need write+search permission in the directory; then ask the FS to
	 * actually create the inode. */
	if ((r = forbidden(fp, dirp, W_BIT|X_BIT)) != OK ||
	    (r = req_create(dirp->v_fs_e, dirp->v_inode_nr,bits, fp->fp_effuid,
			    fp->fp_effgid, path, &res)) != OK ) {
		/* Can't create inode either due to permissions or some other
		 * problem. In case r is EEXIST, we might be dealing with a
		 * dangling symlink.*/
		if (r == EEXIST) {
			struct vnode *slp, *old_wd;

			/* Resolve path up to symlink */
			findnode.l_flags = PATH_RET_SYMLINK;
			findnode.l_vnode_lock = VNODE_READ;
			findnode.l_vnode = &slp;
			slp = advance(dirp, &findnode, fp);
			if (slp != NULL) {
				if (S_ISLNK(slp->v_mode)) {
					/* Get contents of link */
					r = req_rdlink(slp->v_fs_e,
						       slp->v_inode_nr,
						       VFS_PROC_NR,
						       (vir_bytes) path,
						       PATH_MAX - 1, 0);
					if (r < 0) {
						/* Failed to read link */
						unlock_vnode(slp);
						unlock_vnode(dirp);
						unlock_vmnt(dir_vmp);
						put_vnode(slp);
						put_vnode(dirp);
						err_code = r;
						return(NULL);
					}
					path[r] = '\0'; /* Terminate path */
				}
				unlock_vnode(slp);
				put_vnode(slp);
			}

			/* Try to create the inode the dangling symlink was
			 * pointing to. We have to use dirp as starting point
			 * as there might be multiple successive symlinks
			 * crossing multiple mountpoints.
			 * Unlock vnodes and vmnts as we're going to recurse.
			 */
			unlock_vnode(dirp);
			unlock_vnode(vp);
			unlock_vmnt(dir_vmp);

			old_wd = fp->fp_wd;	/* Save orig. working dirp */
			fp->fp_wd = dirp;
			vp = new_node(resolve, oflags, bits);
			fp->fp_wd = old_wd;	/* Restore */

			if (vp != NULL) {
				put_vnode(dirp);
				*(resolve->l_vnode) = vp;
				return(vp);
			}
			r = err_code;
		}

		if (r == EEXIST)
			err_code = EIO; /* Impossible, we have verified that
					 * the last component doesn't exist and
					 * is not a dangling symlink. */
		else
			err_code = r;

		unlock_vnode(dirp);
		unlock_vnode(vp);
		unlock_vmnt(dir_vmp);
		put_vnode(dirp);
		return(NULL);
	}

	/* Store results and mark vnode in use */
	vp->v_fs_e = res.fs_e;
	vp->v_inode_nr = res.inode_nr;
	vp->v_mode = res.fmode;
	vp->v_size = res.fsize;
	vp->v_uid = res.uid;
	vp->v_gid = res.gid;
	vp->v_sdev = res.dev;
	vp->v_vmnt = dirp->v_vmnt;
	vp->v_dev = vp->v_vmnt->m_dev;
	vp->v_fs_count = 1;
	vp->v_ref_count = 1;
  } else {
	/* Either last component exists, or there is some other problem. */
	if (vp != NULL) {
		r = EEXIST;	/* File exists or a symlink names a file while
				 * O_EXCL is set. */
	} else
		r = err_code;	/* Other problem. */
  }

  err_code = r;
  /* When dirp equals vp, we shouldn't release the lock as a vp is locked only
   * once. Releasing the lock would cause the resulting vp not be locked and
   * cause mayhem later on. */
  if (dirp != vp) {
	unlock_vnode(dirp);
  }
  unlock_vmnt(dir_vmp);
  put_vnode(dirp);

  *(resolve->l_vnode) = vp;
  return(vp);
}
/*===========================================================================*
 *				do_pipe					     *
 *===========================================================================*/
/* Handle the pipe(2) system call: allocate a PipeFS-backed vnode and two
 * filps (read end, write end), returning the descriptors via m_out.
 * On any failure every resource acquired so far is rolled back in reverse
 * order before returning the error. */
int do_pipe()
{
/* Perform the pipe(fil_des) system call. */
  register struct fproc *rfp;
  int r;
  struct filp *fil_ptr0, *fil_ptr1;
  int fil_des[2];		/* reply goes here */
  struct vnode *vp;
  struct vmnt *vmp;
  struct node_details res;

  /* Get a lock on PFS */
  if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
  if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);

  /* See if a free vnode is available */
  if ((vp = get_free_vnode()) == NULL) {
	unlock_vmnt(vmp);
	return(err_code);
  }
  lock_vnode(vp, VNODE_OPCL);

  /* Acquire two file descriptors. */
  rfp = fp;
  if ((r = get_fd(0, R_BIT, &fil_des[0], &fil_ptr0)) != OK) {
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[0]] = fil_ptr0;
  FD_SET(fil_des[0], &rfp->fp_filp_inuse);
  fil_ptr0->filp_count = 1;		/* mark filp in use */
  if ((r = get_fd(0, W_BIT, &fil_des[1], &fil_ptr1)) != OK) {
	/* Second descriptor failed: undo the first one. */
	rfp->fp_filp[fil_des[0]] = NULL;
	FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
	fil_ptr0->filp_count = 0;	/* mark filp free */
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[1]] = fil_ptr1;
  FD_SET(fil_des[1], &rfp->fp_filp_inuse);
  fil_ptr1->filp_count = 1;

  /* Create a named pipe inode on PipeFS */
  r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
		  NO_DEV, &res);

  if (r != OK) {
	/* Inode creation failed: release both descriptors and filps. */
	rfp->fp_filp[fil_des[0]] = NULL;
	FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
	fil_ptr0->filp_count = 0;
	rfp->fp_filp[fil_des[1]] = NULL;
	FD_CLR(fil_des[1], &rfp->fp_filp_inuse);
	fil_ptr1->filp_count = 0;
	unlock_filp(fil_ptr1);
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }

  /* Fill in vnode */
  vp->v_fs_e = res.fs_e;
  vp->v_mapfs_e = res.fs_e;	/* pipe inode lives on PipeFS itself */
  vp->v_inode_nr = res.inode_nr;
  vp->v_mapinode_nr = res.inode_nr;
  vp->v_mode = res.fmode;
  vp->v_fs_count = 1;
  vp->v_mapfs_count = 1;
  vp->v_ref_count = 1;
  vp->v_size = 0;
  vp->v_vmnt = NULL;
  vp->v_dev = NO_DEV;

  /* Fill in filp objects */
  fil_ptr0->filp_vno = vp;
  dup_vnode(vp);		/* second filp references the vnode too */
  fil_ptr1->filp_vno = vp;
  fil_ptr0->filp_flags = O_RDONLY;
  fil_ptr1->filp_flags = O_WRONLY;

  /* Return the two descriptors to the caller. */
  m_out.reply_i1 = fil_des[0];
  m_out.reply_i2 = fil_des[1];

  unlock_filps(fil_ptr0, fil_ptr1);
  unlock_vmnt(vmp);

  return(OK);
}
/*===========================================================================*
 *				create_pipe				     *
 *===========================================================================*/
/* Allocate a PipeFS-backed vnode and two filps for a pipe; fil_des[0] is the
 * read end, fil_des[1] the write end. Extra open flags (beyond the access
 * mode) are propagated to both filps, and O_CLOEXEC marks both descriptors
 * close-on-exec. On any failure all resources acquired so far are rolled
 * back in reverse order before the error is returned. */
static int create_pipe(int fil_des[2], int flags)
{
  register struct fproc *rfp;
  int r;
  struct filp *fil_ptr0, *fil_ptr1;
  struct vnode *vp;
  struct vmnt *vmp;
  struct node_details res;

  /* Get a lock on PFS */
  if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
  if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);

  /* See if a free vnode is available */
  if ((vp = get_free_vnode()) == NULL) {
	unlock_vmnt(vmp);
	return(err_code);
  }
  lock_vnode(vp, VNODE_OPCL);

  /* Acquire two file descriptors. */
  rfp = fp;
  if ((r = get_fd(fp, 0, R_BIT, &fil_des[0], &fil_ptr0)) != OK) {
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[0]] = fil_ptr0;
  fil_ptr0->filp_count = 1;		/* mark filp in use */
  if ((r = get_fd(fp, 0, W_BIT, &fil_des[1], &fil_ptr1)) != OK) {
	/* Second descriptor failed: undo the first one. */
	rfp->fp_filp[fil_des[0]] = NULL;
	fil_ptr0->filp_count = 0;	/* mark filp free */
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[1]] = fil_ptr1;
  fil_ptr1->filp_count = 1;

  /* Create a named pipe inode on PipeFS */
  r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
		  NO_DEV, &res);

  if (r != OK) {
	/* Inode creation failed: release both descriptors and filps. */
	rfp->fp_filp[fil_des[0]] = NULL;
	fil_ptr0->filp_count = 0;
	rfp->fp_filp[fil_des[1]] = NULL;
	fil_ptr1->filp_count = 0;
	unlock_filp(fil_ptr1);
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }

  /* Fill in vnode */
  vp->v_fs_e = res.fs_e;
  vp->v_mapfs_e = res.fs_e;	/* pipe inode lives on PipeFS itself */
  vp->v_inode_nr = res.inode_nr;
  vp->v_mapinode_nr = res.inode_nr;
  vp->v_mode = res.fmode;
  vp->v_fs_count = 1;
  vp->v_mapfs_count = 1;
  vp->v_ref_count = 1;
  vp->v_size = 0;
  vp->v_vmnt = NULL;
  vp->v_dev = NO_DEV;

  /* Fill in filp objects */
  fil_ptr0->filp_vno = vp;
  dup_vnode(vp);		/* second filp references the vnode too */
  fil_ptr1->filp_vno = vp;
  fil_ptr0->filp_flags = O_RDONLY | (flags & ~O_ACCMODE);
  fil_ptr1->filp_flags = O_WRONLY | (flags & ~O_ACCMODE);
  if (flags & O_CLOEXEC) {
	FD_SET(fil_des[0], &rfp->fp_cloexec_set);
	FD_SET(fil_des[1], &rfp->fp_cloexec_set);
  }

  unlock_filps(fil_ptr0, fil_ptr1);
  unlock_vmnt(vmp);

  return(OK);
}
/*===========================================================================*
 *				mount_fs				     *
 *===========================================================================*/
/* Mount the file system served by endpoint fs_e, backed by device dev, on
 * mountpoint. rdonly requests a read-only mount; mount_label names the FS
 * for administrative purposes. Returns OK or a POSIX error code; on any
 * error all vnodes/vmnts acquired along the way are released.
 *
 * The root file system ("/") may be mounted twice during boot (ramdisk,
 * then the real boot disk); all processes' root and working directories are
 * re-pointed at the new root in that case. */
int mount_fs(
dev_t dev,
char mountpoint[PATH_MAX],
endpoint_t fs_e,
int rdonly,
char mount_label[LABEL_MAX] )
{
  int i, r = OK, found, isroot, mount_root, con_reqs, slot;
  struct fproc *tfp, *rfp;
  struct dmap *dp;
  struct vnode *root_node, *vp = NULL;
  struct vmnt *new_vmp, *parent_vmp;
  char *label;
  struct node_details res;
  struct lookup resolve;

  /* Look up block device driver label when dev is not a pseudo-device */
  label = "";
  if (!is_nonedev(dev)) {
	/* Get driver process' endpoint */
	dp = &dmap[major(dev)];
	if (dp->dmap_driver == NONE) {
		printf("VFS: no driver for dev %d\n", dev);
		return(EINVAL);
	}

	label = dp->dmap_label;
	assert(strlen(label) > 0);
  }

  /* Scan vmnt table to see if dev already mounted. If not, find a free slot.*/
  found = FALSE;
  for (i = 0; i < NR_MNTS; ++i) {
	if (vmnt[i].m_dev == dev) found = TRUE;
  }
  if (found) {
	return(EBUSY);
  } else if ((new_vmp = get_free_vmnt()) == NULL) {
	return(ENOMEM);
  }
  if ((r = lock_vmnt(new_vmp, VMNT_EXCL)) != OK) return(r);

  isroot = (strcmp(mountpoint, "/") == 0);
  mount_root = (isroot && have_root < 2); /* Root can be mounted twice:
					   * 1: ramdisk
					   * 2: boot disk (e.g., harddisk)
					   */

  if (!mount_root) {
	/* Get vnode of mountpoint */
	lookup_init(&resolve, mountpoint, PATH_NOFLAGS, &parent_vmp, &vp);
	resolve.l_vmnt_lock = VMNT_EXCL;
	resolve.l_vnode_lock = VNODE_WRITE;
	if ((vp = eat_path(&resolve, fp)) == NULL)
		r = err_code;
	else if (vp->v_ref_count == 1) {
		/*Tell FS on which vnode it is mounted (glue into mount tree)*/
		r = req_mountpoint(vp->v_fs_e, vp->v_inode_nr);
	} else
		r = EBUSY;	/* mountpoint still in use elsewhere */

	if (vp != NULL)	{
		/* Quickly unlock to allow back calls (from e.g. FUSE) to
		 * relock */
		unlock_vmnt(parent_vmp);
	}

	if (r != OK) {
		if (vp != NULL) {
			unlock_vnode(vp);
			put_vnode(vp);
		}
		unlock_vmnt(new_vmp);
		return(r);
	}
  }

  /* We'll need a vnode for the root inode */
  if ((root_node = get_free_vnode()) == NULL) {
	if (vp != NULL) {
		unlock_vnode(vp);
		put_vnode(vp);
	}
	unlock_vmnt(new_vmp);
	return(err_code);
  }
  lock_vnode(root_node, VNODE_OPCL);

  /* Record process as a system process */
  if (isokendpt(fs_e, &slot) != OK) {
	if (vp != NULL) {
		unlock_vnode(vp);
		put_vnode(vp);
	}
	unlock_vnode(root_node);
	unlock_vmnt(new_vmp);
	return(EINVAL);
  }
  rfp = &fproc[slot];
  rfp->fp_flags |= FP_SRV_PROC;	/* File Servers are also services */

  /* Store some essential vmnt data first */
  new_vmp->m_fs_e = fs_e;
  new_vmp->m_dev = dev;
  if (rdonly) new_vmp->m_flags |= VMNT_READONLY;
  else new_vmp->m_flags &= ~VMNT_READONLY;

  /* Tell FS which device to mount */
  new_vmp->m_flags |= VMNT_MOUNTING;
  r = req_readsuper(fs_e, label, dev, rdonly, isroot, &res, &con_reqs);
  new_vmp->m_flags &= ~VMNT_MOUNTING;

  if (r != OK) {
	/* Superblock read failed: give the vmnt slot back. */
	mark_vmnt_free(new_vmp);
	unlock_vnode(root_node);
	if (vp != NULL) {
		unlock_vnode(vp);
		put_vnode(vp);
	}
	unlock_vmnt(new_vmp);
	return(r);
  }

  lock_bsf();

  /* Fill in root node's fields */
  root_node->v_fs_e = res.fs_e;
  root_node->v_inode_nr = res.inode_nr;
  root_node->v_mode = res.fmode;
  root_node->v_uid = res.uid;
  root_node->v_gid = res.gid;
  root_node->v_size = res.fsize;
  root_node->v_sdev = NO_DEV;
  root_node->v_fs_count = 1;
  root_node->v_ref_count = 1;

  /* Root node is indeed on the partition */
  root_node->v_vmnt = new_vmp;
  root_node->v_dev = new_vmp->m_dev;
  if (con_reqs == 0)
	new_vmp->m_comm.c_max_reqs = 1;	/* Default if FS doesn't tell us */
  else
	new_vmp->m_comm.c_max_reqs = con_reqs;
  new_vmp->m_comm.c_cur_reqs = 0;

  if (mount_root) {
	/* Superblock and root node already read.
	 * Nothing else can go wrong. Perform the mount. */
	new_vmp->m_root_node = root_node;
	new_vmp->m_mounted_on = NULL;
	strlcpy(new_vmp->m_label, mount_label, LABEL_MAX);
	if (is_nonedev(dev)) alloc_nonedev(dev);
	update_bspec(dev, fs_e, 0 /* Don't send new driver endpoint */);

	ROOT_DEV = dev;
	ROOT_FS_E = fs_e;

	/* Replace all root and working directories */
	for (i = 0, tfp = fproc; i < NR_PROCS; i++, tfp++) {
		if (tfp->fp_pid == PID_FREE)
			continue;

/* Swap a process's directory reference over to the new root node,
 * dropping the old reference and taking a new one. */
#define MAKEROOT(what) { \
	if (what) put_vnode(what); \
	dup_vnode(root_node); \
	what = root_node; \
}

		MAKEROOT(tfp->fp_rd);
		MAKEROOT(tfp->fp_wd);
	}

	unlock_vnode(root_node);
	unlock_vmnt(new_vmp);
	have_root++;	/* We have a (new) root */
	unlock_bsf();
	return(OK);
  }

  /* File types may not conflict. */
  if (!S_ISDIR(vp->v_mode) && S_ISDIR(root_node->v_mode)) r = EISDIR;

  /* If error, return the super block and both inodes; release the vmnt. */
  if (r != OK) {
	unlock_vnode(vp);
	unlock_vnode(root_node);
	mark_vmnt_free(new_vmp);
	unlock_vmnt(new_vmp);
	put_vnode(vp);
	put_vnode(root_node);
	unlock_bsf();
	return(r);
  }

  /* Nothing else can go wrong. Perform the mount. */
  new_vmp->m_mounted_on = vp;
  new_vmp->m_root_node = root_node;
  strlcpy(new_vmp->m_label, mount_label, LABEL_MAX);

  /* Allocate the pseudo device that was found, if not using a real device. */
  if (is_nonedev(dev)) alloc_nonedev(dev);

  /* The new FS will handle block I/O requests for its device now. */
  if (!(new_vmp->m_flags & VMNT_FORCEROOTBSF))
	update_bspec(dev, fs_e, 0 /* Don't send new driver endpoint */);

  unlock_vnode(vp);
  unlock_vnode(root_node);
  unlock_vmnt(new_vmp);
  unlock_bsf();

  return(OK);
}