/*
 * SYSINIT helper: register a vnode operations (vnops) vector on the
 * global list.  The opaque sysinit argument is the vop_ops template.
 */
void
vfs_nadd_vnodeops_sysinit(void *data)
{
	struct vop_ops *template = data;

	/* NULL mount and NULL newcopy: global, mount-independent add */
	vfs_add_vnodeops(NULL, template, NULL);
}
/*
 * Mount the linprocfs pseudo filesystem.
 *
 * Remounts (MNT_UPDATE) are not supported.  On the very first mount of
 * this filesystem type the linprocfs_exit hook is registered so process
 * exit events can be observed.  There is no per-mount private data
 * (mnt_data stays 0); only the normal vnodeops vector is installed.
 *
 * Returns 0 on success or an errno on failure.
 */
/* ARGSUSED */
static int
linprocfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	size_t size;
	int error;

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/*
	 * Register the at_exit hook only on the first mount of this fstype
	 * (vfc_refcount == 1 means this mount is the only reference).
	 */
	if (mp->mnt_vfc->vfc_refcount == 1 && (error = at_exit(linprocfs_exit))) {
		kprintf("linprocfs: cannot register linprocfs_exit with at_exit\n");
		return(error);
	}

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOSTKMNT;	/* disallow stacked mounts on us */
	mp->mnt_data = 0;			/* no per-mount private data */
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &linprocfs_vnode_vops, &mp->mnt_vn_norm_ops);

	/* "from" name is the fixed string "linprocfs", NUL-padded */
	size = sizeof("linprocfs") - 1;
	bcopy("linprocfs", mp->mnt_stat.f_mntfromname, size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void)linprocfs_statfs(mp, &mp->mnt_stat, cred);

	return (0);
}
/*
 * Kernel module event handler for AFS.
 *
 * MOD_LOAD registers the AFS vfsconf and vnode operations exactly once;
 * a second load attempt fails with EBUSY.  MOD_UNLOAD tears the
 * registration down, but only when built with RXK_LISTENER_ENV (otherwise
 * shutdown support is incomplete and the handler refuses with -1).
 *
 * Returns 0 on success or an errno-style code on failure.
 */
int
afs_module_handler(module_t mod, int what, void *arg)
{
    static int loaded = 0;	/* guards against double MOD_LOAD */
    int code = 0;

    if (what == MOD_LOAD) {
	if (loaded) {
	    printf("afs cannot be MOD_LOAD'd more than once\n");
	    return EBUSY;
	}
	memset(&afs_vfsconf, 0, sizeof(struct vfsconf));
#ifdef AFS_FBSD53_ENV
	afs_vfsconf.vfc_version = VFS_VERSION;
#endif
	strcpy(afs_vfsconf.vfc_name, "AFS");
	afs_vfsconf.vfc_vfsops = &afs_vfsops;
	afs_vfsconf.vfc_typenum = -1;	/* set by vfs_register */
	afs_vfsconf.vfc_flags = VFCF_NETWORK;
	code = vfs_register(&afs_vfsconf);
	if (code == 0) {
	    vfs_add_vnodeops(&afs_vnodeop_opv_desc);
	    loaded = 1;
	}
    } else if (what == MOD_UNLOAD) {
#ifndef RXK_LISTENER_ENV
	/* shutdown is incomplete unless RXK_LISTENER_ENV */
	printf("afs: I can't be unloaded yet\n");
	return -1;
#endif
	if (!loaded)
	    return 0;
	code = vfs_unregister(&afs_vfsconf);
	if (code == 0)
	    vfs_rm_vnodeops(&afs_vnodeop_opv_desc);
	/* NOTE: 'loaded' is intentionally left set, matching prior behavior */
    }

    return (code);
}
/*
 * Mount the per-process file descriptors (/dev/fd)
 *
 * path must be non-NULL (fdesc cannot be the root filesystem) and
 * MNT_UPDATE is a no-op rejected with EOPNOTSUPP.  Allocates the root
 * vnode and the fdescmount structure, then fills in mount statistics.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
fdesc_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	int error = 0;
	struct fdescmount *fmp;
	struct vnode *rvp;

	if (path == NULL)
		panic("fdesc_mount: cannot mount as root");

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/*
	 * Vnodeops are registered before the root vnode is allocated since
	 * fdesc_allocvp produces a vnode that uses them.
	 * NOTE(review): if fdesc_allocvp fails the vnodeops registration is
	 * not rolled back here — presumably undone in unmount; confirm.
	 */
	vfs_add_vnodeops(mp, &fdesc_vnode_vops, &mp->mnt_vn_norm_ops);

	error = fdesc_allocvp(Froot, FD_ROOT, mp, &rvp);
	if (error)
		return (error);

	fmp = kmalloc(sizeof(struct fdescmount), M_FDESCMNT, M_WAITOK); /* XXX */
	rvp->v_type = VDIR;
	vsetflags(rvp, VROOT);
	fmp->f_root = rvp;		/* root vnode held for mount lifetime */
	/* XXX -- don't mark as local to work around fts() problems */
	/*mp->mnt_flag |= MNT_LOCAL;*/
	mp->mnt_data = (qaddr_t) fmp;
	vfs_getnewfsid(mp);

	/* sizeof("fdesc") includes the NUL, so the name arrives terminated */
	bzero(mp->mnt_stat.f_mntfromname, MNAMELEN);
	bcopy("fdesc", mp->mnt_stat.f_mntfromname, sizeof("fdesc"));
	fdesc_statfs(mp, &mp->mnt_stat, cred);
	return (0);
}
/*
 * Mount (or update the mount of) a HAMMER filesystem.
 *
 * mntpt is NULL for root mounts at boot, in which case the volume list is
 * parsed out of f_mntfromname (':'-separated) instead of being copied in
 * from userspace via the hammer_mount_info structure.
 *
 * For MNT_UPDATE this only transitions between read-only and read-write
 * (running stage2 recovery when going RO->RW).  For a fresh mount it
 * builds the hammer_mount structure, loads all volumes, runs stage1/stage2
 * recovery as needed, and resolves the root vnode.
 *
 * Locking: the per-fs token (hmp->fs_token) is acquired once initial setup
 * is done and is held across the volume load and recovery phases; on
 * failure hammer_free_hmp() is called with the token held, on success the
 * token is released before returning.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info. mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

		/*
		 * Sentinel B-Tree keys spanning the entire keyspace:
		 * root_btree_beg is the minimum key, root_btree_end the
		 * maximum.
		 */
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	/* An as-of mount (snapshot view) is forced read-only */
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vise-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			/*
			 * Toggle ronly around the inode reload / flusher
			 * syncs so in-flight state settles before the final
			 * read-only transition.
			 */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	/* fs_token is held from here through all error paths below */
	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.  Pull the next ':'-separated volume
			 * name out of f_mntfromname; names without a
			 * leading '/' are taken relative to /dev/.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			/* normal mount: volume names come from userspace */
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardlless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and is designed primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 * also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
				  MNAMELEN -1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
/*
 * Common code for mount and mountroot
 *
 * Opens the backing device, reads and validates the NTFS boot block,
 * builds the ntfsmount structure, pins the MFT/root/bitmap system vnodes,
 * loads the case-translation table, counts free clusters and reads the
 * $AttrDef attribute-definition file into internal form.
 *
 * Returns 0 on success; on failure the device is closed and an errno is
 * returned (out1: additionally releases system vnodes and flushes).
 */
int
ntfs_mountfs(struct vnode *devvp, struct mount *mp, struct ntfs_args *argsp,
	     struct ucred *cred)
{
	struct buf *bp;
	struct ntfsmount *ntmp;
	cdev_t dev;
	int error, ronly, ncount, i;
	struct vnode *vp;
	char cs_local[ICONV_CSNMAXLEN];
	char cs_ntfs[ICONV_CSNMAXLEN];

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	error = vfs_mountedon(devvp);
	if (error)
		return (error);
	ncount = vcount(devvp);
	if (devvp->v_object)
		ncount -= 1;
	if (ncount > 1)
		return (EBUSY);
	VN_LOCK(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, 0, 0);
	VOP__UNLOCK(devvp, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	VN_LOCK(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, NULL);
	VOP__UNLOCK(devvp, 0);
	if (error)
		return (error);
	dev = devvp->v_rdev;

	bp = NULL;

	error = bread(devvp, BBLOCK, BBSIZE, &bp);
	if (error)
		goto out;
	ntmp = kmalloc(sizeof *ntmp, M_NTFSMNT, M_WAITOK | M_ZERO);
	bcopy( bp->b_data, &ntmp->ntm_bootfile, sizeof(struct bootfile) );
	/*
	 * We must not cache the boot block if its size is not exactly
	 * one cluster in order to avoid confusing the buffer cache when
	 * the boot file is read later by ntfs_readntvattr_plain(), which
	 * reads a cluster at a time.
	 */
	if (ntfs_cntob(1) != BBSIZE)
		bp->b_flags |= B_NOCACHE;
	brelse( bp );
	bp = NULL;

	/* verify the NTFS OEM signature in the boot sector */
	if (strncmp(ntmp->ntm_bootfile.bf_sysid, NTFS_BBID, NTFS_BBIDLEN)) {
		error = EINVAL;
		dprintf(("ntfs_mountfs: invalid boot block\n"));
		goto out;
	}

	{
		/*
		 * MFT record size: positive = clusters per record,
		 * negative = record size is 2^(-cpr) bytes.
		 */
		int8_t cpr = ntmp->ntm_mftrecsz;
		if( cpr > 0 )
			ntmp->ntm_bpmftrec = ntmp->ntm_spc * cpr;
		else
			ntmp->ntm_bpmftrec = (1 << (-cpr)) / ntmp->ntm_bps;
	}
	dprintf(("ntfs_mountfs(): bps: %d, spc: %d, media: %x, mftrecsz: %d (%d sects)\n",
		ntmp->ntm_bps,ntmp->ntm_spc,ntmp->ntm_bootfile.bf_media,
		ntmp->ntm_mftrecsz,ntmp->ntm_bpmftrec));
	dprintf(("ntfs_mountfs(): mftcn: 0x%x|0x%x\n",
		(u_int32_t)ntmp->ntm_mftcn,(u_int32_t)ntmp->ntm_mftmirrcn));

	ntmp->ntm_mountp = mp;
	ntmp->ntm_dev = dev;
	ntmp->ntm_devvp = devvp;
	ntmp->ntm_uid = argsp->uid;
	ntmp->ntm_gid = argsp->gid;
	ntmp->ntm_mode = argsp->mode;
	ntmp->ntm_flag = argsp->flag;

	/* character-set conversion setup (kiconv if requested and present) */
	if (argsp->flag & NTFS_MFLAG_KICONV && ntfs_iconv) {
		bcopy(argsp->cs_local, cs_local, sizeof(cs_local));
		bcopy(argsp->cs_ntfs, cs_ntfs, sizeof(cs_ntfs));
		ntfs_82u_init(ntmp, cs_local, cs_ntfs);
		ntfs_u28_init(ntmp, NULL, cs_local, cs_ntfs);
	} else {
		ntfs_82u_init(ntmp, NULL, NULL);
		ntfs_u28_init(ntmp, ntmp->ntm_82u, NULL, NULL);
	}

	mp->mnt_data = (qaddr_t)ntmp;

	dprintf(("ntfs_mountfs(): case-%s,%s uid: %d, gid: %d, mode: %o\n",
		(ntmp->ntm_flag & NTFS_MFLAG_CASEINS)?"insens.":"sens.",
		(ntmp->ntm_flag & NTFS_MFLAG_ALLNAMES)?" allnames,":"",
		ntmp->ntm_uid, ntmp->ntm_gid, ntmp->ntm_mode));

	vfs_add_vnodeops(mp, &ntfs_vnode_vops, &mp->mnt_vn_norm_ops);

	/*
	 * We read in some system nodes to do not allow
	 * reclaim them and to have everytime access to them.
	 */
	{
		int pi[3] = { NTFS_MFTINO, NTFS_ROOTINO, NTFS_BITMAPINO };
		for (i=0; i<3; i++) {
			error = VFS_VGET(mp, NULL, pi[i],
					 &(ntmp->ntm_sysvn[pi[i]]));
			if(error)
				goto out1;
			/* extra vref keeps the node resident; vput drops the lock */
			vsetflags(ntmp->ntm_sysvn[pi[i]], VSYSTEM);
			vref(ntmp->ntm_sysvn[pi[i]]);
			vput(ntmp->ntm_sysvn[pi[i]]);
		}
	}

	/* read the Unicode lowercase --> uppercase translation table,
	 * if necessary */
	if ((error = ntfs_toupper_use(mp, ntmp)))
		goto out1;

	/*
	 * Scan $BitMap and count free clusters
	 */
	error = ntfs_calccfree(ntmp, &ntmp->ntm_cfree);
	if(error)
		goto out1;

	/*
	 * Read and translate to internal format attribute
	 * definition file.
	 */
	{
		int num,j;
		struct attrdef ad;

		/* Open $AttrDef */
		error = VFS_VGET(mp, NULL, NTFS_ATTRDEFINO, &vp);
		if(error)
			goto out1;

		/* Count valid entries */
		for(num=0;;num++) {
			error = ntfs_readattr(ntmp, VTONT(vp),
					NTFS_A_DATA, NULL,
					num * sizeof(ad), sizeof(ad),
					&ad, NULL);
			if (error)
				goto out1;
			if (ad.ad_name[0] == 0)
				break;
		}

		/* Alloc memory for attribute definitions */
		ntmp->ntm_ad = kmalloc(num * sizeof(struct ntvattrdef),
				       M_NTFSMNT, M_WAITOK);

		ntmp->ntm_adnum = num;

		/* Read them and translate */
		for(i=0;i<num;i++){
			error = ntfs_readattr(ntmp, VTONT(vp),
					NTFS_A_DATA, NULL,
					i * sizeof(ad), sizeof(ad),
					&ad, NULL);
			if (error)
				goto out1;
			/* copy the Unicode name and record its length */
			j = 0;
			do {
				ntmp->ntm_ad[i].ad_name[j] = ad.ad_name[j];
			} while(ad.ad_name[j++]);
			ntmp->ntm_ad[i].ad_namelen = j - 1;
			ntmp->ntm_ad[i].ad_type = ad.ad_type;
		}

		vput(vp);
	}

	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = 0;
	mp->mnt_flag |= MNT_LOCAL;
	dev->si_mountpoint = mp;

	return (0);

out1:
	for(i=0;i<NTFS_SYSNODESNUM;i++)
		if(ntmp->ntm_sysvn[i])
			vrele(ntmp->ntm_sysvn[i]);

	if (vflush(mp, 0, 0))
		dprintf(("ntfs_mountfs: vflush failed\n"));

out:
	/*
	 * NOTE(review): si_mountpoint is cleared even on paths where it was
	 * never set (e.g. bread failure) — harmless if NULL, but confirm no
	 * other mount owns the device here.
	 */
	dev->si_mountpoint = NULL;
	if (bp)
		brelse(bp);

	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
	vn_unlock(devvp);

	return (error);
}
/*
 * Mount a host directory as a dirfs filesystem (vkernel only).
 *
 * MNT_UPDATE only toggles the cached read-only state.  For a fresh mount
 * the host path is copied in (user address first, falling back to a
 * kernel address), validated with stat(2) against the host, and the
 * per-mount dirfs_mount structure is initialized.
 *
 * Fixes over the previous revision:
 *  - dmp was kfree()d before the KTR_LOG at 'failure:' dereferenced
 *    dmp->dm_path (use-after-free); the log now runs before the free.
 *  - the stat()-errno failure path leaked dmp; all error paths now free
 *    it and clear mp->mnt_data in one place.
 *  - an empty copied-in path made dm_path[nlen-1] index out of bounds;
 *    the trailing-'/' strip is now guarded by nlen > 0.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
dirfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	dirfs_mount_t dmp;
	struct stat st;
	size_t done, nlen;
	int error;

	dbg(1, "called\n");

	if (mp->mnt_flag & MNT_UPDATE) {
		dmp = VFS_TO_DIRFS(mp);
		if (dmp->dm_rdonly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* XXX We should make sure all writes are synced */
			dmp->dm_rdonly = 1;
			debug(2, "dirfs read-write -> read-only\n");
		}

		if (dmp->dm_rdonly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			debug(2, "dirfs read-only -> read-write\n");
			dmp->dm_rdonly = 0;
		}
		return 0;
	}

	dmp = kmalloc(sizeof(*dmp), M_DIRFS, M_WAITOK | M_ZERO);
	mp->mnt_data = (qaddr_t)dmp;
	dmp->dm_mount = mp;

	error = copyinstr(data, &dmp->dm_path, MAXPATHLEN, &done);
	if (error) {
		/* Attempt to copy from kernel address */
		error = copystr(data, &dmp->dm_path, MAXPATHLEN, &done);
		if (error)
			goto failure;
	}

	/* Strip / character at the end to avoid problems */
	nlen = strnlen(dmp->dm_path, MAXPATHLEN);
	if (nlen > 0 && dmp->dm_path[nlen-1] == '/')
		dmp->dm_path[nlen-1] = 0;

	/* Make sure host directory exists and it is indeed a directory. */
	if ((stat(dmp->dm_path, &st)) == 0) {
		if (!S_ISDIR(st.st_mode)) {
			error = EINVAL;
			goto failure;
		}
	} else {
		error = errno;
		goto failure;
	}

	lockinit(&dmp->dm_lock, "dfsmnt", 0, LK_CANRECURSE);

	vfs_add_vnodeops(mp, &dirfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_getnewfsid(mp);

	/* Who is running the vkernel */
	dmp->dm_uid = getuid();
	dmp->dm_gid = getgid();

	TAILQ_INIT(&dmp->dm_fdlist);
	RB_INIT(&dmp->dm_inotree);

	kmalloc_raise_limit(M_DIRFS_NODE, 0);

	dirfs_statfs(mp, &mp->mnt_stat, cred);

failure:
	/* Log while dmp is still valid; free it only afterwards on error */
	KTR_LOG(dirfs_mount, (dmp->dm_path) ? dmp->dm_path : "NULL",
	    dmp, mp, error);
	if (error) {
		mp->mnt_data = NULL;
		kfree(dmp, M_DIRFS);
	}

	return error;
}
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params
 *
 * Mount a NetWare (NCP) share.  Copies in the nwfs_args, resolves the
 * NCP connection/handle, builds the nwmount structure and composes the
 * "/server:user/volume" f_mntfromname string, then looks up the root
 * vnode.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
nwfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct nwfs_args args;	  /* will hold data from mount request */
	int error;
	struct nwmount *nmp = NULL;
	struct ncp_conn *conn = NULL;
	struct ncp_handle *handle = NULL;
	struct vnode *vp;
	char *pc,*pe;

	if (data == NULL) {
		nwfs_printf("missing data argument\n");
		/* NOTE(review): returns the magic value 1 (EPERM) rather
		 * than EINVAL — confirm callers do not depend on it */
		return 1;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		nwfs_printf("MNT_UPDATE not implemented");
		return (EOPNOTSUPP);
	}
	error = copyin(data, (caddr_t)&args, sizeof(struct nwfs_args));
	if (error)
		return (error);
	if (args.version != NWFS_VERSION) {
		nwfs_printf("mount version mismatch: kernel=%d, mount=%d\n",NWFS_VERSION,args.version);
		/* NOTE(review): magic 1 again — see above */
		return (1);
	}
	error = ncp_conn_getbyref(args.connRef,curthread,cred,NCPM_EXECUTE,&conn);
	if (error) {
		nwfs_printf("invalid connection reference %d\n",args.connRef);
		return (error);
	}
	error = ncp_conn_gethandle(conn, NULL, &handle);
	if (error) {
		nwfs_printf("can't get connection handle\n");
		/* NOTE(review): conn appears to remain locked/referenced on
		 * this path (no ncp_conn_unlock here) — verify the API */
		return (error);
	}
	ncp_conn_unlock(conn,curthread);	/* we keep the ref */
	mp->mnt_stat.f_iosize = conn->buffer_size;
	/* We must malloc our own mount info */
	nmp = kmalloc(sizeof(struct nwmount), M_NWFSDATA,
		      M_WAITOK | M_USE_RESERVE | M_ZERO);
	mp->mnt_data = (qaddr_t)nmp;
	nmp->connh = handle;
	nmp->n_root = NULL;
	nmp->n_id = nwfsid++;
	nmp->m = args;
	/* force regular-file / directory type bits onto the mode masks */
	nmp->m.file_mode = (nmp->m.file_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	nmp->m.dir_mode = (nmp->m.dir_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;
	if ((error = nwfs_initnls(nmp)) != 0) goto bad;
	/*
	 * Compose f_mntfromname as "/server:user/volume", bounded by
	 * pe; index() finds the terminating NUL after each strncpy.
	 */
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc+sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*(pc++) = '/';
	pc = index(strncpy(pc, conn->li.server, pe-pc-2),0);
	if (pc < pe-1) {
		*(pc++) = ':';
		pc=index(strncpy(pc, conn->li.user, pe-pc-2),0);
		if (pc < pe-1) {
			*(pc++) = '/';
			strncpy(pc, nmp->m.mounted_vol, pe-pc-2);
		}
	}
	/* protect against invalid mount points */
	nmp->m.mount_point[sizeof(nmp->m.mount_point)-1] = '\0';
	vfs_add_vnodeops(mp, &nwfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_getnewfsid(mp);
	error = nwfs_root(mp, &vp);
	if (error)
		goto bad;
	/*
	 * Lose the lock but keep the ref.
	 */
	vn_unlock(vp);
	NCPVODEBUG("rootvp.vrefcnt=%d\n",vp->v_sysref.refcnt);
	return error;
bad:
	/* NOTE(review): mp->mnt_data is left pointing at the freed nmp —
	 * presumably the failed mount is discarded by the caller; confirm */
	if (nmp)
		kfree(nmp, M_NWFSDATA);
	if (handle)
		ncp_conn_puthandle(handle, NULL, 0);
	return error;
}
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Mount the device filesystem.  Remounts are rejected; mount info is
 * optionally copied in from userland (NULL data means defaults).  The
 * per-mount devfs_mnt_data, including the root node, is created under
 * the global devfs_lock, and finally the mount is registered with the
 * devfs core via devfs_mount_add().
 *
 * Returns 0 on success or an errno on failure.
 */
static int
devfs_vfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct devfs_mount_info info;
	struct devfs_mnt_data *mnt;
	size_t size;
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_mount() called!\n");

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	if (data == NULL) {
		bzero(&info, sizeof(info));	/* default: no flags */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOSTKMNT | MNTK_ALL_MPSAFE;
	mp->mnt_data = NULL;
	vfs_getnewfsid(mp);

	size = sizeof("devfs") - 1;
	bcopy("devfs", mp->mnt_stat.f_mntfromname, size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	/* NOTE(review): copyinstr return value is ignored; a bad path
	 * leaves f_mntonname partially filled — confirm acceptable */
	copyinstr(path, mp->mnt_stat.f_mntonname,
	    sizeof(mp->mnt_stat.f_mntonname) -1, &size);
	devfs_vfs_statfs(mp, &mp->mnt_stat, cred);

	/*
	 * XXX: save other mount info passed from userland or so.
	 */
	mnt = kmalloc(sizeof(*mnt), M_DEVFS, M_WAITOK | M_ZERO);

	/* initialize mount data and the root node under the devfs lock */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	mp->mnt_data = (qaddr_t)mnt;

	if (info.flags & DEVFS_MNT_JAIL)
		mnt->jailed = 1;
	else
		mnt->jailed = jailed(cred);

	mnt->leak_count = 0;
	mnt->file_count = 0;
	mnt->mp = mp;
	TAILQ_INIT(&mnt->orphan_list);
	mnt->root_node = devfs_allocp(Nroot, "", NULL, mp, NULL);
	KKASSERT(mnt->root_node);
	lockmgr(&devfs_lock, LK_RELEASE);

	vfs_add_vnodeops(mp, &devfs_vnode_norm_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &devfs_vnode_dev_vops, &mp->mnt_vn_spec_ops);

	devfs_debug(DEVFS_DEBUG_DEBUG, "calling devfs_mount_add\n");
	devfs_mount_add(mnt);

	return (0);
}
/*
 * Mount an SMB/CIFS share.
 *
 * Copies in smbfs_args, resolves the share handle from the user-supplied
 * device descriptor, allocates and initializes the smbmount structure
 * (credential hold, vnode hash, mode masks), composes the
 * "//user@server/share" f_mntfromname string and resolves the root vnode.
 *
 * Bug fixed: when hashinit() returned NULL the code jumped to the cleanup
 * path with error still 0, so the mount reported success while mnt_data
 * pointed at freed memory.  error is now set to ENOMEM on that path.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
smbfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct smbfs_args args;	  /* will hold data from mount request */
	struct smbmount *smp = NULL;
	struct smb_vc *vcp;
	struct smb_share *ssp = NULL;
	struct vnode *vp;
	struct smb_cred scred;
	int error;
	char *pc, *pe;

	if (data == NULL) {
		kprintf("missing data argument\n");
		return EINVAL;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		kprintf("MNT_UPDATE not implemented");
		return EOPNOTSUPP;
	}
	error = copyin(data, (caddr_t)&args, sizeof(struct smbfs_args));
	if (error)
		return error;
	if (args.version != SMBFS_VERSION) {
		kprintf("mount version mismatch: kernel=%d, mount=%d\n",
		    SMBFS_VERSION, args.version);
		return EINVAL;
	}
	smb_makescred(&scred, curthread, cred);
	error = smb_dev2share(args.dev, SMBM_EXEC, &scred, &ssp);
	if (error) {
		kprintf("invalid device handle %d (%d)\n", args.dev, error);
		return error;
	}
	vcp = SSTOVC(ssp);
	smb_share_unlock(ssp, 0);	/* keep the reference, drop the lock */
	mp->mnt_stat.f_iosize = SSTOVC(ssp)->vc_txmax;

#ifdef SMBFS_USEZONE
	smp = zalloc(smbfsmount_zone);
#else
	MALLOC(smp, struct smbmount*, sizeof(*smp), M_SMBFSDATA,
	    M_WAITOK|M_USE_RESERVE);
#endif
	if (smp == NULL) {
		kprintf("could not alloc smbmount\n");
		error = ENOMEM;
		goto bad;
	}
	bzero(smp, sizeof(*smp));
	mp->mnt_data = (qaddr_t)smp;
	smp->sm_cred = crhold(cred);
	smp->sm_hash = hashinit(desiredvnodes, M_SMBFSHASH, &smp->sm_hashlen);
	if (smp->sm_hash == NULL) {
		/* BUGFIX: previously fell into bad: with error == 0 and the
		 * mount "succeeded" with freed mnt_data */
		error = ENOMEM;
		goto bad;
	}
	lockinit(&smp->sm_hashlock, "smbfsh", 0, 0);
	smp->sm_share = ssp;
	smp->sm_root = NULL;
	smp->sm_args = args;
	smp->sm_caseopt = args.caseopt;
	/* force regular-file / directory type bits onto the mode masks */
	smp->sm_args.file_mode = (smp->sm_args.file_mode &
	    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	smp->sm_args.dir_mode  = (smp->sm_args.dir_mode &
	    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

/*	simple_lock_init(&smp->sm_npslock);*/
	/*
	 * Compose f_mntfromname as "//user@server/share", bounded by pe;
	 * index() finds the terminating NUL after each strncpy.
	 */
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc + sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*pc++ = '/';
	*pc++ = '/';
	pc = index(strncpy(pc, vcp->vc_username, pe - pc - 2), 0);
	if (pc < pe-1) {
		*(pc++) = '@';
		pc = index(strncpy(pc, vcp->vc_srvname, pe - pc - 2), 0);
		if (pc < pe - 1) {
			*(pc++) = '/';
			strncpy(pc, ssp->ss_name, pe - pc - 2);
		}
	}
	/* protect against invalid mount points */
	smp->sm_args.mount_point[sizeof(smp->sm_args.mount_point) - 1] = '\0';
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &smbfs_vnode_vops, &mp->mnt_vn_norm_ops);
	error = smbfs_root(mp, &vp);
	if (error)
		goto bad;
	vn_unlock(vp);	/* lose the lock but keep the ref */
	SMBVDEBUG("root.v_sysrefs = %d\n", vp->v_sysref.refcnt);

#ifdef DIAGNOSTICS
	SMBERROR("mp=%p\n", mp);
#endif
	return error;
bad:
	if (smp) {
		if (smp->sm_cred)
			crfree(smp->sm_cred);
		if (smp->sm_hash)
			kfree(smp->sm_hash, M_SMBFSHASH);
		/* NOTE(review): lockdestroy may run on a never-lockinit'd
		 * (but bzero'd) lock when hashinit failed — confirm safe */
		lockdestroy(&smp->sm_hashlock);
#ifdef SMBFS_USEZONE
		zfree(smbfsmount_zone, smp);
#else
		kfree(smp, M_SMBFSDATA);
#endif
	}
	if (ssp)
		smb_share_put(ssp, &scred);
	return error;
}