/* * cpu_dump: dump the machine-dependent kernel core dump headers. */ static int cpu_dump(void) { int (*dump)(dev_t, daddr_t, void *, size_t); kcore_seg_t seg; cpu_kcore_hdr_t cpuhdr; const struct bdevsw *bdev; bdev = bdevsw_lookup(dumpdev); if (bdev == NULL) return (ENXIO); dump = bdev->d_dump; /* * Generate a segment header. */ CORE_SETMAGIC(seg, KCORE_MAGIC, MID_MACHINE, CORE_CPU); seg.c_size = dump_header_size - ALIGN(sizeof(seg)); (void)dump_header_addbytes(&seg, ALIGN(sizeof(seg))); /* * Add the machine-dependent header info. */ cpuhdr.pdppaddr = PDPpaddr; cpuhdr.nmemsegs = dump_nmemsegs; (void)dump_header_addbytes(&cpuhdr, ALIGN(sizeof(cpuhdr))); /* * Write out the memory segment descriptors. */ return dump_seg_iter(dump_header_addseg); }
/*
 * loaddisk: fill a memory disk (md) from the block device ld_dev.
 *
 * Opens the device, optionally sizes the transfer chunk from the
 * disklabel, reads the whole image into md->md_addr, then closes the
 * device.  Returns 0 on success or an errno.
 */
static int
loaddisk(struct md_conf *md, dev_t ld_dev, struct lwp *lwp)
{
	struct buf *buf;
	int error;
	const struct bdevsw *bdp;
	struct disklabel dl;
	struct read_info rs;

	/* No block switch entry for this device: nothing to read from. */
	bdp = bdevsw_lookup(ld_dev);
	if (bdp == NULL)
		return ENXIO;

	/*
	 * Initialize our buffer header:
	 */
	buf = getiobuf(NULL, false);
	buf->b_cflags = BC_BUSY;
	buf->b_dev = ld_dev;
	buf->b_error = 0;
	buf->b_proc = lwp->l_proc;

	/*
	 * Setup read_info: destination window is [md_addr, md_addr+md_size),
	 * transferred in RAMD_CHUNK pieces unless the disklabel provides a
	 * better (cylinder-sized) chunk below.
	 */
	rs.bp = buf;
	rs.nbytes = md->md_size;
	rs.offset = 0;
	rs.bufp = md->md_addr;
	rs.ebufp = (char *)md->md_addr + md->md_size;
	rs.chunk = RAMD_CHUNK;
	rs.media_sz = md->md_size;
	rs.strat = bdp->d_strategy;

	/*
	 * Open device and try to get some statistics.
	 */
	if ((error = bdp->d_open(ld_dev, FREAD | FNONBLOCK, 0, lwp)) != 0) {
		putiobuf(buf);
		return error;
	}
	if (bdp->d_ioctl(ld_dev, DIOCGDINFO, (void *)&dl, FREAD, lwp) == 0) {
		/* Read on a cylinder basis */
		rs.chunk = dl.d_secsize * dl.d_secpercyl;
		rs.media_sz = dl.d_secperunit * dl.d_secsize;
	}

	/*
	 * NOTE(review): `ri` is not declared anywhere in this function, so
	 * this branch cannot compile if support_compression is ever defined.
	 * Confirm the intended source of ramd_flag before enabling it.
	 */
#ifdef support_compression
	if (ri->ramd_flag & RAMD_LCOMP)
		error = decompress(cpy_uncompressed, md_compressed, &rs);
	else
#endif /* support_compression */
		error = ramd_norm_read(&rs);

	/* Release the device and the buffer header on all paths. */
	bdp->d_close(ld_dev, FREAD | FNONBLOCK, 0, lwp);
	putiobuf(buf);
	return error;
}
void findroot(void) { struct disk *dkp; struct partition *pp; device_t *devs; const struct bdevsw *bdev; int i, maj, unit; if (boothowto & RB_ASKNAME) return; /* Don't bother looking */ for (i = 0; genericconf[i] != NULL; i++) { for (unit = 0; unit < genericconf[i]->cd_ndevs; unit++) { if (genericconf[i]->cd_devs[unit] == NULL) continue; /* * Find the disk structure corresponding to the * current device. */ devs = genericconf[i]->cd_devs; if ((dkp = disk_find(device_xname(devs[unit]))) == NULL) continue; if (dkp->dk_driver == NULL || dkp->dk_driver->d_strategy == NULL) continue; maj = devsw_name2blk(genericconf[i]->cd_name, NULL, 0); if (maj == -1) continue; bdev = bdevsw_lookup(makedev(maj, 0)); #ifdef DIAGNOSTIC if (bdev == NULL) panic("findroot: impossible"); #endif if (bdev == NULL || bdev->d_strategy != dkp->dk_driver->d_strategy) continue; /* Open disk; forces read of disklabel. */ if ((*bdev->d_open)(MAKEDISKDEV(maj, unit, 0), FREAD|FNONBLOCK, 0, &lwp0)) continue; (void)(*bdev->d_close)(MAKEDISKDEV(maj, unit, 0), FREAD|FNONBLOCK, 0, &lwp0); pp = &dkp->dk_label->d_partitions[booted_partition]; if (pp->p_size != 0 && pp->p_fstype == FS_BSDFFS) { booted_device = devs[unit]; return; } } } }
static int dump_header_flush(void) { const struct bdevsw *bdev; size_t to_write; int error; bdev = bdevsw_lookup(dumpdev); to_write = roundup(dump_headerbuf_ptr - dump_headerbuf, dbtob(1)); error = bdev->d_dump(dumpdev, dump_header_blkno, dump_headerbuf, to_write); dump_header_blkno += btodb(to_write); dump_headerbuf_ptr = dump_headerbuf; return error; }
/* * cpu_dump: dump the machine-dependent kernel core dump headers. */ int cpu_dump() { int (*dump)(dev_t, daddr_t, void *, size_t); char bf[dbtob(1)]; kcore_seg_t *segp; cpu_kcore_hdr_t *cpuhdrp; phys_ram_seg_t *memsegp; const struct bdevsw *bdev; int i; bdev = bdevsw_lookup(dumpdev); if (bdev == NULL) return (ENXIO); dump = bdev->d_dump; memset(bf, 0, sizeof bf); segp = (kcore_seg_t *)bf; cpuhdrp = (cpu_kcore_hdr_t *)&bf[ALIGN(sizeof(*segp))]; memsegp = (phys_ram_seg_t *)&bf[ ALIGN(sizeof(*segp)) + ALIGN(sizeof(*cpuhdrp))]; /* * Generate a segment header. */ CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); /* * Add the machine-dependent header info. */ cpuhdrp->version = 1; cpuhdrp->PAKernelL1Table = pmap_kernel_L1_addr(); cpuhdrp->UserL1TableSize = 0; cpuhdrp->nmemsegs = bootconfig.dramblocks; cpuhdrp->omemsegs = ALIGN(sizeof(*cpuhdrp)); /* * Fill in the memory segment descriptors. */ for (i = 0; i < bootconfig.dramblocks; i++) { memsegp[i].start = bootconfig.dram[i].address; memsegp[i].size = bootconfig.dram[i].pages * PAGE_SIZE; } return (dump(dumpdev, dumplo, bf, dbtob(1))); }
size_t vndbdevsize(struct vnode *vp, struct proc *p) { struct partinfo pi; struct bdevsw *bsw; dev_t dev; dev = vp->v_rdev; bsw = bdevsw_lookup(dev); if (bsw->d_ioctl == NULL) return (0); if (bsw->d_ioctl(dev, DIOCGPART, (caddr_t)&pi, FREAD, p)) return (0); DNPRINTF(VDB_INIT, "vndbdevsize: size %llu secsize %u\n", DL_GETPSIZE(pi.part), pi.disklab->d_secsize); return (DL_GETPSIZE(pi.part)); }
/*
 * dumpsys_seg: write one physical memory segment [maddr, maddr+bytes)
 * to the dump device, in chunks of at most BYTES_PER_DUMP bytes.
 * Each chunk is temporarily mapped at dumpspace before being handed
 * to the device dump routine.  Updates the global write position
 * (dump_header_blkno) only after the whole segment succeeded.
 * Returns 0 on success or the errno from the device.
 */
static int
dumpsys_seg(paddr_t maddr, paddr_t bytes)
{
	u_long i, m, n;
	daddr_t blkno;
	const struct bdevsw *bdev;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error;

	/*
	 * NOTE(review): bdev is dereferenced without a NULL check;
	 * presumably dodumpsys() has already validated dumpdev — confirm.
	 */
	bdev = bdevsw_lookup(dumpdev);
	dump = bdev->d_dump;

	blkno = dump_header_blkno;
	for (i = 0; i < bytes; i += n, dump_totalbytesleft -= n) {
		/* Print out how many MBs we have left to go. */
		if ((dump_totalbytesleft % (1024*1024)) == 0)
			printf("%lu ", (unsigned long)
			    (dump_totalbytesleft / (1024 * 1024)));

		/* Limit size for next transfer. */
		n = bytes - i;
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;

		/* Map the chunk page-by-page into the dumpspace window. */
		for (m = 0; m < n; m += NBPG)
			pmap_kenter_pa(dumpspace + m, maddr + m,
			    VM_PROT_READ);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, (void *)dumpspace, n);
		if (error)
			return error;
		maddr += n;
		blkno += btodb(n);		/* XXX? */

#if 0	/* XXX this doesn't work. grr. */
		/* operator aborting dump? */
		if (sget() != NULL)
			return EINTR;
#endif
	}
	/* Commit the new device position for the next segment. */
	dump_header_blkno = blkno;

	return 0;
}
/* * This is called by main to set dumplo and dumpsize. * Dumps always skip the first PAGE_SIZE of disk space * in case there might be a disk label stored there. * If there is extra space, put dump at the end to * reduce the chance that swapping trashes it. * * Sparse dumps can't placed as close to the end as possible, because * savecore(8) has to know where to start reading in the dump device * before it has access to any of the crashed system's state. * * Note also that a sparse dump will never be larger than a full one: * in order to add a phys_ram_seg_t to the header, at least one page * must be removed. */ void cpu_dumpconf(void) { const struct bdevsw *bdev; int nblks, dumpblks; /* size of dump area */ if (dumpdev == NODEV) goto bad; bdev = bdevsw_lookup(dumpdev); if (bdev == NULL) { dumpdev = NODEV; goto bad; } if (bdev->d_psize == NULL) goto bad; nblks = (*bdev->d_psize)(dumpdev); if (nblks <= ctod(1)) goto bad; dumpblks = cpu_dumpsize(); if (dumpblks < 0) goto bad; dumpblks += ctod(cpu_dump_mempagecnt()); /* If dump won't fit (incl. room for possible label): */ if (dumpblks > (nblks - ctod(1))) { /* A sparse dump might (and hopefully will) fit. */ dumplo = ctod(1); } else { /* Put dump at end of partition */ dumplo = nblks - dumpblks; } /* dumpsize is in page units, and doesn't include headers. */ dumpsize = cpu_dump_mempagecnt(); /* Now that we've decided this will work, init ancillary stuff. */ dump_misc_init(); return; bad: dumpsize = 0; }
void cpu_dumpconf() { const struct bdevsw *bdev; int nblks, dumpblks; /* size of dump area */ if (dumpdev == NODEV) return; bdev = bdevsw_lookup(dumpdev); if (bdev == NULL) panic("dumpconf: bad dumpdev=0x%x", dumpdev); if (bdev->d_psize == NULL) return; nblks = (*bdev->d_psize)(dumpdev); if (nblks <= ctod(1)) return; dumpblks = cpu_dumpsize(); if (dumpblks < 0) goto bad; dumpblks += ctod(cpu_dump_mempagecnt()); /* If dump won't fit (incl. room for possible label), punt. */ if (dumpblks > (nblks - ctod(1))) goto bad; /* Put dump at end of partition */ dumplo = nblks - dumpblks; /* dumpsize is in page units, and doesn't include headers. */ dumpsize = cpu_dump_mempagecnt(); return; bad: dumpsize = 0; }
/*
 * chfs_mountfs - init CHFS: attach a CHFS file system to mount point mp
 * using the block device vnode devvp.
 *
 * Validates the device, allocates the ufsmount/chfs_mount pair, opens
 * the erase-block handler, initializes caches/queues/locks, scans the
 * medium (chfs_build_filesystem), fills in the struct mount fields and
 * fetches the root vnode, then starts the garbage collector.
 * Returns 0 on success or an errno.
 */
int
chfs_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct lwp *l = curlwp;
	kauth_cred_t cred;
	devmajor_t flash_major;
	dev_t dev;
	struct ufsmount* ump = NULL;
	struct chfs_mount* chmp;
	struct vnode *vp;
	int err = 0;

	dbg("mountfs()\n");

	dev = devvp->v_rdev;
	cred = l ? l->l_cred : NOCRED;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	err = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (err)
		return (err);

	/* Setup device: must be a block device driven by the flash driver. */
	flash_major = cdevsw_lookup_major(&flash_cdevsw);
	if (devvp->v_type != VBLK)
		err = ENOTBLK;
	else if (bdevsw_lookup(dev) == NULL)
		err = ENXIO;
	else if (major(dev) != flash_major) {
		dbg("major(dev): %d, flash_major: %d\n",
		    major(dev), flash_major);
		err = ENODEV;
	}
	if (err) {
		vrele(devvp);
		return (err);
	}

	/* Connect CHFS to UFS. */
	ump = kmem_zalloc(sizeof(struct ufsmount), KM_SLEEP);
	ump->um_fstype = UFS1;
	ump->um_chfs = kmem_zalloc(sizeof(struct chfs_mount), KM_SLEEP);
	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);

	chmp = ump->um_chfs;

	/* Initialize erase block handler. */
	chmp->chm_ebh = kmem_alloc(sizeof(struct chfs_ebh), KM_SLEEP);

	dbg("[]opening flash: %u\n", (unsigned int)devvp->v_rdev);
	err = ebh_open(chmp->chm_ebh, devvp->v_rdev);
	if (err) {
		dbg("error while opening flash\n");
		goto fail;
	}

	//TODO check flash sizes

	/* Initialize vnode cache's hashtable and eraseblock array. */
	chmp->chm_gbl_version = 0;
	chmp->chm_vnocache_hash = chfs_vnocache_hash_init();

	chmp->chm_blocks = kmem_zalloc(chmp->chm_ebh->peb_nr *
	    sizeof(struct chfs_eraseblock), KM_SLEEP);

	/* Initialize mutexes. */
	mutex_init(&chmp->chm_lock_mountfields, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_sizes, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_vnocache, MUTEX_DEFAULT, IPL_NONE);

	/* Initialize read/write constants (from UFS): 4K blocks, 2K frags. */
	chmp->chm_fs_bmask = -4096;
	chmp->chm_fs_bsize = 4096;
	chmp->chm_fs_qbmask = 4095;
	chmp->chm_fs_bshift = 12;
	chmp->chm_fs_fmask = -2048;
	chmp->chm_fs_qfmask = 2047;

	/* Initialize writebuffer: one flash page. */
	chmp->chm_wbuf_pagesize = chmp->chm_ebh->flash_if->page_size;
	dbg("wbuf size: %zu\n", chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf = kmem_alloc(chmp->chm_wbuf_pagesize, KM_SLEEP);
	rw_init(&chmp->chm_lock_wbuf);

	/* Initialize queues. */
	TAILQ_INIT(&chmp->chm_free_queue);
	TAILQ_INIT(&chmp->chm_clean_queue);
	TAILQ_INIT(&chmp->chm_dirty_queue);
	TAILQ_INIT(&chmp->chm_very_dirty_queue);
	TAILQ_INIT(&chmp->chm_erasable_pending_wbuf_queue);
	TAILQ_INIT(&chmp->chm_erase_pending_queue);

	/* Initialize flash-specific constants. */
	chfs_calc_trigger_levels(chmp);

	/* Initialize sizes. */
	chmp->chm_nr_free_blocks = 0;
	chmp->chm_nr_erasable_blocks = 0;
	chmp->chm_max_vno = 2;
	chmp->chm_checked_vno = 2;
	chmp->chm_unchecked_size = 0;
	chmp->chm_used_size = 0;
	chmp->chm_dirty_size = 0;
	chmp->chm_wasted_size = 0;
	chmp->chm_free_size = chmp->chm_ebh->eb_size * chmp->chm_ebh->peb_nr;

	/* Build filesystem. */
	err = chfs_build_filesystem(chmp);
	if (err) {
		/* Armageddon and return. */
		chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);
		ebh_close(chmp->chm_ebh);
		err = EIO;
		goto fail;
	}

	/* Initialize UFS / struct mount bookkeeping. */
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_CHFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE;
	ump->um_flags = 0;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_maxfilesize = 1048512 * 1024;

	/*
	 * Allocate the root vnode.
	 * NOTE(review): on VFS_VGET failure this returns without running
	 * the fail: cleanup, leaking ump/chmp/chm_ebh and the structures
	 * built above — confirm whether this is intentional.
	 */
	err = VFS_VGET(mp, CHFS_ROOTINO, &vp);
	if (err) {
		dbg("error: %d while allocating root node\n", err);
		return err;
	}
	vput(vp);

	/* Start GC. */
	chfs_gc_thread_start(chmp);
	mutex_enter(&chmp->chm_lock_mountfields);
	chfs_gc_trigger(chmp);
	mutex_exit(&chmp->chm_lock_mountfields);

	spec_node_setmountedfs(devvp, mp);
	return 0;

fail:
	/* Free in reverse order of allocation. */
	kmem_free(chmp->chm_ebh, sizeof(struct chfs_ebh));
	kmem_free(chmp, sizeof(struct chfs_mount));
	kmem_free(ump, sizeof(struct ufsmount));
	return err;
}
/*
 * VFS Operations.
 *
 * mount system call: mount an ISO-9660 (CD-ROM) file system, which is
 * always read-only.  Handles MNT_GETARGS queries, first-time mounts
 * and (for updates) only renaming checks; the actual work is done by
 * iso_mountfs().
 */
int
cd9660_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct iso_args *args = data;
	int error;
	struct iso_mnt *imp = VFSTOISOFS(mp);

	if (*data_len < sizeof *args)
		return EINVAL;

	/* Query: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (imp == NULL)
			return EIO;
		args->fspec = NULL;
		args->flags = imp->im_flags;
		*data_len = sizeof (*args);
		return 0;
	}

	/* ISO-9660 is read-only; refuse read/write mounts. */
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EROFS);

	if ((mp->mnt_flag & MNT_UPDATE) && args->fspec == NULL)
		return EINVAL;

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	error = namei_simple_user(args->fspec, NSM_FOLLOW_NOEMULROOT,
	    &devvp);
	if (error != 0)
		return (error);

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return ENOTBLK;
	}
	if (bdevsw_lookup(devvp->v_rdev) == NULL) {
		vrele(devvp);
		return ENXIO;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(VREAD));
	VOP_UNLOCK(devvp);
	if (error) {
		vrele(devvp);
		return (error);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* First mount: open the device, then hand off. */
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, FREAD, FSCRED);
		VOP_UNLOCK(devvp);
		if (error)
			goto fail;
		error = iso_mountfs(devvp, mp, l, args);
		if (error) {
			/* Undo the open before releasing the vnode. */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, FREAD, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}
	} else {
		/* Update: only accept the same underlying device. */
		vrele(devvp);
		if (devvp != imp->im_devvp &&
		    devvp->v_rdev != imp->im_devvp->v_rdev)
			return (EINVAL);	/* needs translation */
	}
	return set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

fail:
	vrele(devvp);
	return (error);
}
/*
 * dodumpsys: write a crash dump of all DRAM blocks to the configured
 * dump device.  Writes the MD headers via cpu_dump(), then maps and
 * writes physical memory one page at a time through the memhook
 * window, reporting progress per megabyte and a final status string.
 */
void
dodumpsys()
{
	const struct bdevsw *bdev;
	daddr_t blkno;
	int psize;
	int error;
	/*
	 * NOTE(review): addr is an int holding a physical address — this
	 * truncates on configurations where physical memory extends past
	 * the int range; confirm paddr_t is not needed here.
	 */
	int addr;
	int block;
	int len;
	vaddr_t dumpspace;

	/* flush everything out of caches */
	cpu_dcache_wbinv_all();

	if (dumpdev == NODEV)
		return;
	/* Late configuration, e.g. dump during autoconfiguration. */
	if (dumpsize == 0) {
		cpu_dumpconf();
	}
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		delay(5000000);
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;
	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* Machine-dependent header blocks go out first. */
	if ((error = cpu_dump()) != 0)
		goto err;

	blkno = dumplo + cpu_dumpsize();
	dumpspace = memhook;
	error = 0;
	len = 0;

	/* Walk every DRAM block, one page at a time. */
	for (block = 0; block < bootconfig.dramblocks && error == 0;
	    ++block) {
		addr = bootconfig.dram[block].address;
		for (;addr < (bootconfig.dram[block].address +
		    (bootconfig.dram[block].pages * PAGE_SIZE));
		    addr += PAGE_SIZE) {
			/* Progress indicator, one number per megabyte. */
			if ((len % (1024*1024)) == 0)
				printf("%d ", len / (1024*1024));
			/* Map the physical page at the dump window. */
			pmap_kenter_pa(dumpspace, addr, VM_PROT_READ);
			pmap_update(pmap_kernel());

			error = (*bdev->d_dump)(dumpdev,
			    blkno, (void *) dumpspace, PAGE_SIZE);
			if (error)
				goto err;
			blkno += btodb(PAGE_SIZE);
			len += PAGE_SIZE;
		}
	}
err:
	/* Translate the final status into a console message. */
	switch (error) {
	case ENXIO:
		printf("device bad\n");
		break;
	case EFAULT:
		printf("device not ready\n");
		break;
	case EINVAL:
		printf("area improper\n");
		break;
	case EIO:
		printf("i/o error\n");
		break;
	case EINTR:
		printf("aborted from console\n");
		break;
	case 0:
		printf("succeeded\n");
		break;
	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);
}
/*
 * v7fs_mount: mount system call entry for the 7th Edition UNIX file
 * system.  Handles MNT_GETARGS, validates the backing block device,
 * checks device permissions via kauth, and for a first-time mount
 * opens the device and calls v7fs_mountfs().
 */
int
v7fs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct v7fs_args *args = data;
	struct v7fs_mount *v7fsmount = (void *)mp->mnt_data;
	struct vnode *devvp = NULL;
	int error = 0;
	bool update = mp->mnt_flag & MNT_UPDATE;

	DPRINTF("mnt_flag=%x %s\n", mp->mnt_flag, update ? "update" : "");

	if (*data_len < sizeof(*args))
		return EINVAL;

	/* Query: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (!v7fsmount)
			return EIO;
		args->fspec = NULL;
		args->endian = v7fsmount->core->endian;
		*data_len = sizeof(*args);
		return 0;
	}

	DPRINTF("args->fspec=%s endian=%d\n", args->fspec, args->endian);
	if (args->fspec == NULL) {
		/* nothing to do. */
		return EINVAL;
	}

	if (args->fspec != NULL) {
		/* Look up the name and verify that it's sane. */
		error = namei_simple_user(args->fspec,
		    NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return (error);
		DPRINTF("mount device=%lx\n", (long)devvp->v_rdev);

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			KDASSERT(v7fsmount);
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 *
			 * NOTE(review): on the successful update paths
			 * below, the reference obtained by namei above
			 * does not appear to be vrele'd — confirm whether
			 * this leaks a devvp reference per update.
			 */
			if (devvp != v7fsmount->devvp) {
				DPRINTF("devvp %p != %p rootvp=%p\n",
				    devvp, v7fsmount->devvp, rootvp);
				if (rootvp == v7fsmount->devvp) {
					vrele(devvp);
					devvp = rootvp;
					vref(devvp);
				} else {
					error = EINVAL;
				}
			}
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * Permission to update a mount is checked higher, so here we presume
	 * updating the mount is okay (for example, as far as securelevel goes)
	 * which leaves us with the normal check.
	 */
	if (error == 0) {
		int accessmode = VREAD;
		/* Write access is needed for r/w mounts and r/w upgrades. */
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
	}

	if (error) {
		vrele(devvp);
		return error;
	}

	if (!update) {
		/* First mount: open the device and build the in-core fs. */
		if ((error = v7fs_openfs(devvp, mp, l))) {
			vrele(devvp);
			return error;
		}

		if ((error = v7fs_mountfs(devvp, mp, args->endian))) {
			v7fs_closefs(devvp, mp);
			VOP_UNLOCK(devvp);
			vrele(devvp);
			return error;
		}
		VOP_UNLOCK(devvp);
	} else if (mp->mnt_flag & MNT_RDONLY) {
		/* XXX: r/w -> read only */
	}

	return set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
}
/*
 * dumpsys: write a crash dump to the configured dump device.
 *
 * Emits a one-block header (kcore segment header + cpu_kcore_hdr)
 * followed by every DRAM page, each temporarily mapped at the memhook
 * window.  Progress is reported per megabyte and a status string is
 * printed at the end.
 */
void
dumpsys()
{
	const struct bdevsw *bdev;
	daddr_t blkno;
	int psize;
	int error;
	/*
	 * NOTE(review): addr is an int holding a physical address — this
	 * truncates if DRAM extends beyond the int range; confirm paddr_t
	 * is not needed here.
	 */
	int addr;
	int block;
	int len;
	vaddr_t dumpspace;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	char dump_hdr[dbtob(1)];	/* assumes header fits in one block */

	/* Save registers. */
	savectx(&dumppcb);

	/* flush everything out of caches */
	cpu_dcache_wbinv_all();
	cpu_sdcache_wbinv_all();

	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif

	blkno = dumplo;
	dumpspace = (vaddr_t) memhook;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;
	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* Setup the dump header: kcore segment header + MD header. */
	kseg_p = (kcore_seg_t *)dump_hdr;
	chdr_p = (cpu_kcore_hdr_t *)&dump_hdr[ALIGN(sizeof(*kseg_p))];
	bzero(dump_hdr, sizeof(dump_hdr));

	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = sizeof(dump_hdr) - ALIGN(sizeof(*kseg_p));
	*chdr_p = cpu_kcore_hdr;

	/* Header block goes out first. */
	error = (*bdev->d_dump)(dumpdev, blkno++, (caddr_t)dump_hdr,
	    sizeof(dump_hdr));
	if (error != 0)
		goto abort;

	len = 0;
	/* Walk every DRAM block, one page at a time. */
	for (block = 0; block < bootconfig.dramblocks && error == 0;
	    ++block) {
		addr = bootconfig.dram[block].address;
		for (;addr < (bootconfig.dram[block].address +
		    (bootconfig.dram[block].pages * PAGE_SIZE));
		    addr += PAGE_SIZE) {
			/* Progress indicator, one number per megabyte. */
			if ((len % (1024*1024)) == 0)
				printf("%d ", len / (1024*1024));
			/* Map the page at the dump window for the write. */
			pmap_kenter_pa(dumpspace, addr, PROT_READ);
			pmap_update(pmap_kernel());

			error = (*bdev->d_dump)(dumpdev,
			    blkno, (caddr_t) dumpspace, PAGE_SIZE);
			pmap_kremove(dumpspace, PAGE_SIZE);
			pmap_update(pmap_kernel());
			if (error)
				break;
			blkno += btodb(PAGE_SIZE);
			len += PAGE_SIZE;
		}
	}
abort:
	/*
	 * NOTE(review): the default case prints "succeeded" — this covers
	 * error == 0 but also any error value not listed above; confirm
	 * that is intended.
	 */
	switch (error) {
	case ENXIO:
		printf("device bad\n");
		break;
	case EFAULT:
		printf("device not ready\n");
		break;
	case EINVAL:
		printf("area improper\n");
		break;
	case EIO:
		printf("i/o error\n");
		break;
	case EINTR:
		printf("aborted from console\n");
		break;
	default:
		printf("succeeded\n");
		break;
	}
	printf("\n\n");
	delay(1000000);
}
/*
 * msdosfs_mount: mount system call entry for the MS-DOS (FAT) file
 * system.
 *
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 *
 * Handles MNT_GETARGS queries, legacy (unversioned) argument upgrade,
 * mount updates (r/w <-> r/o transitions), and first-time mounts via
 * msdosfs_mountfs().
 */
int
msdosfs_mount(struct mount *mp, const char *path, void *data,
    size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct msdosfs_args *args = data; /* holds data from mount request */
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	int error, flags;
	mode_t accessmode;

	if (*data_len < sizeof *args)
		return EINVAL;

	/* Query: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = VFSTOMSDOSFS(mp);
		if (pmp == NULL)
			return EIO;
		args->fspec = NULL;
		args->uid = pmp->pm_uid;
		args->gid = pmp->pm_gid;
		args->mask = pmp->pm_mask;
		args->flags = pmp->pm_flags;
		args->version = MSDOSFSMNT_VERSION;
		args->dirmask = pmp->pm_dirmask;
		args->gmtoff = pmp->pm_gmtoff;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * If not versioned (i.e. using old mount_msdos(8)), fill in
	 * the additional structure items with suitable defaults.
	 */
	if ((args->flags & MSDOSFSMNT_VERSIONED) == 0) {
		args->version = 1;
		args->dirmask = args->mask;
	}

	/*
	 * Reset GMT offset for pre-v3 mount structure args.
	 */
	if (args->version < 3)
		args->gmtoff = 0;

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		error = 0;
		/* Going r/w -> r/o: flush and close for writing. */
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, NULLVP, flags);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			/* not yet implemented */
			error = EOPNOTSUPP;
		if (error) {
			DPRINTF(("vflush %d\n", error));
			return (error);
		}
		if ((pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 *
			 * Permission to update a mount is checked higher, so
			 * here we presume updating the mount is okay (for
			 * example, as far as securelevel goes) which leaves
			 * us with the normal check.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = kauth_authorize_system(l->l_cred,
			    KAUTH_SYSTEM_MOUNT,
			    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
			    KAUTH_ARG(VREAD | VWRITE));
			VOP_UNLOCK(devvp);
			DPRINTF(("KAUTH_REQ_SYSTEM_MOUNT_DEVICE %d\n",
			    error));
			if (error)
				return (error);
			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
		}
		if (args->fspec == NULL) {
			DPRINTF(("missing fspec\n"));
			return EINVAL;
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	error = namei_simple_user(args->fspec, NSM_FOLLOW_NOEMULROOT,
	    &devvp);
	if (error != 0) {
		DPRINTF(("namei %d\n", error));
		return (error);
	}

	if (devvp->v_type != VBLK) {
		DPRINTF(("not block\n"));
		vrele(devvp);
		return (ENOTBLK);
	}
	if (bdevsw_lookup(devvp->v_rdev) == NULL) {
		DPRINTF(("no block switch\n"));
		vrele(devvp);
		return (ENXIO);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accessmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accessmode |= VWRITE;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
	    KAUTH_ARG(accessmode));
	VOP_UNLOCK(devvp);
	if (error) {
		DPRINTF(("KAUTH_REQ_SYSTEM_MOUNT_DEVICE %d\n", error));
		vrele(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		int xflags;

		/* First mount: open the device, then hand off. */
		if (mp->mnt_flag & MNT_RDONLY)
			xflags = FREAD;
		else
			xflags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, xflags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error) {
			DPRINTF(("VOP_OPEN %d\n", error));
			goto fail;
		}
		error = msdosfs_mountfs(devvp, mp, l, args);
		if (error) {
			DPRINTF(("msdosfs_mountfs %d\n", error));
			/* Undo the open before releasing the vnode. */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void) VOP_CLOSE(devvp, xflags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}
#ifdef MSDOSFS_DEBUG		/* only needed for the printf below */
		pmp = VFSTOMSDOSFS(mp);
#endif
	} else {
		/* Update: only accept the same underlying device. */
		vrele(devvp);
		if (devvp != pmp->pm_devvp) {
			DPRINTF(("devvp %p pmp %p\n",
			    devvp, pmp->pm_devvp));
			return (EINVAL);	/* needs translation */
		}
	}
	if ((error = update_mp(mp, args)) != 0) {
		msdosfs_unmount(mp, MNT_FORCE);
		DPRINTF(("update_mp %d\n", error));
		return error;
	}

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n",
	    mp, pmp, pmp->pm_inusemap);
#endif
	return set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

fail:
	vrele(devvp);
	return (error);
}
/*
 * VFS Operations.
 *
 * mount system call: mount (or update the mount of) an ext2 file
 * system.  Handles MNT_GETARGS, device validation, kauth permission
 * checks, first mounts via ext2fs_mountfs(), and update transitions
 * (r/w <-> r/o, reload).
 */
int
ext2fs_mount(struct mount *mp, const char *path, void *data,
    size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct ufs_args *args = data;
	struct ufsmount *ump = NULL;
	struct m_ext2fs *fs;
	int error = 0, flags, update;
	mode_t accessmode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	/* Query: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		memset(args, 0, sizeof *args);
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		error = namei_simple_user(args->fspec,
		    NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return error;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			ump = VFSTOUFS(mp);
			if (devvp != ump->um_devvp) {
				if (devvp->v_rdev != ump->um_devvp->v_rdev)
					error = EINVAL;
				else {
					/* Same device, different vnode:
					 * keep the original one. */
					vrele(devvp);
					devvp = ump->um_devvp;
					vref(devvp);
				}
			}
		}
	} else {
		if (!update) {
			/* New mounts must have a filename for the device */
			return EINVAL;
		} else {
			/* Reuse the device from the existing mount. */
			ump = VFSTOUFS(mp);
			devvp = ump->um_devvp;
			vref(devvp);
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * Permission to update a mount is checked higher, so here we presume
	 * updating the mount is okay (for example, as far as securelevel
	 * goes) which leaves us with the normal check.
	 */
	if (error == 0) {
		accessmode = VREAD;
		/* Write access is needed for r/w mounts and r/w upgrades. */
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
		VOP_UNLOCK(devvp);
	}

	if (error) {
		vrele(devvp);
		return error;
	}

	if (!update) {
		int xflags;

		/* First mount: open the device, then hand off. */
		if (mp->mnt_flag & MNT_RDONLY)
			xflags = FREAD;
		else
			xflags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, xflags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error)
			goto fail;
		error = ext2fs_mountfs(devvp, mp);
		if (error) {
			/* Undo the open before releasing the vnode. */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, xflags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}

		ump = VFSTOUFS(mp);
		fs = ump->um_e2fs;
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		ump = VFSTOUFS(mp);
		fs = ump->um_e2fs;
		if (fs->e2fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from r/w to r/o
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ext2fs_flushfiles(mp, flags);
			/* Mark the on-disk state clean if we got it clean. */
			if (error == 0 &&
			    ext2fs_cgupdate(ump, MNT_WAIT) == 0 &&
			    (fs->e2fs.e2fs_state & E2FS_ERRORS) == 0) {
				fs->e2fs.e2fs_state = E2FS_ISCLEAN;
				(void) ext2fs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return error;
			fs->e2fs_ronly = 1;
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ext2fs_reload(mp, l->l_cred, l);
			if (error)
				return error;
		}

		if (fs->e2fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write
			 */
			fs->e2fs_ronly = 0;
			if (fs->e2fs.e2fs_state == E2FS_ISCLEAN)
				fs->e2fs.e2fs_state = 0;
			else
				fs->e2fs.e2fs_state = E2FS_ERRORS;
			fs->e2fs_fmod = 1;
		}
		if (args->fspec == NULL)
			return 0;
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error == 0)
		ext2fs_sb_setmountinfo(fs, mp);

	if (fs->e2fs_fmod != 0) {	/* XXX */
		fs->e2fs_fmod = 0;
		if (fs->e2fs.e2fs_state == 0)
			fs->e2fs.e2fs_wtime = time_second;
		else
			printf("%s: file system not clean; please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname);
		(void) ext2fs_cgupdate(ump, MNT_WAIT);
	}
	return error;

fail:
	vrele(devvp);
	return error;
}
/*
 * chfs_mount: VFS mount entry point for CHFS.
 *
 * Handles MNT_GETARGS queries, rejects updates (unsupported), looks
 * up and validates the backing block device, opens it, and delegates
 * the real work to chfs_mountfs().
 *
 * Fix: a new mount with args->fspec == NULL previously fell through
 * with devvp still NULL and dereferenced it in VOP_OPEN(); such a
 * request now fails cleanly with EINVAL.
 *
 * NOTE(review): the pathbuf obtained from pathbuf_copyin() does not
 * appear to be destroyed on any path — confirm whether namei()
 * consumes it here or whether pathbuf_destroy() is needed.
 */
static int
chfs_mount(struct mount *mp,
    const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct nameidata nd;
	struct pathbuf *pb;
	struct vnode *devvp = NULL;
	struct ufs_args *args = data;
	struct ufsmount *ump = NULL;
	struct chfs_mount *chmp;
	int err = 0;
	int xflags;

	dbg("mount()\n");

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	/* Query: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		memset(args, 0, sizeof *args);
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/* XXX: There is no support yet to update file system
		 * settings. Should be added. */
		return ENODEV;
	}

	/* A new mount must name its backing device. */
	if (args->fspec == NULL)
		return EINVAL;

	err = pathbuf_copyin(args->fspec, &pb);
	if (err) {
		return err;
	}
	/* Look up the name and verify that it's sane. */
	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((err = namei(&nd)) != 0)
		return (err);
	devvp = nd.ni_vp;

	/* Be sure this is a valid block device */
	if (devvp->v_type != VBLK)
		err = ENOTBLK;
	else if (bdevsw_lookup(devvp->v_rdev) == NULL)
		err = ENXIO;
	if (err) {
		vrele(devvp);
		return (err);
	}

	if (mp->mnt_flag & MNT_RDONLY)
		xflags = FREAD;
	else
		xflags = FREAD|FWRITE;

	err = VOP_OPEN(devvp, xflags, FSCRED);
	if (err)
		goto fail;

	/* call CHFS mount function */
	err = chfs_mountfs(devvp, mp);
	if (err) {
		/* Undo the open before releasing the vnode. */
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		(void)VOP_CLOSE(devvp, xflags, NOCRED);
		VOP_UNLOCK(devvp);
		goto fail;
	}

	ump = VFSTOUFS(mp);
	chmp = ump->um_chfs;

	vfs_getnewfsid(mp);
	chmp->chm_fsmp = mp;

	return set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

fail:
	vrele(devvp);
	return (err);
}
/*
 * dodumpsys: write a crash dump to the configured dump device.
 *
 * Computes the segment layout (dump_seg_prep), verifies the dump area
 * is large enough, writes the headers via cpu_dump(), then iterates
 * the memory segments with dumpsys_seg(), cross-checking the written
 * block counts against the expected sizes.  Prints a status string
 * and pauses ~5 seconds before returning.
 */
void
dodumpsys(void)
{
	const struct bdevsw *bdev;
	int dumpend, psize;
	int error;

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work. grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	/* Compute the segment layout and total size of this dump. */
	dump_seg_prep();
	dumpend = dumplo + btodb(dump_header_size) + ctod(dump_npages);
	if (dumpend > psize) {
		printf("failed: insufficient space (%d < %d)\n",
		    psize, dumpend);
		goto failed;
	}

	/* Headers first, then verify the header block count. */
	dump_header_start();
	if ((error = cpu_dump()) != 0)
		goto err;
	if ((error = dump_header_finish()) != 0)
		goto err;

	if (dump_header_blkno != dumplo + btodb(dump_header_size)) {
		printf("BAD header size (%ld [written] != %ld [expected])\n",
		    (long)(dump_header_blkno - dumplo),
		    (long)btodb(dump_header_size));
		goto failed;
	}

	/* Dump every memory segment, tracking bytes left for progress. */
	dump_totalbytesleft = roundup(ptoa(dump_npages), BYTES_PER_DUMP);
	error = dump_seg_iter(dumpsys_seg);

	if (error == 0 && dump_header_blkno != dumpend) {
		printf("BAD dump size (%ld [written] != %ld [expected])\n",
		    (long)(dumpend - dumplo),
		    (long)(dump_header_blkno - dumplo));
		goto failed;
	}

err:
	/* Translate the final status into a console message. */
	switch (error) {
	case ENXIO:
		printf("device bad\n");
		break;
	case EFAULT:
		printf("device not ready\n");
		break;
	case EINVAL:
		printf("area improper\n");
		break;
	case EIO:
		printf("i/o error\n");
		break;
	case EINTR:
		printf("aborted from console\n");
		break;
	case 0:
		printf("succeeded\n");
		break;
	default:
		printf("error %d\n", error);
		break;
	}

failed:
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
/*
 * adosfs_mount: VFS mount entry point for the read-only AmigaDOS
 * file system.
 *
 * Supports MNT_GETARGS queries and new mounts only; any attempt to
 * mount read/write fails with EROFS.  Returns 0 or an errno.
 */
int
adosfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct adosfs_args *aargs = data;
	struct adosfsmount *amp;
	struct vnode *bvp;
	mode_t wanted;
	int rv;

	if (*data_len < sizeof *aargs)
		return EINVAL;

	/* Query only: copy out the current mount arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		amp = VFSTOADOSFS(mp);
		if (amp == NULL)
			return EIO;
		aargs->uid = amp->uid;
		aargs->gid = amp->gid;
		aargs->mask = amp->mask;
		aargs->fspec = NULL;
		*data_len = sizeof *aargs;
		return 0;
	}

	/* adosfs is read-only. */
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EROFS);

	if ((mp->mnt_flag & MNT_UPDATE) && aargs->fspec == NULL)
		return EOPNOTSUPP;

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	rv = namei_simple_user(aargs->fspec, NSM_FOLLOW_NOEMULROOT, &bvp);
	if (rv != 0)
		return (rv);

	if (bvp->v_type != VBLK) {
		rv = ENOTBLK;
		goto bad;
	}
	if (bdevsw_lookup(bvp->v_rdev) == NULL) {
		rv = ENXIO;
		goto bad;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	wanted = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		wanted |= VWRITE;
	vn_lock(bvp, LK_EXCLUSIVE | LK_RETRY);
	rv = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, bvp, KAUTH_ARG(wanted));
	VOP_UNLOCK(bvp);
	if (rv)
		goto bad;

	/* MNT_UPDATE? */
	rv = adosfs_mountfs(bvp, mp, l);
	if (rv != 0)
		goto bad;

	/* Mounted: record the per-mount id/permission settings. */
	amp = VFSTOADOSFS(mp);
	amp->uid = aargs->uid;
	amp->gid = aargs->gid;
	amp->mask = aargs->mask;

	return set_statvfs_info(path, UIO_USERSPACE, aargs->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

bad:
	vrele(bvp);
	return (rv);
}
/*
 * hfs_mount: VFS mount entry point for HFS+.
 *
 * Handles MNT_GETARGS queries; remounting (MNT_UPDATE) is disabled for
 * now (forced off below), so only new mounts reach hfs_mountfs().
 * Returns 0 or an errno.
 */
int
hfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct nameidata nd;
	struct hfs_args *args = data;
	struct vnode *devvp;
	struct hfsmount *hmp;
	int error;
	int update;
	mode_t accessmode;

	/*
	 * FIX: reject a NULL argument pointer before any use.  The
	 * original checked "data == NULL" only after the MNT_GETARGS
	 * path had already dereferenced it (args->fspec = NULL).
	 */
	if (data == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

#ifdef HFS_DEBUG
	printf("vfsop = hfs_mount()\n");
#endif /* HFS_DEBUG */

	if (mp->mnt_flag & MNT_GETARGS) {
		hmp = VFSTOHFS(mp);
		if (hmp == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * FIX: initialize error.  On the fspec == NULL && update path
	 * nothing assigns it before "if (error == 0 && ...)" below —
	 * latent uninitialized read (UB) once updates are re-enabled.
	 */
	error = 0;

/* FIXME: For development ONLY - disallow remounting for now */
#if 0
	update = mp->mnt_flag & MNT_UPDATE;
#else
	update = 0;
#endif

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
		if ((error = namei(&nd)) != 0)
			return error;
		devvp = nd.ni_vp;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			hmp = VFSTOHFS(mp);
			if (devvp != hmp->hm_devvp)
				error = EINVAL;
		}
	} else {
		if (update) {
			/* Use the extant mount */
			hmp = VFSTOHFS(mp);
			devvp = hmp->hm_devvp;
			vref(devvp);
		} else {
			/* New mounts must have a filename for the device */
			return EINVAL;
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (error == 0 && kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, l->l_cred);
		VOP_UNLOCK(devvp, 0);
	}

	if (error != 0)
		goto error;

	if (update) {
		printf("HFS: live remounting not yet supported!\n");
		error = EINVAL;
		goto error;
	}

	if ((error = hfs_mountfs(devvp, mp, l, args->fspec)) != 0)
		goto error;

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

#ifdef HFS_DEBUG
	if(!update) {
		char* volname;

		hmp = VFSTOHFS(mp);
		volname = malloc(hmp->hm_vol.name.length + 1, M_TEMP, M_WAITOK);
		if (volname == NULL)
			printf("could not allocate volname; ignored\n");
		else {
			if (hfs_unicode_to_ascii(hmp->hm_vol.name.unicode,
			    hmp->hm_vol.name.length, volname) == NULL)
				printf("could not convert volume name to ascii; ignored\n");
			else
				printf("mounted volume \"%s\"\n", volname);
			free(volname, M_TEMP);
		}
	}
#endif /* HFS_DEBUG */

	return error;

error:
	vrele(devvp);
	return error;
}
/*
 * ntfs_mount: VFS mount entry point for NTFS (read support).
 *
 * Handles MNT_GETARGS queries and new mounts; MNT_UPDATE is rejected
 * with EINVAL.  On a new mount the backing block device is resolved,
 * opened, and passed to ntfs_mountfs().  Returns 0 or an errno.
 */
static int
ntfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct ntfs_args *margs = data;
	struct vnode *dvp;
	int omode;
	int error = 0;

	if (*data_len < sizeof *margs)
		return EINVAL;

	/* Query only: copy out the current mount arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		struct ntfsmount *ntm = VFSTONTFS(mp);

		if (ntm == NULL)
			return EIO;
		margs->fspec = NULL;
		margs->uid = ntm->ntm_uid;
		margs->gid = ntm->ntm_gid;
		margs->mode = ntm->ntm_mode;
		margs->flag = ntm->ntm_flag;
		*data_len = sizeof *margs;
		return 0;
	}

	/* Updating an already-mounted ntfs is not implemented. */
	if (mp->mnt_flag & MNT_UPDATE) {
		printf("ntfs_mount(): MNT_UPDATE not supported\n");
		return (EINVAL);
	}

	/*
	 * Look up the device name and verify that it refers to a
	 * sensible block device.
	 */
	error = namei_simple_user(margs->fspec, NSM_FOLLOW_NOEMULROOT, &dvp);
	if (error)
		return (error);

	if (dvp->v_type != VBLK) {
		error = ENOTBLK;
		goto fail;
	}
	if (bdevsw_lookup(dvp->v_rdev) == NULL) {
		error = ENXIO;
		goto fail;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Unreachable: MNT_UPDATE was rejected above.  The
		 * original update code here was already disabled
		 * (#if 0).
		 */
	} else {
		/*
		 * New mount: record the device and mount-point names
		 * first, so the upper layer can discard them on error.
		 */
		error = set_statvfs_info(path, UIO_USERSPACE, margs->fspec,
		    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
		if (error)
			goto fail;

		omode = (mp->mnt_flag & MNT_RDONLY) ? FREAD : FREAD|FWRITE;

		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(dvp, omode, FSCRED);
		VOP_UNLOCK(dvp);
		if (error)
			goto fail;

		error = ntfs_mountfs(dvp, mp, margs, l);
		if (error) {
			/* Undo the open; dvp is released at "fail". */
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(dvp, omode, NOCRED);
			VOP_UNLOCK(dvp);
			goto fail;
		}
	}

	/*
	 * Initialize FS stat information in mount struct; uses both
	 * mp->mnt_stat.f_mntonname and mp->mnt_stat.f_mntfromname.
	 * This code is common to root and non-root mounts.
	 */
	(void)VFS_STATVFS(mp, &mp->mnt_stat);

	return (error);

fail:
	vrele(dvp);
	return (error);
}