/*
 * execve() -- user-space wrapper around the kernel exec call.
 *
 * Opens the executable to obtain a file descriptor (the kernel takes an
 * open descriptor rather than a path), counts the NULL-terminated argv
 * and envp vectors, and invokes SYSCALL_EXEC_EXEC with the assembled
 * argument structure.  Returns -1 if the file cannot be opened with
 * O_EXEC; otherwise returns the syscall's result.
 */
int
execve(const char *path, char * const argv[], char * const envp[])
{
	struct _exec_args args = {
		.argv = argv,
		.env = envp,
	};
	int rc;

	args.fd = open(path, O_EXEC);
	if (args.fd < 0)
		return -1;

	/* vcount() counts entries up to the terminating NULL pointer. */
	args.nargv = vcount(argv);
	args.nenv = vcount(envp);

	rc = syscall(SYSCALL_EXEC_EXEC, &args);

	/* Don't leak the descriptor if the exec call came back. */
	close(args.fd);
	return rc;
}
/*
 * Resolve and open the block device backing a HAMMER volume.
 *
 * If *devvpp is NULL, the device path is looked up and a referenced
 * vnode is returned through it; otherwise the caller-supplied vnode is
 * used as-is.  The device must be a disk that is not already mounted
 * and has no other active opens.  On success the device is opened
 * (read-only when 'ronly' is set).  On any failure the vnode reference
 * is dropped and *devvpp is reset to NULL.
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		/*
		 * Only disks may be mounted; reject a device that some
		 * other filesystem already has mounted.
		 */
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	/* Any outstanding open on the device makes it busy. */
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		/* Flush stale buffers from a previous use, then open. */
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp, (ronly ? FREAD : FREAD|FWRITE), FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	/* On failure drop our reference so the caller sees a clean slate. */
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}
/*
 * Close the console device.
 *
 * Releases the cached reference to the underlying real console device
 * vnode, then forwards the close to the real driver -- but only when no
 * one else still has the device open.
 */
int
cnclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct vnode *vp;

	/* No console attached: nothing to close. */
	if (cn_tab == NULL)
		return (0);

	/*
	 * If the real console isn't otherwise open, close it.
	 * If it's otherwise open, don't close it, because that'll
	 * screw up others who have it open.
	 */
	dev = cn_tab->cn_dev;
	if (cn_devvp != NULL) {
		/* release our reference to real dev's vnode */
		vrele(cn_devvp);
		cn_devvp = NULL;
	}
	/* Still referenced elsewhere: leave the real device open. */
	if (vfinddev(dev, VCHR, &vp) && vcount(vp))
		return (0);
	return ((*cdevsw[major(dev)].d_close)(dev, flag, mode, p));
}
void VideoDisplay::DrawOverscanMask(float horizontal_percent, float vertical_percent) const { Vector2D v(viewport_width, viewport_height); Vector2D size = Vector2D(horizontal_percent, vertical_percent) / 2 * v; // Clockwise from top-left Vector2D corners[] = { size, Vector2D(viewport_width - size.X(), size), v - size, Vector2D(size, viewport_height - size.Y()) }; // Shift to compensate for black bars Vector2D pos(viewport_left, viewport_top); for (auto& corner : corners) corner = corner + pos; int count = 0; std::vector<float> points; for (size_t i = 0; i < 4; ++i) { size_t prev = (i + 3) % 4; size_t next = (i + 1) % 4; count += SplineCurve( (corners[prev] + corners[i] * 4) / 5, corners[i], corners[i], (corners[next] + corners[i] * 4) / 5) .GetPoints(points); } OpenGLWrapper gl; gl.SetFillColour(wxColor(30, 70, 200), .5f); gl.SetLineColour(*wxBLACK, 0, 1); std::vector<int> vstart(1, 0); std::vector<int> vcount(1, count); gl.DrawMultiPolygon(points, vstart, vcount, Vector2D(viewport_left, viewport_top), Vector2D(viewport_width, viewport_height), true); }
/*
 * Load a firmware image from /etc/firmware/<name> into kernel memory.
 *
 * On success returns 0 and hands the caller an M_DEVBUF allocation via
 * *bufp with its length in *buflen; the caller owns and must free it.
 * Fails early with EIO if the root filesystem is not yet available.
 * With RAMDISK_HOOKS, falls back to /mnt/etc/firmware/<name> so install
 * media can supply firmware from the mounted target disk.
 */
int
loadfirmware(const char *name, u_char **bufp, size_t *buflen)
{
	struct proc *p = curproc;
	struct nameidata nid;
	char *path, *ptr;
	struct iovec iov;
	struct uio uio;
	struct vattr va;
	int error;

	/* Root must be mounted and open before we can look anything up. */
	if (!rootvp || !vcount(rootvp))
		return (EIO);

	path = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT);
	if (path == NULL)
		return (ENOMEM);

	if (snprintf(path, MAXPATHLEN, "/etc/firmware/%s", name) >=
	    MAXPATHLEN) {
		error = ENAMETOOLONG;
		goto err;
	}

	NDINIT(&nid, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, path, p);
	error = namei(&nid);
#ifdef RAMDISK_HOOKS
	/* try again with mounted disk */
	if (error) {
		if (snprintf(path, MAXPATHLEN, "/mnt/etc/firmware/%s", name) >=
		    MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto err;
		}

		NDINIT(&nid, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, path, p);
		error = namei(&nid);
	}
#endif
	if (error)
		goto err;

	/* Must be a non-empty regular file of sane size. */
	error = VOP_GETATTR(nid.ni_vp, &va, p->p_ucred, p);
	if (error)
		goto fail;
	if (nid.ni_vp->v_type != VREG || va.va_size == 0) {
		error = EINVAL;
		goto fail;
	}
	if (va.va_size > FIRMWARE_MAX) {
		error = E2BIG;
		goto fail;
	}
	ptr = malloc(va.va_size, M_DEVBUF, M_NOWAIT);
	if (ptr == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/* Read the whole file in one uio. */
	iov.iov_base = ptr;
	iov.iov_len = va.va_size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = va.va_size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;
	error = VOP_READ(nid.ni_vp, &uio, 0, p->p_ucred);
	if (error == 0) {
		/* Success: transfer buffer ownership to the caller. */
		*bufp = ptr;
		*buflen = va.va_size;
	} else
		free(ptr, M_DEVBUF);

fail:
	/* namei() returned a locked, referenced vnode; release both. */
	vput(nid.ni_vp);
err:
	if (path)
		free(path, M_TEMP);
	return (error);
}
/*
 * Mount an MSDOS (FAT12/16/32) filesystem from the given block device.
 *
 * Validates the boot sector, derives all the geometry and FAT layout
 * fields of the msdosfsmount structure from the BPB, allocates and
 * fills the in-core cluster-allocation bitmap, and wires the mount into
 * 'mp'.  On any failure the device is closed and all allocations are
 * released via the error_exit path.
 */
int
msdosfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p,
    struct msdosfs_args *argp)
{
	struct msdosfsmount *pmp;
	struct buf *bp;
	dev_t dev = devvp->v_rdev;
	union bootsector *bsp;
	struct byte_bpb33 *b33;
	struct byte_bpb50 *b50;
	struct byte_bpb710 *b710;
	extern struct vnode *rootvp;
	u_int8_t SecPerClust;
	int ronly, error, bmapsiz;
	uint32_t fat_max_clusters;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	bp = NULL;	/* both used in error_exit */
	pmp = NULL;

	/*
	 * Read the boot sector of the filesystem, and then check the
	 * boot signature.  If not a dos boot sector then error out.
	 */
	if ((error = bread(devvp, 0, 4096, NOCRED, &bp)) != 0)
		goto error_exit;
	bp->b_flags |= B_AGE;
	bsp = (union bootsector *)bp->b_data;
	b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
	b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
	b710 = (struct byte_bpb710 *)bsp->bs710.bsPBP;

	pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
	pmp->pm_mountp = mp;

	/*
	 * Compute several useful quantities from the bpb in the
	 * bootsector.  Copy in the dos 5 variant of the bpb then fix up
	 * the fields that are different between dos 5 and dos 3.3.
	 */
	SecPerClust = b50->bpbSecPerClust;
	pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
	pmp->pm_ResSectors = getushort(b50->bpbResSectors);
	pmp->pm_FATs = b50->bpbFATs;
	pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
	pmp->pm_Sectors = getushort(b50->bpbSectors);
	pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
	pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
	pmp->pm_Heads = getushort(b50->bpbHeads);
	pmp->pm_Media = b50->bpbMedia;

	/* Determine the number of DEV_BSIZE blocks in a MSDOSFS sector */
	pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;

	/* Quick plausibility check before any of these are used below. */
	if (!pmp->pm_BytesPerSec || !SecPerClust ||
	    pmp->pm_SecPerTrack > 64) {
		error = EFTYPE;
		goto error_exit;
	}

	/* A zero 16-bit sector count means the 32-bit fields are in use. */
	if (pmp->pm_Sectors == 0) {
		pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
		pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
	} else {
		pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
		pmp->pm_HugeSectors = pmp->pm_Sectors;
	}

	/* No fixed root directory means FAT32. */
	if (pmp->pm_RootDirEnts == 0) {
		if (pmp->pm_Sectors || pmp->pm_FATsecs ||
		    getushort(b710->bpbFSVers)) {
			error = EINVAL;
			goto error_exit;
		}
		pmp->pm_fatmask = FAT32_MASK;
		pmp->pm_fatmult = 4;
		pmp->pm_fatdiv = 1;
		pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
		if (getushort(b710->bpbExtFlags) & FATMIRROR)
			pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
		else
			pmp->pm_flags |= MSDOSFS_FATMIRROR;
	} else
		pmp->pm_flags |= MSDOSFS_FATMIRROR;

	/*
	 * More sanity checks:
	 *	MSDOSFS sectors per cluster: >0 && power of 2
	 *	MSDOSFS sector size: >= DEV_BSIZE && power of 2
	 *	HUGE sector count: >0
	 *	FAT sectors: >0
	 */
	if ((SecPerClust == 0) || (SecPerClust & (SecPerClust - 1)) ||
	    (pmp->pm_BytesPerSec < DEV_BSIZE) ||
	    (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1)) ||
	    (pmp->pm_HugeSectors == 0) || (pmp->pm_FATsecs == 0)) {
		error = EINVAL;
		goto error_exit;
	}

	/* Convert all sector counts/offsets to DEV_BSIZE units. */
	pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
	pmp->pm_HiddenSects *= pmp->pm_BlkPerSec;
	pmp->pm_FATsecs *= pmp->pm_BlkPerSec;
	pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;
	SecPerClust *= pmp->pm_BlkPerSec;

	if (FAT32(pmp)) {
		/* FAT32: root directory lives in the cluster area. */
		pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
		pmp->pm_firstcluster = pmp->pm_fatblk +
		    (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_fsinfo = getushort(b710->bpbFSInfo) *
		    pmp->pm_BlkPerSec;
	} else {
		/* FAT12/16: fixed-size root directory follows the FATs. */
		pmp->pm_rootdirblk = pmp->pm_fatblk +
		    (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_rootdirsize = (pmp->pm_RootDirEnts *
		    sizeof(struct direntry) + DEV_BSIZE - 1) / DEV_BSIZE;
		pmp->pm_firstcluster = pmp->pm_rootdirblk +
		    pmp->pm_rootdirsize;
	}

	pmp->pm_nmbrofclusters = (pmp->pm_HugeSectors -
	    pmp->pm_firstcluster) / SecPerClust;
	pmp->pm_maxcluster = pmp->pm_nmbrofclusters + 1;
	pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE;

	/* If the BPB didn't say FAT32, decide between FAT12 and FAT16. */
	if (pmp->pm_fatmask == 0) {
		if (pmp->pm_maxcluster <=
		    ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
			/*
			 * This will usually be a floppy disk.  This size makes
			 * sure that one fat entry will not be split across
			 * multiple blocks.
			 */
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	}
	if (FAT12(pmp))
		pmp->pm_fatblocksize = 3 * pmp->pm_BytesPerSec;
	else
		pmp->pm_fatblocksize = MAXBSIZE;

	/*
	 * We now have the number of sectors in each FAT, so can work
	 * out how many clusters can be represented in a FAT.  Let's
	 * make sure the file system doesn't claim to have more clusters
	 * than this.
	 *
	 * We perform the calculation like we do to avoid integer overflow.
	 *
	 * This will give us a count of clusters.  They are numbered
	 * from 0, so the max cluster value is one less than the value
	 * we end up with.
	 */
	fat_max_clusters = pmp->pm_fatsize / pmp->pm_fatmult;
	fat_max_clusters *= pmp->pm_fatdiv;
	if (pmp->pm_maxcluster >= fat_max_clusters) {
#ifndef SMALL_KERNEL
		printf("msdosfs: reducing max cluster to %d from %d "
		    "due to FAT size\n", fat_max_clusters - 1,
		    pmp->pm_maxcluster);
#endif
		pmp->pm_maxcluster = fat_max_clusters - 1;
	}

	pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
	pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;

	/*
	 * Compute mask and shift value for isolating cluster relative byte
	 * offsets and cluster numbers from a file offset.
	 */
	pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
	pmp->pm_crbomask = pmp->pm_bpcluster - 1;
	pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;

	/*
	 * Check for valid cluster size
	 * must be a power of 2
	 */
	if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
		error = EFTYPE;
		goto error_exit;
	}

	/*
	 * Release the bootsector buffer.
	 */
	brelse(bp);
	bp = NULL;

	/*
	 * Check FSInfo.
	 */
	if (pmp->pm_fsinfo) {
		struct fsinfo *fp;

		if ((error = bread(devvp, pmp->pm_fsinfo, fsi_size(pmp),
		    NOCRED, &bp)) != 0)
			goto error_exit;
		fp = (struct fsinfo *)bp->b_data;
		if (!bcmp(fp->fsisig1, "RRaA", 4) &&
		    !bcmp(fp->fsisig2, "rrAa", 4) &&
		    !bcmp(fp->fsisig3, "\0\0\125\252", 4) &&
		    !bcmp(fp->fsisig4, "\0\0\125\252", 4))
			/* Valid FSInfo. */
			;
		else
			pmp->pm_fsinfo = 0;
		/* XXX make sure this tiny buf doesn't come back in fillinusemap! */
		SET(bp->b_flags, B_INVAL);
		brelse(bp);
		bp = NULL;
	}

	/*
	 * Check and validate (or perhaps invalidate?) the fsinfo structure? XXX
	 */

	/*
	 * Allocate memory for the bitmap of allocated clusters, and then
	 * fill it in.
	 */
	bmapsiz = (pmp->pm_maxcluster + N_INUSEBITS - 1) / N_INUSEBITS;
	if (bmapsiz == 0 || SIZE_MAX / bmapsiz < sizeof(*pmp->pm_inusemap)) {
		/* detect multiplicative integer overflow */
		error = EINVAL;
		goto error_exit;
	}
	pmp->pm_inusemap = malloc(bmapsiz * sizeof(*pmp->pm_inusemap),
	    M_MSDOSFSFAT, M_WAITOK | M_CANFAIL);
	if (pmp->pm_inusemap == NULL) {
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * fillinusemap() needs pm_devvp.
	 */
	pmp->pm_dev = dev;
	pmp->pm_devvp = devvp;

	/*
	 * Have the inuse map filled in.
	 */
	if ((error = fillinusemap(pmp)) != 0)
		goto error_exit;

	/*
	 * If they want fat updates to be synchronous then let them suffer
	 * the performance degradation in exchange for the on disk copy of
	 * the fat being correct just about all the time.  I suppose this
	 * would be a good thing to turn on if the kernel is still flakey.
	 */
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;

	/*
	 * Finish up.
	 */
	if (ronly)
		pmp->pm_flags |= MSDOSFSMNT_RONLY;
	else
		pmp->pm_fmod = 1;
	mp->mnt_data = (qaddr_t)pmp;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
#ifdef QUOTA
	/*
	 * If we ever do quotas for DOS filesystems this would be a place
	 * to fill in the info in the msdosfsmount structure.  You dolt,
	 * quotas on dos filesystems make no sense because files have no
	 * owners on dos filesystems.  of course there is some empty space
	 * in the directory entry where we could put uid's and gid's.
	 */
#endif
	devvp->v_specmountpoint = mp;

	return (0);

error_exit:
	/* Unwind: drop buffer, close the device, free partial state. */
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	(void) VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	VOP_UNLOCK(devvp, 0, p);
	if (pmp) {
		if (pmp->pm_inusemap)
			free(pmp->pm_inusemap, M_MSDOSFSFAT);
		free(pmp, M_MSDOSFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
/*
 * Device close routine.
 *
 * Dispatches to the character or block device driver's d_close entry,
 * but only on the last reference (or when the vnode is being forcibly
 * revoked via VXLOCK).  Handles the controlling-terminal session
 * reference for character devices and flushes buffered data for block
 * devices before closing.
 */
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, relock, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p != NULL && p->p_p->ps_pgrp &&
		    vp == p->p_p->ps_pgrp->pg_session->s_ttyvp) {
			vrele(vp);
			p->p_p->ps_pgrp->pg_session->s_ttyvp = NULL;
		}
		/* Clone devices have their own close path. */
		if (cdevsw[major(dev)].d_flags & D_CLONE)
			return (spec_close_clone(ap));
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.  In order to do
		 * that, we must lock the vnode.  If we are coming from
		 * vclean(), the vnode is already locked.
		 */
		if (!(vp->v_flag & VXLOCK))
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, p, 0, 0);
		if (!(vp->v_flag & VXLOCK))
			VOP_UNLOCK(vp, 0, p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* release lock if held and this isn't coming from vclean() */
	relock = VOP_ISLOCKED(vp) && !(vp->v_flag & VXLOCK);
	if (relock)
		VOP_UNLOCK(vp, 0, p);
	error = (*devclose)(dev, ap->a_fflag, mode, p);
	if (relock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	return (error);
}
/*
 * Mount a UDF filesystem from the given block device.
 *
 * Reads the Anchor Volume Descriptor Pointer at sector 256, walks the
 * Volume Descriptor Sequence to find the Partition and Logical Volume
 * descriptors, optionally resolves the Metadata partition and the VAT,
 * locates the Fileset Descriptor and verifies the root directory's
 * file entry.  On failure everything is unwound via the bail path.
 *
 * Fixes relative to the previous revision:
 *  - restored "&sector" where it had been corrupted to the mojibake
 *    "§or" (an HTML "&sect;" entity eating "&sect" of "&sector");
 *  - rejoined the "Invalid Metadata File FE" printf format string that
 *    had been split by a raw newline;
 *  - the bail path now checks ump != NULL before dereferencing
 *    ump->um_hashtbl.
 */
int
udf_mountfs(struct vnode *devvp, struct mount *mp, uint32_t lb, struct proc *p)
{
	struct buf *bp = NULL;
	struct anchor_vdp avdp;
	struct umount *ump = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct extfile_entry *xfentry;
	struct file_entry *fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	/* UDF media is mounted read-only. */
	error = VOP_OPEN(devvp, FREAD, FSCRED, p);
	if (error)
		return (error);

	ump = malloc(sizeof(*ump), M_UDFMOUNT, M_WAITOK | M_ZERO);

	mp->mnt_data = (qaddr_t) ump;
	mp->mnt_stat.f_fsid.val[0] = devvp->v_rdev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_UDF);
	mp->mnt_flag |= MNT_LOCAL;

	ump->um_mountp = mp;
	ump->um_dev = devvp->v_rdev;
	ump->um_devvp = devvp;

	bsize = 2048;	/* Should probe the media for its size. */

	/*
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(bsize), bsize, NOCRED,
	    &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data,
	    TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * Should we care about the partition type right now?
	 * What about multiple partitions?
	 */
	mvds_start = letoh32(avdp.main_vds_ex.loc);
	mvds_end = mvds_start +
	    (letoh32(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(bsize), bsize,
		    NOCRED, &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			ump->um_bsize = letoh32(lvd->lb_size);
			ump->um_bmask = ump->um_bsize - 1;
			ump->um_bshift = ffs(ump->um_bsize) - 1;
			fsd_part = letoh16(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = letoh32(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(ump, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = letoh16(pd->part_num);
			ump->um_len = ump->um_reallen = letoh32(pd->part_len);
			ump->um_start = ump->um_realstart =
			    letoh32(pd->start_loc);
		}

		brelse(bp);
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
		/* Read Metadata File 'File Entry' to find Metadata file. */
		struct long_ad *la;

		sector = ump->um_start + ump->um_meta_start; /* Set in udf_get_mpartmap() */
		if ((error = RDSECTOR(devvp, sector, ump->um_bsize,
		    &bp)) != 0) {
			printf("Cannot read sector %d for Metadata File Entry\n",
			    sector);
			error = EINVAL;
			goto bail;
		}
		xfentry = (struct extfile_entry *)bp->b_data;
		fentry = (struct file_entry *)bp->b_data;
		if (udf_checktag(&xfentry->tag, TAGID_EXTFENTRY) == 0)
			la = (struct long_ad *)&xfentry->data[letoh32(xfentry->l_ea)];
		else if (udf_checktag(&fentry->tag, TAGID_FENTRY) == 0)
			la = (struct long_ad *)&fentry->data[letoh32(fentry->l_ea)];
		else {
			printf("Invalid Metadata File FE @ sector %d! (tag.id %d)\n",
			    sector, fentry->tag.id);
			error = EINVAL;
			goto bail;
		}
		ump->um_meta_start = letoh32(la->loc.lb_num);
		ump->um_meta_len = letoh32(la->len);
		if (bp != NULL) {
			brelse(bp);
			bp = NULL;
		}
	} else if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}

	mtx_init(&ump->um_hashmtx, IPL_NONE);
	ump->um_hashtbl = hashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, M_WAITOK,
	    &ump->um_hashsz);

	/* Get the VAT, if needed */
	if (ump->um_flags & UDF_MNT_FIND_VAT) {
		error = udf_vat_get(ump, lb);
		if (error)
			goto bail;
	}

	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */
	if (ISSET(ump->um_flags, UDF_MNT_USES_META))
		sector = ump->um_meta_start;
	else
		sector = fsd_offset;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &ump->um_root_icb,
		    sizeof(struct long_ad));
		if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
			ump->um_root_icb.loc.lb_num += ump->um_meta_start;
			ump->um_root_icb.loc.part_num = part_num;
		}
	}

	brelse(bp);
	bp = NULL;

	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = letoh32(ump->um_root_icb.loc.lb_num);
	size = letoh32(ump->um_root_icb.len);
	udf_vat_map(ump, &sector);
	if ((error = udf_readlblks(ump, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	xfentry = (struct extfile_entry *)bp->b_data;
	fentry = (struct file_entry *)bp->b_data;
	error = udf_checktag(&xfentry->tag, TAGID_EXTFENTRY);
	if (error) {
		error = udf_checktag(&fentry->tag, TAGID_FENTRY);
		if (error) {
			printf("Invalid root file entry!\n");
			goto bail;
		}
	}

	brelse(bp);
	bp = NULL;

	devvp->v_specmountpoint = mp;

	return (0);

bail:
	/* Unwind in reverse order; ump may legitimately be NULL here. */
	if (ump != NULL) {
		if (ump->um_hashtbl != NULL)
			free(ump->um_hashtbl, M_UDFMOUNT);
		free(ump, M_UDFMOUNT);
		mp->mnt_data = NULL;
		mp->mnt_flag &= ~MNT_LOCAL;
	}
	if (bp != NULL)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	VOP_CLOSE(devvp, FREAD, FSCRED, p);
	VOP_UNLOCK(devvp, 0, p);
	return (error);
}
/*
 * Common code for mount and mountroot
 *
 * Locates and validates the FFS superblock (trying each candidate
 * location in sbtry[]), builds the in-core ufsmount and cylinder-group
 * summary information, and finishes the mount.  On failure the device
 * is closed and partial allocations are released via the out path.
 */
int
ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t space;
	ufs2_daddr_t sbloc;
	int error, i, blks, size, ronly;
	int32_t *lp;
	size_t strsize;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	/* Sector size from the disklabel; fall back to DEV_BSIZE. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;

	/*
	 * Try reading the super-block in each of its possible locations.
	 */
	for (i = 0; sbtry[i] != -1; i++) {
		if (bp != NULL) {
			bp->b_flags |= B_NOCACHE;
			brelse(bp);
			bp = NULL;
		}

		error = bread(devvp, sbtry[i] / size, SBSIZE, cred, &bp);
		if (error)
			goto out;

		fs = (struct fs *) bp->b_data;
		sbloc = sbtry[i];
#if 0
		if (fs->fs_magic == FS_UFS2_MAGIC) {
			printf("ffs_mountfs(): Sorry, no UFS2 support (yet)\n");
			error = EFTYPE;
			goto out;
		}
#endif

		/*
		 * Do not look for an FFS1 file system at SBLOCK_UFS2. Doing so
		 * will find the wrong super-block for file systems with 64k
		 * block size.
		 */
		if (fs->fs_magic == FS_UFS1_MAGIC && sbloc == SBLOCK_UFS2)
			continue;

		if (ffs_validate(fs))
			break; /* Super block validated */
	}

	if (sbtry[i] == -1) {
		error = EINVAL;
		goto out;
	}

	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
#if 0
		/*
		 * It is safe mount unclean file system
		 * if it was previously mounted with softdep
		 * but we may loss space and must
		 * sometimes run fsck manually.
		 */
		if (fs->fs_flags & FS_DOSOFTDEP)
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		else
#endif
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		} else {
			printf(
"WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",
			    fs->fs_fsmnt);
			error = EROFS;
			goto out;
		}
	}

	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
#ifndef SMALL_KERNEL
		printf("ffs_mountfs(): obsolete rotational table format, "
		    "please use fsck_ffs(8) -c 1\n");
#endif
		error = EFTYPE;
		goto out;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero(ump, sizeof *ump);
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	if (fs->fs_magic == FS_UFS1_MAGIC)
		ump->um_fstype = UM_UFS1;
#ifdef FFS2
	else
		ump->um_fstype = UM_UFS2;
#endif

	/* Keep a private in-core copy of the superblock. */
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;

	ffs1_compat_read(fs, ump, sbloc);

	fs->fs_ronly = ronly;
	/* Read the cylinder-group summary area into one allocation. */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = (struct csum *)space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		/* Cluster summary counts follow the csum area. */
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	/* Use on-disk fsid if it exists, else fake it */
	if (fs->fs_id[0] != 0 && fs->fs_id[1] != 0)
		mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	else
		mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
	ffs_oldfscompat(fs);

	if (ronly)
		fs->fs_contigdirs = NULL;
	else {
		fs->fs_contigdirs = (u_int8_t*)malloc((u_long)fs->fs_ncg,
		    M_UFSMNT, M_WAITOK);
		bzero(fs->fs_contigdirs, fs->fs_ncg);
	}

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	copystr(mp->mnt_stat.f_mntonname,	/* mount point*/
	    fs->fs_fsmnt,			/* copy area*/
	    sizeof(fs->fs_fsmnt) - 1,		/* max size*/
	    &strsize);				/* real size*/
	bzero(fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

#if 0
	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}
#endif

	/*
	 * XXX
	 * Limit max file size.  Even though ffs can handle files up to 16TB,
	 * we do limit the max file to 2^31 pages to prevent overflow of
	 * a 32-bit unsigned int.  The buffer cache has its own checks but
	 * a little added paranoia never hurts.
	 */
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * MIN(PAGE_SIZE, fs->fs_bsize) - 1;
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0) {
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			free(fs->fs_contigdirs, M_UFSMNT);
			goto out;
		}
		/* Mark the filesystem dirty until a clean unmount. */
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags |= FS_DOSOFTDEP;
		else
			fs->fs_flags &= ~FS_DOSOFTDEP;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);
out:
	/* Unwind: drop buffer, close device, free partial state. */
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
/*
 * Common code for mount and mountroot
 *
 * Reads and validates the ext2 superblock, builds the in-core
 * m_ext2fs structure (including the derived block-size/shift fields
 * and the group descriptor table), and attaches the result to 'mp'.
 * On failure the device is closed and partial allocations released
 * via the out path.
 */
int
ext2fs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct ext2fs *fs;
	struct m_ext2fs *m_fs;
	dev_t dev;
	int error, i, ronly;
	struct ucred *cred;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED);
	if (error)
		return (error);

	bp = NULL;
	ump = NULL;
#ifdef DEBUG_EXT2
	printf("ext2 sb size: %d\n", sizeof(struct ext2fs));
#endif
	/* The superblock always lives at byte offset SBOFF. */
	error = bread(devvp, (daddr_t)(SBOFF / DEV_BSIZE), SBSIZE, &bp);
	if (error)
		goto out;
	fs = (struct ext2fs *)bp->b_data;
	error = ext2fs_checksb(fs, ronly);
	if (error)
		goto out;
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_UFSMNT,
	    M_WAITOK | M_ZERO);
	/* Byte-swap the on-disk superblock into the in-core copy. */
	e2fs_sbload((struct ext2fs*)bp->b_data, &ump->um_e2fs->e2fs);
	brelse(bp);
	bp = NULL;
	m_fs = ump->um_e2fs;
	m_fs->e2fs_ronly = ronly;
	ump->um_fstype = UM_EXT2FS;

#ifdef DEBUG_EXT2
	printf("ext2 ino size %d\n", EXT2_DINODE_SIZE(m_fs));
#endif
	if (ronly == 0) {
		/* Mark unclean while mounted read-write. */
		if (m_fs->e2fs.e2fs_state == E2FS_ISCLEAN)
			m_fs->e2fs.e2fs_state = 0;
		else
			m_fs->e2fs.e2fs_state = E2FS_ERRORS;
		m_fs->e2fs_fmod = 1;
	}

	/* compute dynamic sb infos */
	m_fs->e2fs_ncg = howmany(m_fs->e2fs.e2fs_bcount -
	    m_fs->e2fs.e2fs_first_dblock, m_fs->e2fs.e2fs_bpg);
	/* XXX assume hw bsize = 512 */
	m_fs->e2fs_fsbtodb = m_fs->e2fs.e2fs_log_bsize + 1;
	m_fs->e2fs_bsize = 1024 << m_fs->e2fs.e2fs_log_bsize;
	m_fs->e2fs_bshift = LOG_MINBSIZE + m_fs->e2fs.e2fs_log_bsize;
	m_fs->e2fs_qbmask = m_fs->e2fs_bsize - 1;
	m_fs->e2fs_bmask = ~m_fs->e2fs_qbmask;
	m_fs->e2fs_ngdb = howmany(m_fs->e2fs_ncg, m_fs->e2fs_bsize /
	    sizeof(struct ext2_gd));
	m_fs->e2fs_ipb = m_fs->e2fs_bsize / EXT2_DINODE_SIZE(m_fs);
	m_fs->e2fs_itpg = m_fs->e2fs.e2fs_ipg/m_fs->e2fs_ipb;

	/* Read the group descriptor table, one fs block at a time. */
	m_fs->e2fs_gd = malloc(m_fs->e2fs_ngdb * m_fs->e2fs_bsize,
	    M_UFSMNT, M_WAITOK);
	for (i=0; i < m_fs->e2fs_ngdb; i++) {
		/* GD table starts in block 1 (or 2 for 1k blocks). */
		error = bread(devvp ,
		    fsbtodb(m_fs, ((m_fs->e2fs_bsize>1024)? 0 : 1) + i + 1),
		    m_fs->e2fs_bsize, &bp);
		if (error) {
			free(m_fs->e2fs_gd, M_UFSMNT);
			goto out;
		}
		e2fs_cgload((struct ext2_gd*)bp->b_data,
		    &m_fs->e2fs_gd[i * m_fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    m_fs->e2fs_bsize);
		brelse(bp);
		bp = NULL;
	}

	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = NINDIR(m_fs);
	ump->um_bptrtodb = m_fs->e2fs_fsbtodb;
	ump->um_seqinc = 1; /* no frags */
	devvp->v_specmountpoint = mp;
	return (0);
out:
	/* Unwind: drop buffer, close device, free partial state. */
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump->um_e2fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
/*
 * Device close routine (Darwin/XNU variant).
 *
 * Dispatches to the character or block driver's d_close, but for block
 * devices only on the last reference, after flushing and invalidating
 * buffered data.  For character devices that are the closing process'
 * controlling terminal, the session's tty reference is torn down here.
 */
int
spec_close(struct vnop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;
	int flags = ap->a_fflag;
	struct proc *p = vfs_context_proc(ap->a_context);
	struct session *sessp;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 1 (this is the very
		 * last close)
		 */
		sessp = proc_session(p);
		if (sessp != SESSION_NULL) {
			if ((vcount(vp) == 1) &&
			    (vp == sessp->s_ttyvp)) {
				/* Detach the tty from the session. */
				session_lock(sessp);
				sessp->s_ttyvp = NULL;
				sessp->s_ttyvid = 0;
				sessp->s_ttyp = TTY_NULL;
				sessp->s_ttypgrpid = NO_PID;
				session_unlock(sessp);
				vnode_rele(vp);
			}
			session_rele(sessp);
		}

		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		/*
		 * close on last reference or on vnode revoke call
		 */
		if ((flags & IO_REVOKE) != 0)
			break;
		if (vcount(vp) > 0)
			return (0);
		break;

	case VBLK:
		/*
		 * Since every use (buffer, vnode, swap, blockmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to zero, we are on last close.
		 */
		if (vcount(vp) > 0)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
			return (error);

		error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
		if (error)
			return (error);

		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
		return(EBADF);
	}

	return ((*devclose)(dev, flags, mode, p));
}
/* * Common code for mount and mountroot */ int ntfs_mountfs(struct vnode *devvp, struct mount *mp, struct ntfs_args *argsp, struct ucred *cred) { struct buf *bp; struct ntfsmount *ntmp; cdev_t dev; int error, ronly, ncount, i; struct vnode *vp; char cs_local[ICONV_CSNMAXLEN]; char cs_ntfs[ICONV_CSNMAXLEN]; /* * Disallow multiple mounts of the same device. * Disallow mounting of a device that is currently in use * (except for root, which might share swap device for miniroot). * Flush out any old buffers remaining from a previous use. */ error = vfs_mountedon(devvp); if (error) return (error); ncount = vcount(devvp); if (devvp->v_object) ncount -= 1; if (ncount > 1) return (EBUSY); VN_LOCK(devvp, LK_EXCLUSIVE | LK_RETRY); error = vinvalbuf(devvp, V_SAVE, 0, 0); VOP__UNLOCK(devvp, 0); if (error) return (error); ronly = (mp->mnt_flag & MNT_RDONLY) != 0; VN_LOCK(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, NULL); VOP__UNLOCK(devvp, 0); if (error) return (error); dev = devvp->v_rdev; bp = NULL; error = bread(devvp, BBLOCK, BBSIZE, &bp); if (error) goto out; ntmp = kmalloc(sizeof *ntmp, M_NTFSMNT, M_WAITOK | M_ZERO); bcopy( bp->b_data, &ntmp->ntm_bootfile, sizeof(struct bootfile) ); /* * We must not cache the boot block if its size is not exactly * one cluster in order to avoid confusing the buffer cache when * the boot file is read later by ntfs_readntvattr_plain(), which * reads a cluster at a time. 
*/ if (ntfs_cntob(1) != BBSIZE) bp->b_flags |= B_NOCACHE; brelse( bp ); bp = NULL; if (strncmp(ntmp->ntm_bootfile.bf_sysid, NTFS_BBID, NTFS_BBIDLEN)) { error = EINVAL; dprintf(("ntfs_mountfs: invalid boot block\n")); goto out; } { int8_t cpr = ntmp->ntm_mftrecsz; if( cpr > 0 ) ntmp->ntm_bpmftrec = ntmp->ntm_spc * cpr; else ntmp->ntm_bpmftrec = (1 << (-cpr)) / ntmp->ntm_bps; } dprintf(("ntfs_mountfs(): bps: %d, spc: %d, media: %x, mftrecsz: %d (%d sects)\n", ntmp->ntm_bps,ntmp->ntm_spc,ntmp->ntm_bootfile.bf_media, ntmp->ntm_mftrecsz,ntmp->ntm_bpmftrec)); dprintf(("ntfs_mountfs(): mftcn: 0x%x|0x%x\n", (u_int32_t)ntmp->ntm_mftcn,(u_int32_t)ntmp->ntm_mftmirrcn)); ntmp->ntm_mountp = mp; ntmp->ntm_dev = dev; ntmp->ntm_devvp = devvp; ntmp->ntm_uid = argsp->uid; ntmp->ntm_gid = argsp->gid; ntmp->ntm_mode = argsp->mode; ntmp->ntm_flag = argsp->flag; if (argsp->flag & NTFS_MFLAG_KICONV && ntfs_iconv) { bcopy(argsp->cs_local, cs_local, sizeof(cs_local)); bcopy(argsp->cs_ntfs, cs_ntfs, sizeof(cs_ntfs)); ntfs_82u_init(ntmp, cs_local, cs_ntfs); ntfs_u28_init(ntmp, NULL, cs_local, cs_ntfs); } else { ntfs_82u_init(ntmp, NULL, NULL); ntfs_u28_init(ntmp, ntmp->ntm_82u, NULL, NULL); } mp->mnt_data = (qaddr_t)ntmp; dprintf(("ntfs_mountfs(): case-%s,%s uid: %d, gid: %d, mode: %o\n", (ntmp->ntm_flag & NTFS_MFLAG_CASEINS)?"insens.":"sens.", (ntmp->ntm_flag & NTFS_MFLAG_ALLNAMES)?" allnames,":"", ntmp->ntm_uid, ntmp->ntm_gid, ntmp->ntm_mode)); vfs_add_vnodeops(mp, &ntfs_vnode_vops, &mp->mnt_vn_norm_ops); /* * We read in some system nodes to do not allow * reclaim them and to have everytime access to them. 
*/ { int pi[3] = { NTFS_MFTINO, NTFS_ROOTINO, NTFS_BITMAPINO }; for (i=0; i<3; i++) { error = VFS_VGET(mp, NULL, pi[i], &(ntmp->ntm_sysvn[pi[i]])); if(error) goto out1; vsetflags(ntmp->ntm_sysvn[pi[i]], VSYSTEM); vref(ntmp->ntm_sysvn[pi[i]]); vput(ntmp->ntm_sysvn[pi[i]]); } } /* read the Unicode lowercase --> uppercase translation table, * if necessary */ if ((error = ntfs_toupper_use(mp, ntmp))) goto out1; /* * Scan $BitMap and count free clusters */ error = ntfs_calccfree(ntmp, &ntmp->ntm_cfree); if(error) goto out1; /* * Read and translate to internal format attribute * definition file. */ { int num,j; struct attrdef ad; /* Open $AttrDef */ error = VFS_VGET(mp, NULL, NTFS_ATTRDEFINO, &vp); if(error) goto out1; /* Count valid entries */ for(num=0;;num++) { error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL, num * sizeof(ad), sizeof(ad), &ad, NULL); if (error) goto out1; if (ad.ad_name[0] == 0) break; } /* Alloc memory for attribute definitions */ ntmp->ntm_ad = kmalloc(num * sizeof(struct ntvattrdef), M_NTFSMNT, M_WAITOK); ntmp->ntm_adnum = num; /* Read them and translate */ for(i=0;i<num;i++){ error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL, i * sizeof(ad), sizeof(ad), &ad, NULL); if (error) goto out1; j = 0; do { ntmp->ntm_ad[i].ad_name[j] = ad.ad_name[j]; } while(ad.ad_name[j++]); ntmp->ntm_ad[i].ad_namelen = j - 1; ntmp->ntm_ad[i].ad_type = ad.ad_type; } vput(vp); } mp->mnt_stat.f_fsid.val[0] = dev2udev(dev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; mp->mnt_maxsymlinklen = 0; mp->mnt_flag |= MNT_LOCAL; dev->si_mountpoint = mp; return (0); out1: for(i=0;i<NTFS_SYSNODESNUM;i++) if(ntmp->ntm_sysvn[i]) vrele(ntmp->ntm_sysvn[i]); if (vflush(mp, 0, 0)) dprintf(("ntfs_mountfs: vflush failed\n")); out: dev->si_mountpoint = NULL; if (bp) brelse(bp); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NULL); vn_unlock(devvp); return (error); }
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() or single-threaded
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;		/* set once si_mountpoint points at mp, for unwind */

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode: either look the name up ourselves, or
	 * adopt the caller-supplied devvp (consuming its reference).
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	/*
	 * Reject devices that are already mounted or otherwise in use,
	 * then flush stale buffers and open the device.
	 */
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		/* hammer_free_volume() also drops the devvp ref we hold. */
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	/* First volume establishes the fsid; later ones must match it. */
	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume .  HAMMER special cases rootvol the structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		/* Release the header buffer before touching mount statistics. */
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/* Unwind: clear si_mountpoint if we set it, close and free. */
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * Common code for mount and mountroot
 *
 * Mount an ext2 filesystem from 'devvp' onto 'mp': verify exclusive access
 * to the device, open it, read and validate the on-disk superblock, build
 * the in-core superblock/group-descriptor state (e2fs_sbfill), and wire the
 * resulting ufsmount into the mount point.
 *
 * devvp  device vnode (unlocked on entry; opened here, closed on failure)
 * mp     mount point being set up
 * p      calling process (may be NULL; then NOCRED is used)
 *
 * Returns 0 on success or an errno; on failure all allocations are freed
 * and the device is closed.
 */
int
ext2fs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct ext2fs *fs;
	dev_t dev;
	int error, ronly;
	struct ucred *cred;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	bp = NULL;
	ump = NULL;

	/*
	 * Read the superblock from disk.
	 */
	error = bread(devvp, (daddr_t)(SBOFF / DEV_BSIZE), SBSIZE, &bp);
	if (error)
		goto out;
	fs = (struct ext2fs *)bp->b_data;
	error = e2fs_sbcheck(fs, ronly);
	if (error)
		goto out;

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_UFSMNT,
	    M_WAITOK | M_ZERO);

	/*
	 * Copy in the superblock, compute in-memory values
	 * and load group descriptors.
	 */
	e2fs_sbload(fs, &ump->um_e2fs->e2fs);
	if ((error = e2fs_sbfill(devvp, ump->um_e2fs)) != 0)
		goto out;
	brelse(bp);
	bp = NULL;
	/* From here on 'fs' refers to the in-core copy, not the buffer. */
	fs = &ump->um_e2fs->e2fs;
	ump->um_e2fs->e2fs_ronly = ronly;
	ump->um_fstype = UM_EXT2FS;

	if (ronly == 0) {
		/*
		 * Mark the fs dirty for the duration of a read-write mount;
		 * a previously unclean fs is flagged with E2FS_ERRORS.
		 */
		if (fs->e2fs_state == E2FS_ISCLEAN)
			fs->e2fs_state = 0;
		else
			fs->e2fs_state = E2FS_ERRORS;
		ump->um_e2fs->e2fs_fmod = 1;
	}

	/* Wire the in-core state into the mount point. */
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = NINDIR(ump->um_e2fs);
	ump->um_bptrtodb = ump->um_e2fs->e2fs_fsbtodb;
	ump->um_seqinc = 1; /* no frags */
	ump->um_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	devvp->v_specmountpoint = mp;

	return (0);

out:
	/* Error unwind: release buffer, close the device, free allocations. */
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, p);
	if (ump) {
		free(ump->um_e2fs, M_UFSMNT, sizeof *ump->um_e2fs);
		free(ump, M_UFSMNT, sizeof *ump);
		mp->mnt_data = NULL;
	}
	return (error);
}