Example no. 1
        /* object has an underlying thing to poll */
        if (realfp->f_op && realfp->f_op->poll) {
            mask = (*realfp->f_op->poll)(realfp, pt_p);
        }
    }
    return mask;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,36)
long
vnode_fop_ioctl(
    FILE_T *file_p,
    uint cmd,
    ulong arg
)
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,36) */
int
vnode_fop_ioctl(
    INODE_T *ino_p,
    FILE_T *file_p,
    uint cmd,
    ulong arg
)
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,36) */
{
    int err;
    int rval;                           /* unused */
    CALL_DATA_T cd;
    struct ioctl_ctx ctx;

    ASSERT_KERNEL_LOCKED();
    mdki_linux_init_call_data(&cd);
    ctx.filp = file_p;
    ctx.caller_is_32bit = 0;            /* unknown as of yet */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,36)
    err = VOP_IOCTL(ITOV(file_p->f_path.dentry->d_inode), cmd, (void *)arg, 0, &cd, &rval, NULL, &ctx);
#else
    err = VOP_IOCTL(ITOV(ino_p), cmd, (void *)arg, 0, &cd, &rval, NULL, &ctx);
#endif
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);
    return err;
}
Example no. 2
static int
unionfs_ioctl(void *v)
{
	struct vop_ioctl_args *ap = v;
	int error;
	struct unionfs_node *unp;
	struct unionfs_node_status *unsp;
	struct vnode   *ovp;

	UNIONFS_INTERNAL_DEBUG("unionfs_ioctl: enter\n");

 	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
	unp = VTOUNIONFS(ap->a_vp);
	unionfs_get_node_status(unp, &unsp);
	ovp = (unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp);
	unionfs_tryrem_node_status(unp, unsp);
	VOP_UNLOCK(ap->a_vp);

	if (ovp == NULLVP)
		return (EBADF);

	error = VOP_IOCTL(ovp, ap->a_command, ap->a_data, ap->a_fflag,
	    ap->a_cred);

	UNIONFS_INTERNAL_DEBUG("unionfs_ioctl: lease (%d)\n", error);

	return (error);
}
Example no. 3
int
wapbl_getdisksize(struct vnode *vp, uint64_t *numsecp, unsigned int *secsizep)
{
	struct partinfo dpart;
	unsigned int secsize;
	uint64_t numsec;
	int error;

	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, NOCRED);
	if (error == 0) {
		secsize = dpart.disklab->d_secsize;
		numsec  = dpart.part->p_size;
	}

	if (error == 0 &&
	    (secsize == 0 || secsize > MAXBSIZE || !powerof2(secsize) ||
	     numsec == 0)) {
#ifdef DIAGNOSTIC
		printf("%s: vnode %p returns invalid disksize values"
		    " (secsize = %u, numsec = %llu)\n", __func__, vp,
		    secsize, numsec);
#endif
		error = EINVAL;
	}
	if (error == 0) {
		*secsizep = secsize;
		*numsecp  = numsec;
	}

	return error;
}
static void
udf_discstrat_finish_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	int error;

	if (ump == NULL)
		return;

	/* stop our scheduling thread */
	KASSERT(priv->run_thread == 1);
	priv->run_thread = 0;
	wakeup(priv->queue_lwp);
	do {
		error = tsleep(&priv->run_thread, PRIBIO+1,
			"udfshedfin", hz);
	} while (error);
	/* kthread should be finished now */

	/* set back old device strategy method */
	VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &priv->old_strategy_setting,
			FWRITE, NOCRED);

	/* destroy our pool */
	pool_destroy(&priv->desc_pool);

	mutex_destroy(&priv->discstrat_mutex);
	cv_destroy(&priv->discstrat_cv);

	/* free our private space */
	free(ump->strategy_private, M_UDFTEMP);
	ump->strategy_private = NULL;
}
Example no. 5
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (DK_ATTACHED(dksc))
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc))
			return ENOENT;
		/*FALLTHROUGH*/
	default:
		return dk_ioctl(dksc, dev, cmd, data, flag, l);
	case CGDIOCGET:
		KASSERT(0);
		return EINVAL;
	}
}
/*
 * Sync underlying disk caches.
 */
int
dm_target_snapshot_orig_sync(dm_table_entry_t * table_en)
{
	int cmd;
	dm_target_snapshot_origin_config_t *tsoc;

	tsoc = table_en->target_config;

	cmd = 1;

	return VOP_IOCTL(tsoc->tsoc_real_dev->pdev_vnode,  DIOCCACHESYNC, &cmd, FREAD|FWRITE, kauth_cred_get());
}
Example no. 7
STATIC void
linvfs_unfreeze_fs(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	vnode_t			*vp;
	int			error;

	VFS_ROOT(vfsp, &vp, error);
	VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, 0, XFS_IOC_THAW, 0, error);
	VN_RELE(vp);
}
Example no. 8
/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIBMAP)
			return VOP_IOCTL(vp, com, data, fp->f_flag,
					 p->p_ucred, p);
		if (com == FIONBIO || com == FIOASYNC)  /* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	default:
		return (ENOTTY);
		
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}
Example no. 9
/*
 * Helper function for findroot():
 * Return non-zero if disk device matches bootinfo.
 */
static int
match_bootdisk(device_t dv, struct btinfo_bootdisk *bid)
{
	struct vnode *tmpvn;
	int error;
	struct disklabel label;
	int found = 0;

	if (device_is_a(dv, "dk")) {
		DPRINTF(("%s: dk %s\n", __func__, device_xname(dv)));
		return 0;
	}

	/*
	 * A disklabel is required here.  The boot loader doesn't refuse
	 * to boot from a disk without a label, but this is normally not
	 * wanted.
	 */
	if (bid->labelsector == -1) {
		DPRINTF(("%s: no label %s\n", __func__, device_xname(dv)));
		return 0;
	}
	
	if ((tmpvn = opendisk(dv)) == NULL) {
		DPRINTF(("%s: can't open %s\n", __func__, device_xname(dv)));
		return 0;
	}

	error = VOP_IOCTL(tmpvn, DIOCGDINFO, &label, FREAD, NOCRED);
	if (error) {
		/*
		 * XXX Can't happen -- open() would have errored out
		 * or faked one up.
		 */
		printf("%s: can't get label for dev %s (%d)\n", __func__,
		    device_xname(dv), error);
		goto closeout;
	}

	/* Compare with our data. */
	if (label.d_type == bid->label.type &&
	    label.d_checksum == bid->label.checksum &&
	    strncmp(label.d_packname, bid->label.packname, 16) == 0)
		found = 1;

	DPRINTF(("%s: %s found=%d\n", __func__, device_xname(dv), found));
 closeout:
	VOP_CLOSE(tmpvn, FREAD, NOCRED);
	vput(tmpvn);
	return (found);
}
Example no. 10
STATIC void
linvfs_freeze_fs(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	vnode_t			*vp;
	int			error;

	if (sb->s_flags & MS_RDONLY)
		return;
	VFS_ROOT(vfsp, &vp, error);
	VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, 0, XFS_IOC_FREEZE, 0, error);
	VN_RELE(vp);
}
Example no. 11
int
RUMP_VOP_IOCTL(struct vnode *vp,
    u_long command,
    void *data,
    int fflag,
    struct kauth_cred *cred)
{
	int error;

	rump_schedule();
	error = VOP_IOCTL(vp, command, data, fflag, cred);
	rump_unschedule();

	return error;
}
Example no. 12
/*ARGSUSED*/
int
cttyioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
    struct vnode *ttyvp = cttyvp(p);

    if (ttyvp == NULL)
        return (EIO);
    if (cmd == TIOCSCTTY)		/* XXX */
        return (EINVAL);
    if (cmd == TIOCNOTTY) {
        if (!SESS_LEADER(p)) {
            atomic_clearbits_int(&p->p_flag, P_CONTROLT);
            return (0);
        } else
            return (EINVAL);
    }
    return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED, p));
}
Example no. 13
/*ARGSUSED*/
static	int
cttyioctl(struct dev_ioctl_args *ap)
{
	struct vnode *ttyvp;
	struct proc *p = curproc;

	KKASSERT(p);
	lwkt_gettoken(&p->p_token);
	lwkt_gettoken(&proc_token);
	ttyvp = cttyvp(p);
	if (ttyvp == NULL) {
		lwkt_reltoken(&proc_token);
		lwkt_reltoken(&p->p_token);
		return (EIO);
	}
	/*
	 * Don't allow controlling tty to be set to the controlling tty
	 * (infinite recursion).
	 */
	if (ap->a_cmd == TIOCSCTTY) {
		lwkt_reltoken(&proc_token);
		lwkt_reltoken(&p->p_token);
		return EINVAL;
	}
	if (ap->a_cmd == TIOCNOTTY) {
		if (!SESS_LEADER(p)) {
			p->p_flags &= ~P_CONTROLT;
			lwkt_reltoken(&proc_token);
			lwkt_reltoken(&p->p_token);
			return (0);
		} else {
			lwkt_reltoken(&proc_token);
			lwkt_reltoken(&p->p_token);
			return (EINVAL);
		}
	}
	lwkt_reltoken(&proc_token);
	lwkt_reltoken(&p->p_token);

	return (VOP_IOCTL(ttyvp, ap->a_cmd, ap->a_data, ap->a_fflag,
			  ap->a_cred, ap->a_sysmsg));
}
Example no. 14
STATIC long
xfs_file_ioctl_invis(
	struct file	*filp,
	unsigned int	cmd,
	unsigned long	arg)
{
	struct inode	*inode = filp->f_dentry->d_inode;
	vnode_t		*vp = vn_from_inode(inode);
	int		error;

	VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
	VMODIFY(vp);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}
Example no. 15
STATIC int linvfs_ioctl(
	struct inode	*inode,
	struct file	*filp,
	unsigned int	cmd,
	unsigned long	arg)
{
	int	error;
	vnode_t	*vp = LINVFS_GET_VP(inode);


	ASSERT(vp);

	VOP_IOCTL(vp, inode, filp, cmd, arg, error);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}
Example no. 16
/*ARGSUSED*/
static int
cttyioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct vnode *ttyvp = cttyvp(l->l_proc);
	int rv;

	if (ttyvp == NULL)
		return (EIO);
	if (cmd == TIOCSCTTY)		/* XXX */
		return (EINVAL);
	if (cmd == TIOCNOTTY) {
		mutex_enter(proc_lock);
		if (!SESS_LEADER(l->l_proc)) {
			l->l_proc->p_lflag &= ~PL_CONTROLT;
			rv = 0;
		} else
			rv = EINVAL;
		mutex_exit(proc_lock);
		return (rv);
	}
	return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED));
}
Example no. 17
/*
 * Common code for mount and mountroot
 */
static int
iso_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l,
	struct iso_args *argp)
{
	struct iso_mnt *isomp = (struct iso_mnt *)0;
	struct buf *bp = NULL, *pribp = NULL, *supbp = NULL;
	dev_t dev = devvp->v_rdev;
	int error = EINVAL;
	int ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	int iso_bsize;
	int iso_blknum;
	int joliet_level;
	struct iso_volume_descriptor *vdp;
	struct iso_supplementary_descriptor *sup;
	int sess = 0;
	int ext_attr_length;
	struct disklabel label;

	if (!ronly)
		return EROFS;

	/* Flush out any old buffers remaining from a previous use. */
	if ((error = vinvalbuf(devvp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
		return (error);

	/* This is the "logical sector size".  The standard says this
	 * should be 2048 or the physical sector size on the device,
	 * whichever is greater.  For now, we'll just use a constant.
	 */
	iso_bsize = ISO_DEFAULT_BLOCK_SIZE;

	error = VOP_IOCTL(devvp, DIOCGDINFO, &label, FREAD, FSCRED);
	if (!error) {
		/* XXX more sanity checks? */
		sess = label.d_partitions[DISKPART(dev)].p_cdsession;
	} else {
		/* fallback to old method */
		error = VOP_IOCTL(devvp, CDIOREADMSADDR, &sess, 0, FSCRED);
		if (error)
			sess = 0;	/* never mind */
	}
#ifdef ISO_DEBUG
	printf("isofs: session offset (part %"PRId32") %d\n", DISKPART(dev), sess);
#endif

	for (iso_blknum = 16; iso_blknum < 100; iso_blknum++) {
		if ((error = bread(devvp, (iso_blknum+sess) * btodb(iso_bsize),
				   iso_bsize, NOCRED, 0, &bp)) != 0)
			goto out;

		vdp = (struct iso_volume_descriptor *)bp->b_data;
		if (memcmp(vdp->id, ISO_STANDARD_ID, sizeof(vdp->id)) != 0) {
			error = EINVAL;
			goto out;
		}

		switch (isonum_711(vdp->type)) {
		case ISO_VD_PRIMARY:
			if (pribp == NULL) {
				pribp = bp;
				bp = NULL;
			}
			break;

		case ISO_VD_SUPPLEMENTARY:
			if (supbp == NULL) {
				supbp = bp;
				bp = NULL;
			}
			break;

		default:
			break;
		}

		if (isonum_711 (vdp->type) == ISO_VD_END) {
			brelse(bp, 0);
			bp = NULL;
			break;
		}

		if (bp != NULL) {
			brelse(bp, 0);
			bp = NULL;
		}
	}

	if (pribp == NULL) {
		error = EINVAL;
		goto out;
	}

	isomp = malloc(sizeof *isomp, M_ISOFSMNT, M_WAITOK);
	memset(isomp, 0, sizeof *isomp);
	if (iso_makemp(isomp, pribp, &ext_attr_length) == -1) {
		error = EINVAL;
		goto out;
	}

	isomp->volume_space_size += sess;

	brelse(pribp, BC_AGE);
	pribp = NULL;

	mp->mnt_data = isomp;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_CD9660);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = ISO_MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_iflag |= IMNT_MPSAFE;
	mp->mnt_dev_bshift = iso_bsize;
	mp->mnt_fs_bshift = isomp->im_bshift;
	isomp->im_mountp = mp;
	isomp->im_dev = dev;
	isomp->im_devvp = devvp;

	/* Check the Rock Ridge Extension support */
	if (!(argp->flags & ISOFSMNT_NORRIP)) {
		struct iso_directory_record *rootp;

		if ((error = bread(isomp->im_devvp,
				   (isomp->root_extent + ext_attr_length) <<
				   (isomp->im_bshift - DEV_BSHIFT),
				   isomp->logical_block_size, NOCRED,
				   0, &bp)) != 0)
		    goto out;

		rootp = (struct iso_directory_record *)bp->b_data;

		if ((isomp->rr_skip = cd9660_rrip_offset(rootp,isomp)) < 0) {
		    argp->flags  |= ISOFSMNT_NORRIP;
		} else {
		    argp->flags  &= ~ISOFSMNT_GENS;
		}

		/*
		 * The contents are valid,
		 * but they will get reread as part of another vnode, so...
		 */
		brelse(bp, BC_AGE);
		bp = NULL;
	}
	isomp->im_flags = argp->flags & (ISOFSMNT_NORRIP | ISOFSMNT_GENS |
		 ISOFSMNT_EXTATT | ISOFSMNT_NOJOLIET | ISOFSMNT_RRCASEINS);

	if (isomp->im_flags & ISOFSMNT_GENS)
		isomp->iso_ftype = ISO_FTYPE_9660;
	else if (isomp->im_flags & ISOFSMNT_NORRIP) {
		isomp->iso_ftype = ISO_FTYPE_DEFAULT;
		if (argp->flags & ISOFSMNT_NOCASETRANS)
			isomp->im_flags |= ISOFSMNT_NOCASETRANS;
	} else
		isomp->iso_ftype = ISO_FTYPE_RRIP;

	/* Check the Joliet Extension support */
	if ((argp->flags & ISOFSMNT_NORRIP) != 0 &&
	    (argp->flags & ISOFSMNT_NOJOLIET) == 0 &&
	    supbp != NULL) {
		joliet_level = 0;
		sup = (struct iso_supplementary_descriptor *)supbp->b_data;

		if ((isonum_711(sup->flags) & 1) == 0) {
			if (memcmp(sup->escape, "%/@", 3) == 0)
				joliet_level = 1;
			if (memcmp(sup->escape, "%/C", 3) == 0)
				joliet_level = 2;
			if (memcmp(sup->escape, "%/E", 3) == 0)
				joliet_level = 3;
		}
		if (joliet_level != 0) {
			if (iso_makemp(isomp, supbp, NULL) == -1) {
				error = EINVAL;
				goto out;
			}
			isomp->im_joliet_level = joliet_level;
		}
	}

	if (supbp != NULL) {
		brelse(supbp, 0);
		supbp = NULL;
	}

	spec_node_setmountedfs(devvp, mp);

	return 0;
out:
	if (bp)
		brelse(bp, 0);
	if (pribp)
		brelse(pribp, 0);
	if (supbp)
		brelse(supbp, 0);
	if (isomp) {
		free(isomp, M_ISOFSMNT);
		mp->mnt_data = NULL;
	}
	return error;
}
Example no. 18
/* Configure a single disk in the array. */
int
rf_ConfigureDisk(RF_Raid_t *raidPtr, char *buf, RF_RaidDisk_t *diskPtr,
    RF_RowCol_t row, RF_RowCol_t col)
{
	char *p;
	int retcode;

	struct partinfo dpart;
	struct vnode *vp;
	struct vattr va;
	struct proc *proc;
	int error;

	retcode = 0;
	p = rf_find_non_white(buf);
	if (*buf != '\0' && p[strlen(p) - 1] == '\n') {
		/* Strip off the newline. */
		p[strlen(p) - 1] = '\0';
	}
	(void) strlcpy(diskPtr->devname, p, sizeof diskPtr->devname);

	proc = raidPtr->engine_thread;

	/* Let's start by claiming the component is fine and well... */
	diskPtr->status = rf_ds_optimal;

	raidPtr->raid_cinfo[row][col].ci_vp = NULL;
	raidPtr->raid_cinfo[row][col].ci_dev = 0;

	error = raidlookup(diskPtr->devname, curproc, &vp);
	if (error) {
		printf("raidlookup on device: %s failed !\n", diskPtr->devname);
		if (error == ENXIO) {
			/* The component isn't there...  Must be dead :-( */
			diskPtr->status = rf_ds_failed;
		} else {
			return (error);
		}
	}
	if (diskPtr->status == rf_ds_optimal) {

		if ((error = VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0) {
			return (error);
		}
		error = VOP_IOCTL(vp, DIOCGPART, (caddr_t) & dpart, FREAD,
		    proc->p_ucred, proc);
		if (error) {
			return (error);
		}
		diskPtr->blockSize = dpart.disklab->d_secsize;

		diskPtr->numBlocks = DL_GETPSIZE(dpart.part) - rf_protectedSectors;
 		diskPtr->partitionSize = DL_GETPSIZE(dpart.part);

		raidPtr->raid_cinfo[row][col].ci_vp = vp;
		raidPtr->raid_cinfo[row][col].ci_dev = va.va_rdev;

 		/* This component was not automatically configured. */
 		diskPtr->auto_configured = 0;
		diskPtr->dev = va.va_rdev;

		/*
		 * We allow the user to specify that only a fraction of the
		 * disks should be used. This is just for debug: it speeds up
		 * the parity scan.
		 */
		diskPtr->numBlocks = diskPtr->numBlocks * rf_sizePercentage
		    / 100;
	}
	return (0);
}
Example no. 19
/* Do a complete copyback. */
void
rf_CopybackReconstructedData(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t c_label;
	int done, retcode;
	RF_CopybackDesc_t *desc;
	RF_RowCol_t frow, fcol;
	RF_RaidDisk_t *badDisk;
	char *databuf;

	struct partinfo dpart;
	struct vnode *vp;
	struct vattr va;
	struct proc *proc;

	int ac;

	done = 0;
	fcol = 0;
	for (frow = 0; frow < raidPtr->numRow; frow++) {
		for (fcol = 0; fcol < raidPtr->numCol; fcol++) {
			if (raidPtr->Disks[frow][fcol].status ==
			     rf_ds_dist_spared ||
			    raidPtr->Disks[frow][fcol].status ==
			     rf_ds_spared) {
				done = 1;
				break;
			}
		}
		if (done)
			break;
	}

	if (frow == raidPtr->numRow) {
		printf("COPYBACK: No disks need copyback.\n");
		return;
	}
	badDisk = &raidPtr->Disks[frow][fcol];

	proc = raidPtr->engine_thread;

	/*
	 * This device may have been opened successfully the first time.
	 * Close it before trying to open it again.
	 */

	if (raidPtr->raid_cinfo[frow][fcol].ci_vp != NULL) {
		printf("Close the opened device: %s.\n",
		    raidPtr->Disks[frow][fcol].devname);
 		vp = raidPtr->raid_cinfo[frow][fcol].ci_vp;
 		ac = raidPtr->Disks[frow][fcol].auto_configured;
 		rf_close_component(raidPtr, vp, ac);
		raidPtr->raid_cinfo[frow][fcol].ci_vp = NULL;

	}
 	/* Note that this disk was *not* auto_configured (any longer). */
 	raidPtr->Disks[frow][fcol].auto_configured = 0;

	printf("About to (re-)open the device: %s.\n",
	    raidPtr->Disks[frow][fcol].devname);

	retcode = raidlookup(raidPtr->Disks[frow][fcol].devname, proc, &vp);

	if (retcode) {
		printf("COPYBACK: raidlookup on device: %s failed: %d !\n",
		    raidPtr->Disks[frow][fcol].devname, retcode);

		/*
		 * XXX The component isn't responding properly... Must be
		 * still dead :-(
		 */
		return;

	} else {

		/*
		 * Ok, so we can at least do a lookup...
		 * How about actually getting a vp for it ?
		 */

		if ((retcode = VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0)
		{
			return;
		}
		retcode = VOP_IOCTL(vp, DIOCGPART, (caddr_t) &dpart, FREAD,
		    proc->p_ucred, proc);
		if (retcode) {
			return;
		}
		raidPtr->Disks[frow][fcol].blockSize = dpart.disklab->d_secsize;

		raidPtr->Disks[frow][fcol].numBlocks = dpart.part->p_size -
		    rf_protectedSectors;

		raidPtr->raid_cinfo[frow][fcol].ci_vp = vp;
		raidPtr->raid_cinfo[frow][fcol].ci_dev = va.va_rdev;

		/* XXX Or the above ? */
		raidPtr->Disks[frow][fcol].dev = va.va_rdev;

		/*
		 * We allow the user to specify that only a fraction of the
		 * disks should be used. This is just for debug: it speeds up
		 * the parity scan.
		 */
		raidPtr->Disks[frow][fcol].numBlocks =
		    raidPtr->Disks[frow][fcol].numBlocks *
		    rf_sizePercentage / 100;
	}
#if 0
	/* This is the way it was done before the CAM stuff was removed. */

	if (rf_extract_ids(badDisk->devname, &bus, &targ, &lun)) {
		printf("COPYBACK: unable to extract bus, target, lun from"
		    " devname %s.\n", badDisk->devname);
		return;
	}
	/*
	 * TUR the disk that's marked as bad to be sure that it's actually
	 * alive.
	 */
	rf_SCSI_AllocTUR(&tur_op);
	retcode = rf_SCSI_DoTUR(tur_op, bus, targ, lun, badDisk->dev);
	rf_SCSI_FreeDiskOp(tur_op, 0);
#endif

	if (retcode) {
		printf("COPYBACK: target disk failed TUR.\n");
		return;
	}
	/* Get a buffer to hold one SU. */
	RF_Malloc(databuf, rf_RaidAddressToByte(raidPtr,
	    raidPtr->Layout.sectorsPerStripeUnit), (char *));

	/* Create a descriptor. */
	RF_Malloc(desc, sizeof(*desc), (RF_CopybackDesc_t *));
	desc->raidPtr = raidPtr;
	desc->status = 0;
	desc->frow = frow;
	desc->fcol = fcol;
	desc->spRow = badDisk->spareRow;
	desc->spCol = badDisk->spareCol;
	desc->stripeAddr = 0;
	desc->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	desc->sectPerStripe = raidPtr->Layout.sectorsPerStripeUnit *
	    raidPtr->Layout.numDataCol;
	desc->databuf = databuf;
	desc->mcpair = rf_AllocMCPair();

	printf("COPYBACK: Quiescing the array.\n");
	/*
	 * Quiesce the array, since we don't want to code support for user
	 * accs here.
	 */
	rf_SuspendNewRequestsAndWait(raidPtr);

	/* Adjust state of the array and of the disks. */
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->Disks[desc->frow][desc->fcol].status = rf_ds_optimal;
	raidPtr->status[desc->frow] = rf_rs_optimal;
	rf_copyback_in_progress = 1;	/* Debug only. */
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	printf("COPYBACK: Beginning\n");
	RF_GETTIME(desc->starttime);
	rf_ContinueCopyback(desc);

	/*
	 * Data has been restored.
	 * Fix up the component label.
	 * Don't actually need the read here.
	 */
	raidread_component_label(raidPtr->raid_cinfo[frow][fcol].ci_dev,
				 raidPtr->raid_cinfo[frow][fcol].ci_vp,
				 &c_label);

	raid_init_component_label(raidPtr, &c_label);

	c_label.row = frow;
	c_label.column = fcol;

	raidwrite_component_label(raidPtr->raid_cinfo[frow][fcol].ci_dev,
				  raidPtr->raid_cinfo[frow][fcol].ci_vp,
				  &c_label);
}
Example no. 20
/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t space;
	ufs2_daddr_t sbloc;
	int error, i, blks, size, ronly;
	int32_t *lp;
	size_t strsize;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;

	/*
	 * Try reading the super-block in each of its possible locations.
	 */
	for (i = 0; sbtry[i] != -1; i++) {
		if (bp != NULL) {
			bp->b_flags |= B_NOCACHE;
			brelse(bp);
			bp = NULL;
		}

		error = bread(devvp, sbtry[i] / size, SBSIZE, cred, &bp);
		if (error)
			goto out;

		fs = (struct fs *) bp->b_data;
		sbloc = sbtry[i];

#if 0
		if (fs->fs_magic == FS_UFS2_MAGIC) {
			printf("ffs_mountfs(): Sorry, no UFS2 support (yet)\n");
			error = EFTYPE;
			goto out;
		}
#endif

		/*
		 * Do not look for an FFS1 file system at SBLOCK_UFS2. Doing so
		 * will find the wrong super-block for file systems with 64k
		 * block size.
		 */
		if (fs->fs_magic == FS_UFS1_MAGIC && sbloc == SBLOCK_UFS2)
			continue;

		if (ffs_validate(fs))
			break; /* Super block validated */
	}

	if (sbtry[i] == -1) {
		error = EINVAL;
		goto out;
	}

	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
#if 0
		/*
		 * It is safe to mount an unclean file system
		 * if it was previously mounted with softdep,
		 * but we may lose space and must
		 * sometimes run fsck manually.
		 */
		if (fs->fs_flags & FS_DOSOFTDEP)
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		else
#endif
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		} else {
			printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
			    fs->fs_fsmnt);
			error = EROFS;
			goto out;
		}
	}

	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
#ifndef SMALL_KERNEL
		printf("ffs_mountfs(): obsolete rotational table format, "
		    "please use fsck_ffs(8) -c 1\n");
#endif
		error = EFTYPE;
		goto out;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero(ump, sizeof *ump);
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);

	if (fs->fs_magic == FS_UFS1_MAGIC)
		ump->um_fstype = UM_UFS1;
#ifdef FFS2
	else
		ump->um_fstype = UM_UFS2;
#endif

	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;

	ffs1_compat_read(fs, ump, sbloc);

	fs->fs_ronly = ronly;
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = (struct csum *)space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			      cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	/* Use on-disk fsid if it exists, else fake it */
	if (fs->fs_id[0] != 0 && fs->fs_id[1] != 0)
		mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	else
		mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;

	devvp->v_specmountpoint = mp;
	ffs_oldfscompat(fs);

	if (ronly)
		fs->fs_contigdirs = NULL;
	else {
		fs->fs_contigdirs = (u_int8_t*)malloc((u_long)fs->fs_ncg,
						      M_UFSMNT, M_WAITOK);
		bzero(fs->fs_contigdirs, fs->fs_ncg);
	}

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	copystr(mp->mnt_stat.f_mntonname,	/* mount point*/
		fs->fs_fsmnt,			/* copy area*/
		sizeof(fs->fs_fsmnt) - 1,	/* max size*/
		&strsize);			/* real size*/
	bzero(fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

#if 0
	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}
#endif

	/*
	 * XXX
	 * Limit max file size.  Even though ffs can handle files up to 16TB,
	 * we do limit the max file to 2^31 pages to prevent overflow of
	 * a 32-bit unsigned int.  The buffer cache has its own checks but
	 * a little added paranoia never hurts.
	 */
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * MIN(PAGE_SIZE, fs->fs_bsize) - 1;
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0) {
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			free(fs->fs_contigdirs, M_UFSMNT);
			goto out;
		}
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags |= FS_DOSOFTDEP;
		else
			fs->fs_flags &= ~FS_DOSOFTDEP;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
Example no. 21
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mountp, struct ucred *cred, struct proc *p)
{
	struct vnode *devvp;
	caddr_t space;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	struct buf *bp = NULL;
	struct ffs_reload_args fra;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		panic("ffs_reload: dirty1");

	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	error = bread(devvp, (daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	newfs = (struct fs *)bp->b_data;
	if (ffs_validate(newfs) == 0) {
		brelse(bp);
		return (EINVAL);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_ronly = fs->fs_ronly;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs1_compat_read(fs, VFSTOUFS(mountp), SBOFF);
	ffs_oldfscompat(fs);
	(void)ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			      NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		bcopy(bp->b_data, space, (u_int)size);
		space += size;
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		(void) softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

	fra.p = p;
	fra.cred = cred;
	fra.fs = fs;
	fra.devvp = devvp;

	error = vfs_mount_foreach_vnode(mountp, ffs_reload_vnode, &fra);

	return (error);
}
static void
udf_discstrat_init_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	struct disk_strategy dkstrat;
	uint32_t lb_size;

	KASSERT(ump);
	KASSERT(ump->logical_vol);
	KASSERT(priv == NULL);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size > 0);

	/* initialise our memory space */
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0 , sizeof(struct strat_private));

	/* initialise locks */
	cv_init(&priv->discstrat_cv, "udfstrat");
	mutex_init(&priv->discstrat_mutex, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
	    IPL_NONE);

	/*
	 * remember the old device strategy method and explicitly set method
	 * `discsort' since we have our own more complex strategy that is not
	 * implementable on the CD device and other strategies will get in the
	 * way.
	 */
	memset(&priv->old_strategy_setting, 0,
		sizeof(struct disk_strategy));
	VOP_IOCTL(ump->devvp, DIOCGSTRATEGY, &priv->old_strategy_setting,
		FREAD | FKIOCTL, NOCRED);
	memset(&dkstrat, 0, sizeof(struct disk_strategy));
	strcpy(dkstrat.dks_name, "discsort");
	VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &dkstrat, FWRITE | FKIOCTL,
		NOCRED);

	/* initialise our internal scheduler */
	priv->cur_queue = UDF_SHED_READING;
	bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_WRITING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_SEQWRITING], "fcfs", 0);
	vfs_timestamp(&priv->last_queued[UDF_SHED_READING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_WRITING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_SEQWRITING]);

	/* create our disk strategy thread */
	priv->run_thread = 1;
	if (kthread_create(PRI_NONE, 0 /* KTHREAD_MPSAFE*/, NULL /* cpu_info*/,
		udf_discstrat_thread, ump, &priv->queue_lwp,
		"%s", "udf_rw")) {
		panic("fork udf_rw");
	}
}
Example no. 23
int
adosfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	struct disklabel dl;
	struct partition *parp;
	struct adosfsmount *amp;
	struct buf *bp;
	struct vnode *rvp;
	size_t bitmap_sz = 0;
	int error, i;
	uint64_t numsecs;
	unsigned secsize;
	unsigned long secsperblk, blksperdisk, resvblks;

	amp = NULL;

	if ((error = vinvalbuf(devvp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
		return (error);

	/*
	 * open blkdev and read boot and root block
	 */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_OPEN(devvp, FREAD, NOCRED)) != 0) {
		VOP_UNLOCK(devvp);
		return (error);
	}

	error = getdisksize(devvp, &numsecs, &secsize);
	if (error)
		goto fail;

	amp = kmem_zalloc(sizeof(struct adosfsmount), KM_SLEEP);

	/*
	 * compute filesystem parameters from disklabel
	 * on arch/amiga the disklabel is computed from the native
	 * partition tables
	 * - p_fsize is the filesystem block size
	 * - p_frag is the number of sectors per filesystem block
	 * - p_cpg is the number of reserved blocks (boot blocks)
	 * - p_psize is reduced by the number of preallocated blocks
	 *           at the end of a partition
	 *
	 * XXX
	 * - bsize and secsperblk could be computed from the first sector
	 *   of the root block
	 * - resvblks (the number of boot blocks) can only be guessed
	 *   by scanning for the root block as its position moves
	 *   with resvblks
	 */
	error = VOP_IOCTL(devvp, DIOCGDINFO, &dl, FREAD, NOCRED);
	VOP_UNLOCK(devvp);
	if (error)
		goto fail;
	parp = &dl.d_partitions[DISKPART(devvp->v_rdev)];
	if (dl.d_type == DTYPE_FLOPPY) {
		amp->bsize = secsize;
		secsperblk = 1;
		resvblks   = 2;
	} else if (parp->p_fsize > 0 && parp->p_frag > 0) {
		amp->bsize = parp->p_fsize * parp->p_frag;
		secsperblk = parp->p_frag;
		resvblks   = parp->p_cpg;
	} else {
		error = EINVAL;
		goto fail;
	}
	blksperdisk = numsecs / secsperblk;


	/* The filesystem variant ('dostype') is stored in the boot block */
	bp = NULL;
	if ((error = bread(devvp, (daddr_t)BBOFF,
			   amp->bsize, NOCRED, 0, &bp)) != 0) {
		goto fail;
	}
	amp->dostype = adoswordn(bp, 0);
	brelse(bp, 0);

	/* basic sanity checks */
	if (amp->dostype < 0x444f5300 || amp->dostype > 0x444f5305) {
		error = EINVAL;
		goto fail;
	}

	amp->rootb = (blksperdisk - 1 + resvblks) / 2;
	amp->numblks = blksperdisk - resvblks;

	amp->nwords = amp->bsize >> 2;
	amp->dbsize = amp->bsize - (IS_FFS(amp) ? 0 : OFS_DATA_OFFSET);
	amp->devvp = devvp;

	amp->mp = mp;
	mp->mnt_data = amp;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)devvp->v_rdev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_ADOSFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = ADMAXNAMELEN;
	mp->mnt_fs_bshift = ffs(amp->bsize) - 1;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag |= MNT_LOCAL;

	/*
	 * init anode table.
	 */
	for (i = 0; i < ANODEHASHSZ; i++)
		LIST_INIT(&amp->anodetab[i]);

	/*
	 * get the root anode, if not a valid fs this will fail.
	 */
	if ((error = VFS_ROOT(mp, &rvp)) != 0)
		goto fail;
	/* allocate and load bitmap, set free space */
	bitmap_sz = ((amp->numblks + 31) / 32) * sizeof(*amp->bitmap);
	amp->bitmap = kmem_alloc(bitmap_sz, KM_SLEEP);
	if (amp->bitmap)
		adosfs_loadbitmap(amp);
	if (mp->mnt_flag & MNT_RDONLY && amp->bitmap) {
		/*
		 * Don't need the bitmap any more if it's read-only.
		 */
		kmem_free(amp->bitmap, bitmap_sz);
		amp->bitmap = NULL;
	}
	vput(rvp);

	return(0);

fail:
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void) VOP_CLOSE(devvp, FREAD, NOCRED);
	VOP_UNLOCK(devvp);
	if (amp && amp->bitmap)
		kmem_free(amp->bitmap, bitmap_sz);
	if (amp)
		kmem_free(amp, sizeof(*amp));
	return (error);
}
Example no. 24
int
ffs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	struct buf *bp;
	int num, error, i;
	struct indir ia[NIADDR + 1];
	int bsize;
	daddr_t blk_high;
	struct vnode *vp;
	struct mount *mp;

	vp = ap->a_vp;
	mp = vp->v_mount;

	fstrans_start(mp, FSTRANS_LAZY);
	if ((ap->a_offlo == 0 && ap->a_offhi == 0) || (vp->v_type != VREG)) {
		error = ffs_full_fsync(vp, ap->a_flags);
		goto out;
	}

	bsize = mp->mnt_stat.f_iosize;
	blk_high = ap->a_offhi / bsize;
	if (ap->a_offhi % bsize != 0)
		blk_high++;

	/*
	 * First, flush all pages in range.
	 */

	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
	    round_page(ap->a_offhi), PGO_CLEANIT |
	    ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0));
	if (error) {
		goto out;
	}

#ifdef WAPBL
	KASSERT(vp->v_type == VREG);
	if (mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request.  We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((ap->a_flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0) {
			fstrans_done(mp);
			return 0;
		}
		error = 0;
		if (vp->v_tag == VT_UFS && VTOI(vp)->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY |
				 IN_MODIFIED | IN_ACCESSED)) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error) {
				fstrans_done(mp);
				return error;
			}
			error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
			    ((ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0));
			UFS_WAPBL_END(mp);
		}
		if (error || (ap->a_flags & FSYNC_NOLOG) != 0) {
			fstrans_done(mp);
			return error;
		}
		error = wapbl_flush(mp->mnt_wapbl, 0);
		fstrans_done(mp);
		return error;
	}
#endif /* WAPBL */

	/*
	 * Then, flush indirect blocks.
	 */

	if (blk_high >= NDADDR) {
		error = ufs_getlbns(vp, blk_high, ia, &num);
		if (error)
			goto out;

		mutex_enter(&bufcache_lock);
		for (i = 0; i < num; i++) {
			if ((bp = incore(vp, ia[i].in_lbn)) == NULL)
				continue;
			if ((bp->b_cflags & BC_BUSY) != 0 ||
			    (bp->b_oflags & BO_DELWRI) == 0)
				continue;
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			bawrite(bp);
			mutex_enter(&bufcache_lock);
		}
		mutex_exit(&bufcache_lock);
	}

	if (ap->a_flags & FSYNC_WAIT) {
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0)
			cv_wait(&vp->v_cv, vp->v_interlock);
		mutex_exit(vp->v_interlock);
	}

	error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
	    (((ap->a_flags & (FSYNC_WAIT | FSYNC_DATAONLY)) == FSYNC_WAIT)
	    ? UPDATE_WAIT : 0));

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
			curlwp->l_cred);
	}

out:
	fstrans_done(mp);
	return error;
}
Example no. 25
/* ARGSUSED */
int
ffs_full_fsync(struct vnode *vp, int flags)
{
	int error, i, uflags;
	struct mount *mp;

	KASSERT(vp->v_tag == VT_UFS);
	KASSERT(VTOI(vp) != NULL);
	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK);

	error = 0;
	uflags = UPDATE_CLOSE | ((flags & FSYNC_WAIT) ? UPDATE_WAIT : 0);

	mp = vp->v_mount;

	/*
	 * Flush all dirty data associated with the vnode.
	 */
	if (vp->v_type == VREG) {
		int pflags = PGO_ALLPAGES | PGO_CLEANIT;

		if ((flags & FSYNC_WAIT))
			pflags |= PGO_SYNCIO;
		if (fstrans_getstate(mp) == FSTRANS_SUSPENDING)
			pflags |= PGO_FREE;
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, 0, 0, pflags);
		if (error)
			return error;
	}

#ifdef WAPBL
	if (mp && mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request.  We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0)
			return 0;

		if ((VTOI(vp)->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE
		    | IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) != 0) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error)
				return error;
			error = ffs_update(vp, NULL, NULL, uflags);
			UFS_WAPBL_END(mp);
		}
		if (error || (flags & FSYNC_NOLOG) != 0)
			return error;

		/*
		 * Don't flush the log if the vnode being flushed
		 * contains no dirty buffers that could be in the log.
		 */
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			error = wapbl_flush(mp->mnt_wapbl, 0);
			if (error)
				return error;
		}

		if ((flags & FSYNC_WAIT) != 0) {
			mutex_enter(vp->v_interlock);
			while (vp->v_numoutput != 0)
				cv_wait(&vp->v_cv, vp->v_interlock);
			mutex_exit(vp->v_interlock);
		}

		return error;
	}
#endif /* WAPBL */

	error = vflushbuf(vp, (flags & FSYNC_WAIT) != 0);
	if (error == 0)
		error = ffs_update(vp, NULL, NULL, uflags);
	if (error == 0 && (flags & FSYNC_CACHE) != 0) {
		i = 1;
		(void)VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &i, FWRITE,
		    kauth_cred_get());
	}

	return error;
}
Example no. 26
int
ccdinit(struct ccddevice *ccd, char **cpaths, struct proc *p)
{
	struct ccd_softc *cs = &ccd_softc[ccd->ccd_unit];
	struct ccdcinfo *ci = NULL;
	size_t size;
	int ix, rpm;
	struct vnode *vp;
	struct vattr va;
	size_t minsize;
	int maxsecsize;
	struct partinfo dpart;
	struct ccdgeom *ccg = &cs->sc_geom;
	char tmppath[MAXPATHLEN];
	int error;

	CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT, ("ccdinit: unit %d cflags %b\n",
	    ccd->ccd_unit, ccd->ccd_flags, CCDF_BITS));

	cs->sc_size = 0;
	cs->sc_ileave = ccd->ccd_interleave;
	cs->sc_nccdisks = ccd->ccd_ndev;
	if (snprintf(cs->sc_xname, sizeof(cs->sc_xname), "ccd%d",
	    ccd->ccd_unit) >= sizeof(cs->sc_xname)) {
		printf("ccdinit: device name too long.\n");
		return(ENXIO);
	}

	/* Allocate space for the component info. */
	cs->sc_cinfo = malloc(cs->sc_nccdisks * sizeof(struct ccdcinfo),
	    M_DEVBUF, M_WAITOK);
	bzero(cs->sc_cinfo, cs->sc_nccdisks * sizeof(struct ccdcinfo));

	/*
	 * Verify that each component piece exists and record
	 * relevant information about it.
	 */
	maxsecsize = 0;
	minsize = 0;
	rpm = 0;
	for (ix = 0; ix < cs->sc_nccdisks; ix++) {
		vp = ccd->ccd_vpp[ix];
		ci = &cs->sc_cinfo[ix];
		ci->ci_vp = vp;

		/*
		 * Copy in the pathname of the component.
		 */
		bzero(tmppath, sizeof(tmppath));	/* sanity */
		error = copyinstr(cpaths[ix], tmppath,
		    MAXPATHLEN, &ci->ci_pathlen);
		if (error) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: can't copy path, error = %d\n",
			    cs->sc_xname, error));
			free(cs->sc_cinfo, M_DEVBUF);
			return (error);
		}
		ci->ci_path = malloc(ci->ci_pathlen, M_DEVBUF, M_WAITOK);
		bcopy(tmppath, ci->ci_path, ci->ci_pathlen);

		/*
		 * XXX: Cache the component's dev_t.
		 */
		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: %s: getattr failed error = %d\n",
			    cs->sc_xname, ci->ci_path, error));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (error);
		}
		ci->ci_dev = va.va_rdev;

		/*
		 * Get partition information for the component.
		 */
		error = VOP_IOCTL(vp, DIOCGPART, (caddr_t)&dpart,
		    FREAD, p->p_ucred, p);
		if (error) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: %s: ioctl failed, error = %d\n",
			    cs->sc_xname, ci->ci_path, error));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (error);
		}
		if (dpart.part->p_fstype == FS_CCD ||
		    dpart.part->p_fstype == FS_BSDFFS) {
			maxsecsize =
			    ((dpart.disklab->d_secsize > maxsecsize) ?
			    dpart.disklab->d_secsize : maxsecsize);
			size = dpart.part->p_size;
		} else {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: %s: incorrect partition type\n",
			    cs->sc_xname, ci->ci_path));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (EFTYPE);
		}

		/*
		 * Calculate the size, truncating to an interleave
		 * boundary if necessary.
		 */
		if (cs->sc_ileave > 1)
			size -= size % cs->sc_ileave;

		if (size == 0) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: %s: size == 0\n", cs->sc_xname, ci->ci_path));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (ENODEV);
		}

		if (minsize == 0 || size < minsize)
			minsize = size;
		ci->ci_size = size;
		cs->sc_size += size;
		rpm += dpart.disklab->d_rpm;
	}
	ccg->ccg_rpm = rpm / cs->sc_nccdisks;

	/*
	 * Don't allow the interleave to be smaller than
	 * the biggest component sector.
	 */
	if ((cs->sc_ileave > 0) &&
	    (cs->sc_ileave < (maxsecsize / DEV_BSIZE))) {
		CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
		    ("%s: interleave must be at least %d\n",
		    cs->sc_xname, (maxsecsize / DEV_BSIZE)));
		free(ci->ci_path, M_DEVBUF);
		free(cs->sc_cinfo, M_DEVBUF);
		return (EINVAL);
	}

	/*
	 * Mirroring support requires uniform interleave and
	 * an even number of components.
	 */
	if (ccd->ccd_flags & CCDF_MIRROR) {
		ccd->ccd_flags |= CCDF_UNIFORM;
		if (cs->sc_ileave == 0) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: mirroring requires interleave\n",
			    cs->sc_xname));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (EINVAL);
		}
		if (cs->sc_nccdisks % 2) {
			CCD_DPRINTF(CCDB_FOLLOW | CCDB_INIT,
			    ("%s: mirroring requires even # of components\n",
			    cs->sc_xname));
			free(ci->ci_path, M_DEVBUF);
			free(cs->sc_cinfo, M_DEVBUF);
			return (EINVAL);
		}
	}

	/*
	 * If uniform interleave is desired set all sizes to that of
	 * the smallest component.
	 */
	ccg->ccg_ntracks = cs->sc_nccunits = cs->sc_nccdisks;
	if (ccd->ccd_flags & CCDF_UNIFORM) {
		for (ci = cs->sc_cinfo;
		     ci < &cs->sc_cinfo[cs->sc_nccdisks]; ci++)
			ci->ci_size = minsize;

		if (ccd->ccd_flags & CCDF_MIRROR)
			cs->sc_nccunits = ccg->ccg_ntracks /= 2;
		cs->sc_size = ccg->ccg_ntracks * minsize;
	}

	cs->sc_cflags = ccd->ccd_flags;	/* So we can find out later... */

	/*
	 * Construct the interleave table.
	 */
	ccdinterleave(cs);

	/*
	 * Create pseudo-geometry based on 1MB cylinders.  It's
	 * pretty close.
	 */
	ccg->ccg_secsize = DEV_BSIZE;
	ccg->ccg_nsectors = cs->sc_ileave? cs->sc_ileave :
	    1024 * (1024 / ccg->ccg_secsize);
	ccg->ccg_ncylinders = cs->sc_size / ccg->ccg_ntracks /
	    ccg->ccg_nsectors;

	cs->sc_flags |= CCDF_INITED;

	return (0);
}
Example no. 27
/*
 * Helper function for findroot():
 * Return non-zero if disk device matches bootinfo.
 */
static int
match_bootdisk(struct device *dv, struct btinfo_bootdisk *bid)
{
	struct vnode *tmpvn;
	int error;
	struct disklabel label;
	int found = 0;
	int bmajor;

	/*
	 * A disklabel is required here.  The boot loader doesn't refuse
	 * to boot from a disk without a label, but this is normally not
	 * wanted.
	 */
	if (bid->labelsector == -1)
		return (0);
	
	/*
	 * Lookup major number for disk block device.
	 */
	bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
	if (bmajor == -1)
		return (0);	/* XXX panic ??? */

	/*
	 * Fake a temporary vnode for the disk, open it, and read
	 * the disklabel for comparison.
	 */
	if (bdevvp(MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART), &tmpvn))
		panic("match_bootdisk: can't alloc vnode");
	error = VOP_OPEN(tmpvn, FREAD, NOCRED);
	if (error) {
#ifndef DEBUG
		/*
		 * Ignore errors caused by missing device, partition,
		 * or medium.
		 */
		if (error != ENXIO && error != ENODEV)
#endif
			printf("match_bootdisk: can't open dev %s (%d)\n",
			    dv->dv_xname, error);
		vput(tmpvn);
		return (0);
	}
	error = VOP_IOCTL(tmpvn, DIOCGDINFO, &label, FREAD, NOCRED);
	if (error) {
		/*
		 * XXX Can't happen -- open() would have errored out
		 * or faked one up.
		 */
		printf("match_bootdisk: can't get label for dev %s (%d)\n",
		    dv->dv_xname, error);
		goto closeout;
	}

	/* Compare with our data. */
	if (label.d_type == bid->label.type &&
	    label.d_checksum == bid->label.checksum &&
	    strncmp(label.d_packname, bid->label.packname, 16) == 0)
	    	found = 1;

closeout:
	VOP_CLOSE(tmpvn, FREAD, NOCRED);
	vput(tmpvn);
	return (found);
}
Example no. 28
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
	struct vnode *vp = fp->f_data, *ovp;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
			VOP_UNLOCK(vp);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    kauth_cred_get());
		if (error == 0 && com == TIOCSCTTY) {
			vref(vp);
			mutex_enter(proc_lock);
			ovp = curproc->p_session->s_ttyvp;
			curproc->p_session->s_ttyvp = vp;
			mutex_exit(proc_lock);
			if (ovp != NULL)
				vrele(ovp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}
Example no. 29
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}
Example no. 30
static int
nm_ioctl(vnode_t *vp, int cmd, intptr_t arg, int mode, cred_t *cr, int *rvalp,
	caller_context_t *ct)
{
	return (VOP_IOCTL(VTONM(vp)->nm_filevp, cmd, arg, mode, cr, rvalp, ct));
}