Example No. 1
static int
sysctl_portalgo_reserve(SYSCTLFN_ARGS, bitmap *bt)
{
	struct sysctlnode node;
	int error;

	DPRINTF("%s called\n", __func__);

	node = *rnode;
	node.sysctl_data = bt;
	node.sysctl_size = sizeof(*bt);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

#ifdef KAUTH_NETWORK_SOCKET_PORT_RESERVE
	if (l != NULL && (error = kauth_authorize_system(l->l_cred,
	    KAUTH_NETWORK_SOCKET, KAUTH_NETWORK_SOCKET_PORT_RESERVE, bt,
	    NULL, NULL)) != 0)
		return error;
#endif
	return error;
}
Example No. 2
/*
 * The sysctl hook that is supposed to check that we are picking one
 * of the valid algorithms.
 */
static int
sysctl_portalgo_selected(SYSCTLFN_ARGS, int *algo)
{
	struct sysctlnode node;
	int error;
	char newalgo[PORTALGO_MAXLEN];

	DPRINTF("%s called\n", __func__);

	strlcpy(newalgo, algos[*algo].name, sizeof(newalgo));

	node = *rnode;
	node.sysctl_data = newalgo;
	node.sysctl_size = sizeof(newalgo);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	DPRINTF("newalgo: %s\n", newalgo);

	if (error || newp == NULL ||
	    strncmp(newalgo, algos[*algo].name, sizeof(newalgo)) == 0)
		return error;

#ifdef KAUTH_NETWORK_SOCKET_PORT_RANDOMIZE
	if (l != NULL && (error = kauth_authorize_system(l->l_cred,
	    KAUTH_NETWORK_SOCKET, KAUTH_NETWORK_SOCKET_PORT_RANDOMIZE, newalgo,
	    NULL, NULL)) != 0)
		return error;
#endif

	mutex_enter(softnet_lock);
	error = portalgo_algo_name_select(newalgo, algo);
	mutex_exit(softnet_lock);
	return error;
}
Example No. 3
/*
 * ntp_adjtime() - NTP daemon application interface
 */
int
sys_ntp_adjtime(struct lwp *l, const struct sys_ntp_adjtime_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct timex *) tp;
	} */
	struct timex ntv;
	int error = 0;

	error = copyin((void *)SCARG(uap, tp), (void *)&ntv, sizeof(ntv));
	if (error != 0)
		return (error);

	if (ntv.modes != 0 && (error = kauth_authorize_system(l->l_cred,
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_NTPADJTIME, NULL,
	    NULL, NULL)) != 0)
		return (error);

	ntp_adjtime1(&ntv);

	error = copyout((void *)&ntv, (void *)SCARG(uap, tp), sizeof(ntv));
	if (!error)
		*retval = ntp_timestatus();

	return error;
}
Example No. 4
int
secpolicy_fs_unmount(kauth_cred_t cred, struct mount *vfsp)
{

	return kauth_authorize_system(cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT, vfsp, NULL, NULL);
}
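A minimal caller sketch (hypothetical; the examplefs_unmount name and body are not from the examples above): a filesystem's unmount entry point could route its privilege decision through secpolicy_fs_unmount() before doing any teardown.

static int
examplefs_unmount(struct mount *mp, int mntflags)
{
	int error;

	/* Ask kauth, via the wrapper above, whether the caller may unmount. */
	error = secpolicy_fs_unmount(kauth_cred_get(), mp);
	if (error != 0)
		return error;

	/* ... filesystem-specific teardown would follow here ... */
	(void)mntflags;		/* force/flags handling omitted in this sketch */
	return 0;
}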
Example No. 5
/*
 * Translate command sent from libdevmapper to func.
 */
static int
dm_cmd_to_fun(prop_dictionary_t dm_dict) {
	int i, r;
	prop_string_t command;
	
	r = 0;

	if ((command = prop_dictionary_get(dm_dict, DM_IOCTL_COMMAND)) == NULL)
		return EINVAL;

	for(i = 0; cmd_fn[i].cmd != NULL; i++)
		if (prop_string_equals_cstring(command, cmd_fn[i].cmd))
			break;

	if (!cmd_fn[i].allowed && 
	    (r = kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_DEVMAPPER, 0, NULL, NULL, NULL)) != 0)
		return r;

	if (cmd_fn[i].cmd == NULL)
		return EINVAL;

	aprint_debug("ioctl %s called\n", cmd_fn[i].cmd);
	r = cmd_fn[i].fn(dm_dict);

	return r;
}
Example No. 6
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}
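A hypothetical wrapper sketch (assumed, not taken from the source): the clock_settime and settimeofday paths mentioned in the comment could funnel through settime1() with check_kauth set to true, so the KAUTH_REQ_SYSTEM_TIME_SYSTEM check above is applied uniformly.

static int
example_settime(struct proc *p, const struct timespec *ts)
{
	/* true => let settime1() perform the kauth_authorize_system() check. */
	return settime1(p, ts, true);
}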
Example No. 7
/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}
Example No. 8
static int 
quota_handle_cmd_quotaon(struct mount *mp, struct lwp *l, 
    prop_dictionary_t cmddict, int type, prop_array_t datas)
{
	prop_dictionary_t data;
	struct ufsmount *ump = VFSTOUFS(mp);
	int error;
	const char *qfile;

	if ((ump->um_flags & UFS_QUOTA2) != 0)
		return EBUSY;
	
	if (prop_array_count(datas) != 1)
		return EINVAL;

	data = prop_array_get(datas, 0);
	if (data == NULL)
		return ENOMEM;
	if (!prop_dictionary_get_cstring_nocopy(data, "quotafile",
	    &qfile))
		return EINVAL;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FS_QUOTA,
	    KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF, mp, NULL, NULL);
	if (error != 0) {
		return error;
	}
#ifdef QUOTA
	error = quota1_handle_cmd_quotaon(l, ump, type, qfile);
#else
	error = EOPNOTSUPP;
#endif
	
	return error;
}
Example No. 9
static int 
quota_handle_cmd_quotaoff(struct mount *mp, struct lwp *l, 
    prop_dictionary_t cmddict, int type, prop_array_t datas)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	int error;

	if ((ump->um_flags & UFS_QUOTA2) != 0)
		return EOPNOTSUPP;
	
	if (prop_array_count(datas) != 0)
		return EINVAL;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FS_QUOTA,
	    KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF, mp, NULL, NULL);
	if (error != 0) {
		return error;
	}
#ifdef QUOTA
	error = quota1_handle_cmd_quotaoff(l, ump, type);
#else
	error = EOPNOTSUPP;
#endif
	
	return error;
}
Example No. 10
/*
 * sys_lfs_segwait:
 *
 * System call wrapper around lfs_segwait().
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
int
sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv;
	fsid_t fsid;
	int error;

	/* XXX need we be su to segwait? */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
	if (error)
		return (error);
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
	} else /* NULL or invalid */
		atv.tv_sec = atv.tv_usec = 0;
	return lfs_segwait(&fsid, &atv);
}
Example No. 11
static int 
quota_handle_cmd_getall(struct mount *mp, struct lwp *l, 
    prop_dictionary_t cmddict, int type, prop_array_t datas)
{
	prop_array_t replies;
	struct ufsmount *ump = VFSTOUFS(mp);
	int error;

	if ((ump->um_flags & UFS_QUOTA2) == 0)
		return EOPNOTSUPP;
	
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FS_QUOTA,
	    KAUTH_REQ_SYSTEM_FS_QUOTA_GET, mp, NULL, NULL);
	if (error)
		return error;
		
	replies = prop_array_create();
	if (replies == NULL)
		return ENOMEM;

#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2) {
		error = quota2_handle_cmd_getall(ump, type, replies);
	} else
#endif
		panic("quota_handle_cmd_getall: no support ?");
	if (!prop_dictionary_set_and_rel(cmddict, "data", replies)) {
		error = ENOMEM;
	} else {
		error = 0;
	}
	return error;
}
Example No. 12
/* XXX shouldn't all this be in kauth ? */
static int
quota_get_auth(struct mount *mp, struct lwp *l, uid_t id) {
	/* The user can always query about his own quota. */
	if (id == kauth_cred_getuid(l->l_cred))
		return 0;
	return kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FS_QUOTA,
	    KAUTH_REQ_SYSTEM_FS_QUOTA_GET, mp, KAUTH_ARG(id), NULL);
}
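A hypothetical usage sketch (the example_quota_get name is invented for illustration): a quota "get" handler could consult quota_get_auth() first, so an unprivileged user can only read its own record while kauth decides for everyone else.

static int
example_quota_get(struct mount *mp, struct lwp *l, uid_t id)
{
	int error;

	/* Self-queries pass; other ids need KAUTH_REQ_SYSTEM_FS_QUOTA_GET. */
	error = quota_get_auth(mp, l, id);
	if (error != 0)
		return error;

	/* ... look up and copy out the quota record for 'id' here ... */
	return 0;
}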
Example No. 13
/*
 * Update disk usage, and take corrective action.
 */
int
lfs_chkdq1(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

	if ((error = lfs_getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT, KAUTH_ARG(i),
		    KAUTH_ARG(QL_BLOCK), NULL) != 0) {
			mutex_enter(&dq->dq_interlock);
			error = chkdqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}
Example No. 14
int
compat_50_netbsd32_adjtime(struct lwp *l,
    const struct compat_50_netbsd32_adjtime_args *uap, register_t *retval)
{
	/* {
		syscallarg(const netbsd32_timeval50p_t) delta;
		syscallarg(netbsd32_timeval50p_t) olddelta;
	} */
	struct netbsd32_timeval50 atv;
	int error;

	extern int time_adjusted;     /* in kern_ntptime.c */
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if ((error = kauth_authorize_system(l->l_cred,
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL,
	    NULL)) != 0)
		return (error);

	if (SCARG_P32(uap, olddelta)) {
		atv.tv_sec = time_adjtime / 1000000;
		atv.tv_usec = time_adjtime % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		error = copyout(&atv,
			       SCARG_P32(uap, olddelta), 
			       sizeof(atv));
		if (error)
			return (error);
	}
	
	if (SCARG_P32(uap, delta)) {
		error = copyin(SCARG_P32(uap, delta), &atv,
			       sizeof(atv));
		if (error)
			return (error);

		time_adjtime = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;

		if (time_adjtime)
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
	}

	return 0;
}
Example No. 15
int
ipcperm(kauth_cred_t cred, struct ipc_perm *perm, int mode)
{
	int error;

	error = kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
	    KAUTH_REQ_SYSTEM_SYSVIPC_BYPASS, perm, KAUTH_ARG(mode), NULL);
	if (error == 0)
		return (0);

	/* Adjust EPERM and EACCES errors until there's a better way to do this. */
	if (mode != IPC_M)
		error = EACCES;

	return error;
}
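A hypothetical caller sketch (assumed, not from the source above): a SysV IPC operation needing read access can pass the IPC_R mode bit; per the comment above, a denial is reported as EACCES unless IPC_M (ownership/mode change) was requested.

static int
example_ipc_read_check(kauth_cred_t cred, struct ipc_perm *perm)
{
	/* IPC_R requests read permission on the IPC object. */
	return ipcperm(cred, perm, IPC_R);
}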
Example No. 16
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example No. 17
/*
 * Allocate a block in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *	  available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *	  inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *	  available block is located.
 */
int
ext2fs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref,
    kauth_cred_t cred, daddr_t *bnp)
{
	struct m_ext2fs *fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if (cred == NOCRED)
		panic("ext2fs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (fs->e2fs.e2fs_fbcount == 0)
		goto nospace;
	if (kauth_authorize_system(cred, KAUTH_SYSTEM_FS_RESERVEDSPACE, 0, NULL,
	    NULL, NULL) != 0 &&
	    freespace(fs) <= 0)
		goto nospace;
	if (bpref >= fs->e2fs.e2fs_bcount)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ext2fs_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
	    ext2fs_alloccg);
	if (bno > 0) {
		ip->i_e2fs_nblock += btodb(fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
	ext2fs_fserr(fs, kauth_cred_geteuid(cred), "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}
Example No. 18
int
compat_30_netbsd32_fhstat(struct lwp *l, const struct compat_30_netbsd32_fhstat_args *uap, register_t *retval)
{
	/* {
		syscallarg(const netbsd32_fhandlep_t) fhp;
		syscallarg(netbsd32_stat13p_t) sb;
	} */
	struct stat sb;
	struct netbsd32_stat13 sb32;
	int error;
	struct compat_30_fhandle fh;
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Must be super user
	 */
	if ((error = kauth_authorize_system(l->l_cred,
	    KAUTH_SYSTEM_FILEHANDLE, 0, NULL, NULL, NULL)))
		return (error);

	if ((error = copyin(SCARG_P32(uap, fhp), &fh, sizeof(fh))) != 0)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);
	if (mp->mnt_op->vfs_fhtovp == NULL)
		return EOPNOTSUPP;
	if ((error = VFS_FHTOVP(mp, (struct fid*)&fh.fh_fid, &vp)))
		return (error);
	error = vn_stat(vp, &sb);
	vput(vp);
	if (error)
		return (error);
	netbsd32_from___stat13(&sb, &sb32);
	error = copyout(&sb32, SCARG_P32(uap, sb), sizeof(sb32));
	return (error);
}
Example No. 19
/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}
Example No. 20
int
netbsd32_ntp_adjtime(struct lwp *l, const struct netbsd32_ntp_adjtime_args *uap, register_t *retval)
{
	/* {
		syscallarg(netbsd32_timexp_t) tp;
	} */
	struct netbsd32_timex ntv32;
	struct timex ntv;
	int error = 0;
	int modes;

	if ((error = copyin(SCARG_P32(uap, tp), &ntv32, sizeof(ntv32))))
		return (error);

	netbsd32_to_timex(&ntv32, &ntv);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 */
	modes = ntv.modes;
	if (modes != 0 && (error = kauth_authorize_system(l->l_cred,
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_NTPADJTIME, NULL, NULL,
	    NULL)))
		return (error);

	ntp_adjtime1(&ntv);

	netbsd32_from_timex(&ntv, &ntv32);
	error = copyout(&ntv32, SCARG_P32(uap, tp), sizeof(ntv32));
	if (!error) {
		*retval = ntp_timestatus();
	}
	return error;
}
Example No. 21
int
hfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct hfs_args *args = data;
	struct vnode *devvp;
	struct hfsmount *hmp;
	int error = 0;
	int update;
	mode_t accessmode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

#ifdef HFS_DEBUG	
	printf("vfsop = hfs_mount()\n");
#endif /* HFS_DEBUG */
	
	if (mp->mnt_flag & MNT_GETARGS) {
		hmp = VFSTOHFS(mp);
		if (hmp == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	if (data == NULL)
		return EINVAL;

/* FIXME: For development ONLY - disallow remounting for now */
#if 0
	update = mp->mnt_flag & MNT_UPDATE;
#else
	update = 0;
#endif

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		error = namei_simple_user(args->fspec,
					NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return error;
	
		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			hmp = VFSTOHFS(mp);
			if (devvp != hmp->hm_devvp)
				error = EINVAL;
		}
	} else {
		if (update) {
			/* Use the extant mount */
			hmp = VFSTOHFS(mp);
			devvp = hmp->hm_devvp;
			vref(devvp);
		} else {
			/* New mounts must have a filename for the device */
			return EINVAL;
		}
	}

	
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * Permission to update a mount is checked higher, so here we presume
	 * updating the mount is okay (for example, as far as securelevel goes)
	 * which leaves us with the normal check.
	 */
	if (error == 0) {
		accessmode = VREAD;
		if (update ?
			(mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
			(mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
		VOP_UNLOCK(devvp);
	}

	if (error != 0)
		goto error;

	if (update) {
		printf("HFS: live remounting not yet supported!\n");
		error = EINVAL;
		goto error;
	}

	if ((error = hfs_mountfs(devvp, mp, l, args->fspec)) != 0)
		goto error;
	
	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec, UIO_USERSPACE,
		mp->mnt_op->vfs_name, mp, l);

#ifdef HFS_DEBUG
	if(!update) {
		char* volname;
		
		hmp = VFSTOHFS(mp);
		volname = malloc(hmp->hm_vol.name.length + 1, M_TEMP, M_WAITOK);
		if (volname == NULL)
			printf("could not allocate volname; ignored\n");
		else {
			if (hfs_unicode_to_ascii(hmp->hm_vol.name.unicode,
				hmp->hm_vol.name.length, volname) == NULL)
				printf("could not convert volume name to ascii; ignored\n");
			else
				printf("mounted volume \"%s\"\n", volname);
			free(volname, M_TEMP);
		}
	}
#endif /* HFS_DEBUG */
		
	return error;
	
error:
	vrele(devvp);
	return error;
}
Example No. 22
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL) 
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version   = blkiov15[i].bi_version;
		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
		blkiov[i].bi_size      = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version	 = blkiov[i].bi_version;
			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
			blkiov15[i].bi_size	 = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
Example No. 23
/*
 * VFS Operations.
 *
 * mount system call
 */
int
ext2fs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct ufs_args *args = data;
	struct ufsmount *ump = NULL;
	struct m_ext2fs *fs;
	int error = 0, flags, update;
	mode_t accessmode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		memset(args, 0, sizeof *args);
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		error = namei_simple_user(args->fspec,
					NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return error;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
		        /*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			ump = VFSTOUFS(mp);
			if (devvp != ump->um_devvp) {
				if (devvp->v_rdev != ump->um_devvp->v_rdev)
					error = EINVAL;
				else {
					vrele(devvp);
					devvp = ump->um_devvp;
					vref(devvp);
				}
			}
		}
	} else {
		if (!update) {
			/* New mounts must have a filename for the device */
			return EINVAL;
		} else {
			ump = VFSTOUFS(mp);
			devvp = ump->um_devvp;
			vref(devvp);
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * Permission to update a mount is checked higher, so here we presume
	 * updating the mount is okay (for example, as far as securelevel goes)
	 * which leaves us with the normal check.
	 */
	if (error == 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
		VOP_UNLOCK(devvp);
	}

	if (error) {
		vrele(devvp);
		return error;
	}

	if (!update) {
		int xflags;

		if (mp->mnt_flag & MNT_RDONLY)
			xflags = FREAD;
		else
			xflags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, xflags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error)
			goto fail;
		error = ext2fs_mountfs(devvp, mp);
		if (error) {
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, xflags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}

		ump = VFSTOUFS(mp);
		fs = ump->um_e2fs;
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		ump = VFSTOUFS(mp);
		fs = ump->um_e2fs;
		if (fs->e2fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from r/w to r/o
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ext2fs_flushfiles(mp, flags);
			if (error == 0 &&
			    ext2fs_cgupdate(ump, MNT_WAIT) == 0 &&
			    (fs->e2fs.e2fs_state & E2FS_ERRORS) == 0) {
				fs->e2fs.e2fs_state = E2FS_ISCLEAN;
				(void) ext2fs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return error;
			fs->e2fs_ronly = 1;
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ext2fs_reload(mp, l->l_cred, l);
			if (error)
				return error;
		}

		if (fs->e2fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write
			 */
			fs->e2fs_ronly = 0;
			if (fs->e2fs.e2fs_state == E2FS_ISCLEAN)
				fs->e2fs.e2fs_state = 0;
			else
				fs->e2fs.e2fs_state = E2FS_ERRORS;
			fs->e2fs_fmod = 1;
		}
		if (args->fspec == NULL)
			return 0;
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error == 0)
		ext2fs_sb_setmountinfo(fs, mp);

	if (fs->e2fs_fmod != 0) {	/* XXX */
		fs->e2fs_fmod = 0;
		if (fs->e2fs.e2fs_state == 0)
			fs->e2fs.e2fs_wtime = time_second;
		else
			printf("%s: file system not clean; please fsck(8)\n",
				mp->mnt_stat.f_mntfromname);
		(void) ext2fs_cgupdate(ump, MNT_WAIT);
	}
	return error;

fail:
	vrele(devvp);
	return error;
}
Example No. 24
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 */
int
msdosfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct msdosfs_args *args = data; /* holds data from mount request */
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	int error, flags;
	mode_t accessmode;

	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = VFSTOMSDOSFS(mp);
		if (pmp == NULL)
			return EIO;
		args->fspec = NULL;
		args->uid = pmp->pm_uid;
		args->gid = pmp->pm_gid;
		args->mask = pmp->pm_mask;
		args->flags = pmp->pm_flags;
		args->version = MSDOSFSMNT_VERSION;
		args->dirmask = pmp->pm_dirmask;
		args->gmtoff = pmp->pm_gmtoff;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * If not versioned (i.e. using old mount_msdos(8)), fill in
	 * the additional structure items with suitable defaults.
	 */
	if ((args->flags & MSDOSFSMNT_VERSIONED) == 0) {
		args->version = 1;
		args->dirmask = args->mask;
	}

	/*
	 * Reset GMT offset for pre-v3 mount structure args.
	 */
	if (args->version < 3)
		args->gmtoff = 0;

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		error = 0;
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, NULLVP, flags);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			/* not yet implemented */
			error = EOPNOTSUPP;
		if (error) {
			DPRINTF(("vflush %d\n", error));
			return (error);
		}
		if ((pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 *
			 * Permission to update a mount is checked higher, so
			 * here we presume updating the mount is okay (for
			 * example, as far as securelevel goes) which leaves us
			 * with the normal check.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = kauth_authorize_system(l->l_cred,
			    KAUTH_SYSTEM_MOUNT, KAUTH_REQ_SYSTEM_MOUNT_DEVICE,
			    mp, devvp, KAUTH_ARG(VREAD | VWRITE));
			VOP_UNLOCK(devvp);
			DPRINTF(("KAUTH_REQ_SYSTEM_MOUNT_DEVICE %d\n", error));
			if (error)
				return (error);

			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
		}
		if (args->fspec == NULL) {
			DPRINTF(("missing fspec\n"));
			return EINVAL;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	error = namei_simple_user(args->fspec,
				NSM_FOLLOW_NOEMULROOT, &devvp);
	if (error != 0) {
		DPRINTF(("namei %d\n", error));
		return (error);
	}

	if (devvp->v_type != VBLK) {
		DPRINTF(("not block\n"));
		vrele(devvp);
		return (ENOTBLK);
	}
	if (bdevsw_lookup(devvp->v_rdev) == NULL) {
		DPRINTF(("no block switch\n"));
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accessmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accessmode |= VWRITE;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(accessmode));
	VOP_UNLOCK(devvp);
	if (error) {
		DPRINTF(("KAUTH_REQ_SYSTEM_MOUNT_DEVICE %d\n", error));
		vrele(devvp);
		return (error);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		int xflags;

		if (mp->mnt_flag & MNT_RDONLY)
			xflags = FREAD;
		else
			xflags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, xflags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error) {
			DPRINTF(("VOP_OPEN %d\n", error));
			goto fail;
		}
		error = msdosfs_mountfs(devvp, mp, l, args);
		if (error) {
			DPRINTF(("msdosfs_mountfs %d\n", error));
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void) VOP_CLOSE(devvp, xflags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}
#ifdef MSDOSFS_DEBUG		/* only needed for the printf below */
		pmp = VFSTOMSDOSFS(mp);
#endif
	} else {
		vrele(devvp);
		if (devvp != pmp->pm_devvp) {
			DPRINTF(("devvp %p pmp %p\n", 
			    devvp, pmp->pm_devvp));
			return (EINVAL);	/* needs translation */
		}
	}
	if ((error = update_mp(mp, args)) != 0) {
		msdosfs_unmount(mp, MNT_FORCE);
		DPRINTF(("update_mp %d\n", error));
		return error;
	}

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
#endif
	return set_statvfs_info(path, UIO_USERSPACE, args->fspec, UIO_USERSPACE,
	    mp->mnt_op->vfs_name, mp, l);

fail:
	vrele(devvp);
	return (error);
}
Example No. 25
static int 
quota_handle_cmd_clear(struct mount *mp, struct lwp *l, 
    prop_dictionary_t cmddict, int type, prop_array_t datas)
{
	prop_array_t replies;
	prop_object_iterator_t iter;
	prop_dictionary_t data;
	uint32_t id;
	struct ufsmount *ump = VFSTOUFS(mp);
	int error, defaultq = 0;
	const char *idstr;

	if ((ump->um_flags & UFS_QUOTA2) == 0)
		return EOPNOTSUPP;
	
	replies = prop_array_create();
	if (replies == NULL)
		return ENOMEM;

	iter = prop_array_iterator(datas);
	if (iter == NULL) {
		prop_object_release(replies);
		return ENOMEM;
	}
	while ((data = prop_object_iterator_next(iter)) != NULL) {
		if (!prop_dictionary_get_uint32(data, "id", &id)) {
			if (!prop_dictionary_get_cstring_nocopy(data, "id",
			    &idstr))
				continue;
			if (strcmp(idstr, "default"))
				continue;
			id = 0;
			defaultq = 1;
		} else {
			defaultq = 0;
		}
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_MANAGE, mp, KAUTH_ARG(id), NULL);
		if (error != 0)
			goto err;
#ifdef QUOTA2
		if (ump->um_flags & UFS_QUOTA2) {
			error = quota2_handle_cmd_clear(ump, type, id, defaultq,
			    data);
		} else
#endif
			panic("quota_handle_cmd_get: no support ?");
		
		if (error && error != ENOENT)
			goto err;
	}
	prop_object_iterator_release(iter);
	if (!prop_dictionary_set_and_rel(cmddict, "data", replies)) {
		error = ENOMEM;
	} else {
		error = 0;
	}
	return error;
err:
	prop_object_iterator_release(iter);
	prop_object_release(replies);
	return error;
}
Example No. 26
/*
 * Mount umap layer
 */
int
umapfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct pathbuf *pb;
	struct nameidata nd;
	struct umap_args *args = data;
	struct vnode *lowerrootvp, *vp;
	struct umap_mount *amp;
	int error;
#ifdef UMAPFS_DIAGNOSTIC
	int i;
#endif

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		amp = MOUNTTOUMAPMOUNT(mp);
		if (amp == NULL)
			return EIO;
		args->la.target = NULL;
		args->nentries = amp->info_nentries;
		args->gnentries = amp->info_gnentries;
		*data_len = sizeof *args;
		return 0;
	}

	/* only for root */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_UMAP, NULL, NULL, NULL);
	if (error)
		return error;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount(mp = %p)\n", mp);
#endif

	/*
	 * Update is not supported
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Find lower node
	 */
	error = pathbuf_copyin(args->umap_target, &pb);
	if (error) {
		return error;
	}
	NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, pb);
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = nd.ni_vp;
	pathbuf_destroy(pb);
#ifdef UMAPFS_DIAGNOSTIC
	printf("vp = %p, check for VDIR...\n", lowerrootvp);
#endif

	if (lowerrootvp->v_type != VDIR) {
		vput(lowerrootvp);
		return (EINVAL);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("mp = %p\n", mp);
#endif

	amp = kmem_zalloc(sizeof(struct umap_mount), KM_SLEEP);
	mp->mnt_data = amp;
	amp->umapm_vfs = lowerrootvp->v_mount;
	if (amp->umapm_vfs->mnt_flag & MNT_LOCAL)
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Now copy in the number of entries and maps for umap mapping.
	 */
	if (args->nentries > MAPFILEENTRIES || args->gnentries > GMAPFILEENTRIES) {
		vput(lowerrootvp);
		return (EINVAL);
	}

	amp->info_nentries = args->nentries;
	amp->info_gnentries = args->gnentries;
	error = copyin(args->mapdata, amp->info_mapdata,
	    2*sizeof(u_long)*args->nentries);
	if (error) {
		vput(lowerrootvp);
		return (error);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:nentries %d\n",args->nentries);
	for (i = 0; i < args->nentries; i++)
		printf("   %ld maps to %ld\n", amp->info_mapdata[i][0],
	 	    amp->info_mapdata[i][1]);
#endif

	error = copyin(args->gmapdata, amp->info_gmapdata,
	    2*sizeof(u_long)*args->gnentries);
	if (error) {
		vput(lowerrootvp);
		return (error);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:gnentries %d\n",args->gnentries);
	for (i = 0; i < args->gnentries; i++)
		printf("\tgroup %ld maps to %ld\n",
		    amp->info_gmapdata[i][0],
	 	    amp->info_gmapdata[i][1]);
#endif

	/*
	 * Make sure the mount point's sufficiently initialized
	 * that the node create call will work.
	 */
	vfs_getnewfsid(mp);
	amp->umapm_size = sizeof(struct umap_node);
	amp->umapm_tag = VT_UMAP;
	amp->umapm_bypass = umap_bypass;
	amp->umapm_vnodeop_p = umap_vnodeop_p;

	/*
	 * fix up umap node for root vnode.
	 */
	VOP_UNLOCK(lowerrootvp);
	error = layer_node_create(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked
	 */
	if (error) {
		vrele(lowerrootvp);
		kmem_free(amp, sizeof(struct umap_mount));
		return error;
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in umapfs_unmount.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_ROOT;
	amp->umapm_rootvp = vp;
	VOP_UNLOCK(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, args->umap_target,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount: lower %s, alias at %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return error;
}
Example No. 27
int
lfs_bmapv(struct lwp *l, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	ump = VFSTOULFS(mntp);
	fs = ump->um_lfs;

	if (fs->lfs_cleaner_thread == NULL)
		fs->lfs_cleaner_thread = curlwp;
	KASSERT(fs->lfs_cleaner_thread == curlwp);

	cnt = blkcnt;

	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				vput(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = lfs_sb_getidaddr(fs);
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = lfs_if_getdaddr(fs, ifp);
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
			    LK_SHARED, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget ino"
				      "%d failed with %d",
				      blkp->bi_inode,error));
				KASSERT(vp == NULL);
				continue;
			} else {
				KASSERT(VOP_ISLOCKED(vp));
				numrefed++;
			}
			ip = VTOI(vp);
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = lfs_sb_getbsize(fs);
		}
	}

	/*
	 * Finish the old file, if there was one.
	 */
	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}
Example No. 28
int
adosfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct adosfs_args *args = data;
	struct adosfsmount *amp;
	int error;
	mode_t accessmode;

	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		amp = VFSTOADOSFS(mp);
		if (amp == NULL)
			return EIO;
		args->uid = amp->uid;
		args->gid = amp->gid;
		args->mask = amp->mask;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EROFS);

	if ((mp->mnt_flag & MNT_UPDATE) && args->fspec == NULL)
		return EOPNOTSUPP;

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	error = namei_simple_user(args->fspec,
				NSM_FOLLOW_NOEMULROOT, &devvp);
	if (error != 0)
		return (error);

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (bdevsw_lookup(devvp->v_rdev) == NULL) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accessmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accessmode |= VWRITE;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(accessmode));
	VOP_UNLOCK(devvp);
	if (error) {
		vrele(devvp);
		return (error);
	}
/* MNT_UPDATE? */
	if ((error = adosfs_mountfs(devvp, mp, l)) != 0) {
		vrele(devvp);
		return (error);
	}
	amp = VFSTOADOSFS(mp);
	amp->uid = args->uid;
	amp->gid = args->gid;
	amp->mask = args->mask;
	return set_statvfs_info(path, UIO_USERSPACE, args->fspec, UIO_USERSPACE,
	    mp->mnt_op->vfs_name, mp, l);
}
Example No. 29
int
lfs_markv(struct lwp *l, fsid_t *fsidp, BLOCK_INFO *blkiov,
    int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t b_daddr;
	int cnt, error;
	int do_again = 0;
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	/* number of blocks/inodes that we have already bwrite'ed */
	int nblkwritten, ninowritten;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	fs = ump->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	maxino = (lfs_fragstoblks(fs, lfs_dino_getblocks(fs, VTOI(fs->lfs_ivnode)->i_din)) -
		      lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs);

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	/*
	 * This seglock is just to prevent the fact that we might have to sleep
	 * from allowing the possibility that our blocks might become
	 * invalid.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	lastino = LFS_UNUSED_INUM;
	nblkwritten = ninowritten = 0;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto err3;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				vput(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
			    LK_EXCLUSIVE | LK_NOWAIT, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
				      " failed with %d (ino %d, segment %d)\n",
				      error, blkp->bi_inode,
				      lfs_dtosn(fs, blkp->bi_daddr)));
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.	(When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				} else
					KASSERT(error == ENOENT);
				KASSERT(vp == NULL);
				ip = NULL;
				continue;
			}

			ip = VTOI(vp);
			numrefed++;
			ninowritten++;
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* Can't clean VU_DIROP directories in case of truncation */
		/* XXX - maybe we should mark removed dirs specially? */
		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
			do_again++;
			continue;
		}

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (lfs_if_getdaddr(fs, ifp) == blkp->bi_daddr) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_CLEANING);
					mutex_exit(&lfs_lock);
				}
				brelse(bp, 0);
			}
			continue;
		}

		b_daddr = 0;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
		{
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
			    lfs_dtosn(fs, blkp->bi_daddr))
			{
				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %jx vs %jx\n",
				      (intmax_t)blkp->bi_daddr, (intmax_t)LFS_DBTOFSB(fs, b_daddr)));
			}
			do_again++;
			continue;
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = lfs_sb_getbsize(fs);
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %jd wrong"
			      " size (%ld != %d), try again\n",
			      blkp->bi_inode, (intmax_t)blkp->bi_lbn,
			      (long) obsize, blkp->bi_size));
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.	 If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
		} else {
			/* Indirect block or ifile */
			if (blkp->bi_size != lfs_sb_getbsize(fs) &&
			    ip->i_number != LFS_IFILE_INUM)
				panic("lfs_markv: partial indirect block?"
				    " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.	So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;

		nblkwritten++;
		/*
		 * XXX should account indirect blocks and ifile pages as well
		 */
		if (nblkwritten + lfs_lblkno(fs, ninowritten * DINOSIZE(fs))
		    > LFS_MARKV_MAX_BLOCKS) {
			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
			      nblkwritten, ninowritten));
			lfs_segwrite(mntp, SEGM_CLEAN);
			nblkwritten = ninowritten = 0;
		}
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif
	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
	      nblkwritten, ninowritten));

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp, false, NULL);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

err2:
	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));

	/*
	 * XXX we're here because copyin() failed.
	 * XXX it means that we can't trust the cleanerd.  too bad.
	 * XXX how can we recover from this?
	 */

err3:
	/*
	 * XXX should do segwrite here anyway?
	 */

	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		--numrefed;
	}

	lfs_segunlock(fs);
	vfs_unbusy(mntp, false, NULL);
#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif

	return (error);
}
Example No. 30
int
v7fs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
    struct lwp *l = curlwp;
    struct v7fs_args *args = data;
    struct v7fs_mount *v7fsmount = (void *)mp->mnt_data;
    struct vnode *devvp = NULL;
    int error = 0;
    bool update = mp->mnt_flag & MNT_UPDATE;

    DPRINTF("mnt_flag=%x %s\n", mp->mnt_flag, update ? "update" : "");

    if (*data_len < sizeof(*args))
        return EINVAL;

    if (mp->mnt_flag & MNT_GETARGS) {
        if (!v7fsmount)
            return EIO;
        args->fspec = NULL;
        args->endian = v7fsmount->core->endian;
        *data_len = sizeof(*args);
        return 0;
    }

    DPRINTF("args->fspec=%s endian=%d\n", args->fspec, args->endian);
    if (args->fspec == NULL) {
        /* nothing to do. */
        return EINVAL;
    }

    if (args->fspec != NULL) {
        /* Look up the name and verify that it's sane. */
        error = namei_simple_user(args->fspec,
                                  NSM_FOLLOW_NOEMULROOT, &devvp);
        if (error != 0)
            return (error);
        DPRINTF("mount device=%lx\n", (long)devvp->v_rdev);

        if (!update) {
            /*
             * Be sure this is a valid block device
             */
            if (devvp->v_type != VBLK)
                error = ENOTBLK;
            else if (bdevsw_lookup(devvp->v_rdev) == NULL)
                error = ENXIO;
        } else {
            KDASSERT(v7fsmount);
            /*
             * Be sure we're still naming the same device
             * used for our initial mount
             */
            if (devvp != v7fsmount->devvp) {
                DPRINTF("devvp %p != %p rootvp=%p\n", devvp,
                        v7fsmount->devvp, rootvp);
                if (rootvp == v7fsmount->devvp) {
                    vrele(devvp);
                    devvp = rootvp;
                    vref(devvp);
                } else {
                    error = EINVAL;
                }
            }
        }
    }

    /*
     * If mount by non-root, then verify that user has necessary
     * permissions on the device.
     *
     * Permission to update a mount is checked higher, so here we presume
     * updating the mount is okay (for example, as far as securelevel goes)
     * which leaves us with the normal check.
     */
    if (error == 0) {
        int accessmode = VREAD;
        if (update ?
                (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
                (mp->mnt_flag & MNT_RDONLY) == 0)
            accessmode |= VWRITE;
        error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
                                       KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
                                       KAUTH_ARG(accessmode));
    }

    if (error) {
        vrele(devvp);
        return error;
    }

    if (!update) {
        if ((error = v7fs_openfs(devvp, mp, l))) {
            vrele(devvp);
            return error;
        }

        if ((error = v7fs_mountfs(devvp, mp, args->endian))) {
            v7fs_closefs(devvp, mp);
            VOP_UNLOCK(devvp);
            vrele(devvp);
            return error;
        }
        VOP_UNLOCK(devvp);
    } else if (mp->mnt_flag & MNT_RDONLY) {
        /* XXX: r/w -> read only */
    }

    return set_statvfs_info(path, UIO_USERSPACE, args->fspec, UIO_USERSPACE,
                            mp->mnt_op->vfs_name, mp, l);
}