Example #1
static int
getkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t	*kctx = GET_KCTX_PZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, kctx->auk_info.ai_auid);
	STRUCT_FSET(info, ai_mask, kctx->auk_info.ai_namask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, kctx->auk_info.ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else {
		STRUCT_FSET(info, ai_termid.at_port,
		    kctx->auk_info.ai_termid.at_port);
	}
#else
	STRUCT_FSET(info, ai_termid.at_port,
	    kctx->auk_info.ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type,
	    kctx->auk_info.ai_termid.at_type);
	STRUCT_FSET(info, ai_termid.at_addr[0],
	    kctx->auk_info.ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1],
	    kctx->auk_info.ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2],
	    kctx->auk_info.ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3],
	    kctx->auk_info.ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, kctx->auk_info.ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
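
The examples in this collection all follow the same bi-modal access pattern from <sys/model.h>: STRUCT_DECL() reserves room for both the ILP32 and the native LP64 layout of the named structure, STRUCT_INIT() selects the layout that matches the caller's data model, and STRUCT_SIZE()/STRUCT_BUF()/STRUCT_FSET() then operate on whichever layout was selected, so a single copyin()/copyout() moves an object of the correct size. Below is a minimal sketch of the copyout direction, boiled down from getkaudit() above; only one field is set for brevity (the real routines populate every field first), and the usual kernel headers are assumed.

static int
copyout_bimodal(caddr_t info_p, int len, au_asid_t asid)
{
	STRUCT_DECL(auditinfo_addr, info);	/* room for both layouts */

	STRUCT_INIT(info, get_udatamodel());	/* select the caller's layout */

	if (len < STRUCT_SIZE(info))		/* size of the chosen layout */
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_asid, asid);	/* field access follows suit */

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
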
Example #2
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 */
int
getaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	if (secpolicy_audit_getattr(CRED(), B_FALSE) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	ainfo = crgetauinfo(CRED());

	if (ainfo == NULL)
		return (EINVAL);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.at_port, dev);
	} else
		STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.at_type, ainfo->ai_termid.at_type);
	STRUCT_FSET(info, ai_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(info, ai_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(info, ai_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
Example #3
/*
 * Get the audit state information from the current process.
 * Return EFAULT if copyout fails.
 */
static int
getaudit(caddr_t info_p)
{
	STRUCT_DECL(auditinfo, info);
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	if (secpolicy_audit_getattr(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	ainfo = crgetauinfo(CRED());
	if (ainfo == NULL)
		return (EINVAL);

	/* trying to read a process with an IPv6 address? */
	if (ainfo->ai_termid.at_type == AU_IPv6)
		return (EOVERFLOW);

	STRUCT_FSET(info, ai_auid, ainfo->ai_auid);
	STRUCT_FSET(info, ai_mask, ainfo->ai_mask);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			return (EOVERFLOW);
		}
		STRUCT_FSET(info, ai_termid.port, dev);
	} else
		STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(info, ai_termid.port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(info, ai_termid.machine, ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(info, ai_asid, ainfo->ai_asid);

	if (copyout(STRUCT_BUF(info), info_p, STRUCT_SIZE(info)))
		return (EFAULT);

	return (0);
}
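
For reference, the user-level entry point that lands in the kernel getaudit_addr() above is getaudit_addr(2). A minimal sketch, assuming the Solaris/illumos BSM interface declared in <bsm/audit.h> (the privilege check done by secpolicy_audit_getattr() still applies):

#include <bsm/audit.h>
#include <stdio.h>

int
main(void)
{
	auditinfo_addr_t ai;

	if (getaudit_addr(&ai, sizeof (ai)) != 0) {
		perror("getaudit_addr");
		return (1);
	}
	(void) printf("auid=%d asid=%d termid type=%d\n",
	    (int)ai.ai_auid, (int)ai.ai_asid, (int)ai.ai_termid.at_type);
	return (0);
}
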
Example #4
static int
getqctrl(caddr_t data)
{
	au_kcontext_t	*kctx = GET_KCTX_PZ;
	STRUCT_DECL(au_qctrl, qctrl);
	STRUCT_INIT(qctrl, get_udatamodel());

	mutex_enter(&(kctx->auk_queue.lock));
	STRUCT_FSET(qctrl, aq_hiwater, kctx->auk_queue.hiwater);
	STRUCT_FSET(qctrl, aq_lowater, kctx->auk_queue.lowater);
	STRUCT_FSET(qctrl, aq_bufsz, kctx->auk_queue.bufsz);
	STRUCT_FSET(qctrl, aq_delay, kctx->auk_queue.delay);
	mutex_exit(&(kctx->auk_queue.lock));

	if (copyout(STRUCT_BUF(qctrl), data, STRUCT_SIZE(qctrl)))
		return (EFAULT);

	return (0);
}
Example #5
/*ARGSUSED*/
static int
kcpc_ioctl(dev_t dev, int cmd, intptr_t data, int flags, cred_t *cr, int *rvp)
{
	kthread_t	*t = curthread;
	processorid_t	cpuid;
	void		*udata1 = NULL;
	void		*udata2 = NULL;
	void		*udata3 = NULL;
	int		error;
	int		code;

	STRUCT_DECL(__cpc_args, args);

	STRUCT_INIT(args, flags);

	if (curthread->t_bind_cpu != getminor(dev))
		return (EAGAIN);  /* someone unbound it? */

	cpuid = getminor(dev);

	if (cmd == CPCIO_BIND || cmd == CPCIO_SAMPLE) {
		if (copyin((void *)data, STRUCT_BUF(args),
		    STRUCT_SIZE(args)) == -1)
			return (EFAULT);

		udata1 = STRUCT_FGETP(args, udata1);
		udata2 = STRUCT_FGETP(args, udata2);
		udata3 = STRUCT_FGETP(args, udata3);
	}

	switch (cmd) {
	case CPCIO_BIND:
		/*
		 * udata1 = pointer to packed nvlist buffer
		 * udata2 = size of packed nvlist buffer
		 * udata3 = User addr to return error subcode in.
		 */
		if (t->t_cpc_set != NULL) {
			(void) kcpc_unbind(t->t_cpc_set);
			ASSERT(t->t_cpc_set == NULL);
		}

		if ((error = kcpc_copyin_set(&t->t_cpc_set, udata1,
		    (size_t)udata2)) != 0) {
			return (error);
		}

		if ((error = kcpc_verify_set(t->t_cpc_set)) != 0) {
			kcpc_free_set(t->t_cpc_set);
			t->t_cpc_set = NULL;
			if (copyout(&error, udata3, sizeof (error)) == -1)
				return (EFAULT);
			return (EINVAL);
		}

		if ((error = kcpc_bind_cpu(t->t_cpc_set, cpuid, &code)) != 0) {
			kcpc_free_set(t->t_cpc_set);
			t->t_cpc_set = NULL;
			/*
			 * Subcodes are only returned for EINVAL and EACCES.
			 */
			if ((error == EINVAL || error == EACCES) &&
			    copyout(&code, udata3, sizeof (code)) == -1)
				return (EFAULT);
			return (error);
		}

		return (0);
	case CPCIO_SAMPLE:
		/*
		 * udata1 = pointer to user's buffer
		 * udata2 = pointer to user's hrtime
		 * udata3 = pointer to user's tick
		 */
		/*
		 * Only CPU-bound sets may be sampled via the ioctl(). If this
		 * set has no CPU-bound context, return an error.
		 */
		if (t->t_cpc_set == NULL)
			return (EINVAL);
		if ((error = kcpc_sample(t->t_cpc_set, udata1, udata2,
		    udata3)) != 0)
			return (error);
		return (0);
	case CPCIO_RELE:
		if (t->t_cpc_set == NULL)
			return (EINVAL);
		return (kcpc_unbind(t->t_cpc_set));
	default:
		return (EINVAL);
	}
}
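
One detail worth noting in kcpc_ioctl() above: pointer-valued members of a bi-modal structure are read with STRUCT_FGETP() (and written with STRUCT_FSETP()), because the stored pointer is a 32-bit caddr32_t in the ILP32 layout and a full native pointer otherwise; the macro widens it as it is fetched. A sketch of just that step, keeping the copyin() error convention of the example above (the helper name is illustrative, not from the source):

static int
fetch_user_pointers(intptr_t data, int flags, void **u1, void **u2, void **u3)
{
	STRUCT_DECL(__cpc_args, args);

	STRUCT_INIT(args, flags);
	if (copyin((void *)data, STRUCT_BUF(args), STRUCT_SIZE(args)) == -1)
		return (EFAULT);

	/* each pointer is widened to the native size as it is fetched */
	*u1 = STRUCT_FGETP(args, udata1);
	*u2 = STRUCT_FGETP(args, udata2);
	*u3 = STRUCT_FGETP(args, udata3);

	return (0);
}
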
Example #6
static int
setsmask(caddr_t data)
{
	STRUCT_DECL(auditinfo, user_info);
	struct proc *p;
	const auditinfo_addr_t	*ainfo;
	model_t	model;

	/* setsmask not applicable in non-global zones without perzone policy */
	if (!(audit_policy & AUDIT_PERZONE) && (!INGLOBALZONE(curproc)))
		return (EINVAL);

	model = get_udatamodel();
	STRUCT_INIT(user_info, model);

	if (copyin(data, STRUCT_BUF(user_info), STRUCT_SIZE(user_info)))
		return (EFAULT);

	mutex_enter(&pidlock);	/* lock the process queue against updates */
	for (p = practive; p != NULL; p = p->p_next) {
		cred_t	*cr;

		/* if in non-global zone only modify processes in same zone */
		if (!HASZONEACCESS(curproc, p->p_zone->zone_id))
			continue;

		mutex_enter(&p->p_lock);	/* so process doesn't go away */

		/* skip system processes and ones being created or going away */
		if (p->p_stat == SIDL || p->p_stat == SZOMB ||
		    (p->p_flag & (SSYS | SEXITING | SEXITLWPS))) {
			mutex_exit(&p->p_lock);
			continue;
		}

		mutex_enter(&p->p_crlock);
		crhold(cr = p->p_cred);
		mutex_exit(&p->p_crlock);
		ainfo = crgetauinfo(cr);
		if (ainfo == NULL) {
			mutex_exit(&p->p_lock);
			crfree(cr);
			continue;
		}

		if (ainfo->ai_asid == STRUCT_FGET(user_info, ai_asid)) {
			au_mask_t	mask;
			int		err;

			/*
			 * Here's a process which matches the specified asid.
			 * If its mask doesn't already match the new mask,
			 * save the new mask in the pad, to be picked up
			 * next syscall.
			 */
			mask = STRUCT_FGET(user_info, ai_mask);
			err = bcmp(&mask, &ainfo->ai_mask, sizeof (au_mask_t));
			crfree(cr);
			if (err != 0) {
				struct p_audit_data *pad = P2A(p);
				ASSERT(pad != NULL);

				mutex_enter(&(pad->pad_lock));
				pad->pad_flags |= PAD_SETMASK;
				pad->pad_newmask = mask;
				mutex_exit(&(pad->pad_lock));

				/*
				 * No need to call set_proc_pre_sys(), since
				 * t_pre_sys is ALWAYS on when audit is
				 * enabled...due to syscall auditing.
				 */
			}
		} else {
			crfree(cr);
		}
		mutex_exit(&p->p_lock);
	}
	mutex_exit(&pidlock);

	return (0);
}
Example #7
static int
setqctrl(caddr_t data)
{
	au_kcontext_t	*kctx;
	struct au_qctrl qctrl_tmp;
	STRUCT_DECL(au_qctrl, qctrl);
	STRUCT_INIT(qctrl, get_udatamodel());

	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);
	kctx = GET_KCTX_NGZ;

	if (copyin(data, STRUCT_BUF(qctrl), STRUCT_SIZE(qctrl)))
		return (EFAULT);

	qctrl_tmp.aq_hiwater = (size_t)STRUCT_FGET(qctrl, aq_hiwater);
	qctrl_tmp.aq_lowater = (size_t)STRUCT_FGET(qctrl, aq_lowater);
	qctrl_tmp.aq_bufsz = (size_t)STRUCT_FGET(qctrl, aq_bufsz);
	qctrl_tmp.aq_delay = (clock_t)STRUCT_FGET(qctrl, aq_delay);

	/* enforce sane values */

	if (qctrl_tmp.aq_hiwater <= qctrl_tmp.aq_lowater)
		return (EINVAL);

	if (qctrl_tmp.aq_hiwater < AQ_LOWATER)
		return (EINVAL);

	if (qctrl_tmp.aq_hiwater > AQ_MAXHIGH)
		return (EINVAL);

	if (qctrl_tmp.aq_bufsz < AQ_BUFSZ)
		return (EINVAL);

	if (qctrl_tmp.aq_bufsz > AQ_MAXBUFSZ)
		return (EINVAL);

	if (qctrl_tmp.aq_delay == 0)
		return (EINVAL);

	if (qctrl_tmp.aq_delay > AQ_MAXDELAY)
		return (EINVAL);

	/* update everything at once so things are consistent */
	mutex_enter(&(kctx->auk_queue.lock));
	kctx->auk_queue.hiwater = qctrl_tmp.aq_hiwater;
	kctx->auk_queue.lowater = qctrl_tmp.aq_lowater;
	kctx->auk_queue.bufsz = qctrl_tmp.aq_bufsz;
	kctx->auk_queue.delay = qctrl_tmp.aq_delay;

	if (kctx->auk_queue.rd_block &&
	    kctx->auk_queue.cnt > kctx->auk_queue.lowater)
		cv_broadcast(&(kctx->auk_queue.read_cv));

	if (kctx->auk_queue.wt_block &&
	    kctx->auk_queue.cnt < kctx->auk_queue.hiwater)
		cv_broadcast(&(kctx->auk_queue.write_cv));

	mutex_exit(&(kctx->auk_queue.lock));

	return (0);
}
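
getqctrl() (Example #4) and setqctrl() (Example #7) are reached from user level through auditon(2) with the A_GETQCTRL/A_SETQCTRL commands. A hedged user-level sketch, assuming the BSM interface in <bsm/audit.h> and the struct au_qctrl field names used by the kernel code above:

#include <bsm/audit.h>
#include <stdio.h>

int
main(void)
{
	struct au_qctrl qctrl;

	if (auditon(A_GETQCTRL, (caddr_t)&qctrl, sizeof (qctrl)) != 0) {
		perror("auditon(A_GETQCTRL)");
		return (1);
	}
	(void) printf("hiwater=%ld lowater=%ld bufsz=%ld delay=%ld\n",
	    (long)qctrl.aq_hiwater, (long)qctrl.aq_lowater,
	    (long)qctrl.aq_bufsz, (long)qctrl.aq_delay);
	return (0);
}
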
Example #8
/*
 * The host address for AUDIT_PERZONE == 0 is that of the global
 * zone and for local zones it is of the current zone.
 */
static int
setkaudit(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	model_t model;
	au_kcontext_t	*kctx;

	if (!(audit_policy & AUDIT_PERZONE) && !INGLOBALZONE(curproc))
		return (EINVAL);

	kctx = GET_KCTX_NGZ;

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	if ((STRUCT_FGET(info, ai_termid.at_type) != AU_IPv4) &&
	    (STRUCT_FGET(info, ai_termid.at_type) != AU_IPv6))
		return (EINVAL);

	/* Set audit mask, termid and session id as specified */
	kctx->auk_info.ai_auid = STRUCT_FGET(info, ai_auid);
	kctx->auk_info.ai_namask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		kctx->auk_info.ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		kctx->auk_info.ai_termid.at_port =
		    STRUCT_FGET(info, ai_termid.at_port);
#else
	kctx->auk_info.ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	kctx->auk_info.ai_termid.at_type = STRUCT_FGET(info, ai_termid.at_type);
	bzero(&kctx->auk_info.ai_termid.at_addr[0],
	    sizeof (kctx->auk_info.ai_termid.at_addr));
	kctx->auk_info.ai_termid.at_addr[0] =
	    STRUCT_FGET(info, ai_termid.at_addr[0]);
	kctx->auk_info.ai_termid.at_addr[1] =
	    STRUCT_FGET(info, ai_termid.at_addr[1]);
	kctx->auk_info.ai_termid.at_addr[2] =
	    STRUCT_FGET(info, ai_termid.at_addr[2]);
	kctx->auk_info.ai_termid.at_addr[3] =
	    STRUCT_FGET(info, ai_termid.at_addr[3]);
	kctx->auk_info.ai_asid = STRUCT_FGET(info, ai_asid);

	if (kctx->auk_info.ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(
	    ((in6_addr_t *)kctx->auk_info.ai_termid.at_addr))) {
		kctx->auk_info.ai_termid.at_type = AU_IPv4;
		kctx->auk_info.ai_termid.at_addr[0] =
		    kctx->auk_info.ai_termid.at_addr[3];
		kctx->auk_info.ai_termid.at_addr[1] = 0;
		kctx->auk_info.ai_termid.at_addr[2] = 0;
		kctx->auk_info.ai_termid.at_addr[3] = 0;
	}
	if (kctx->auk_info.ai_termid.at_type == AU_IPv6)
		kctx->auk_hostaddr_valid = IN6_IS_ADDR_UNSPECIFIED(
		    (in6_addr_t *)kctx->auk_info.ai_termid.at_addr) ? 0 : 1;
	else
		kctx->auk_hostaddr_valid =
		    (kctx->auk_info.ai_termid.at_addr[0] ==
		    htonl(INADDR_ANY)) ? 0 : 1;

	return (0);
}
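
The tail of setkaudit() collapses an IPv4-mapped IPv6 terminal address (::ffff:a.b.c.d) back to plain AU_IPv4, keeping the 32-bit address that sits in the last word of at_addr. A standalone user-level illustration of the same test, using only the standard <netinet/in.h> macros (not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int
main(void)
{
	struct in6_addr a6;
	uint32_t words[4];

	(void) inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);
	(void) memcpy(words, &a6, sizeof (words));

	if (IN6_IS_ADDR_V4MAPPED(&a6)) {
		/*
		 * The IPv4 address is the last 32-bit word, already in
		 * network byte order, just like at_addr[3] above.
		 */
		struct in_addr a4;

		a4.s_addr = words[3];
		(void) printf("collapsed to IPv4 %s\n", inet_ntoa(a4));
	}
	return (0);
}
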
Example #9
/*
 * Set the audit state information for the current process.
 * Return EFAULT if copyin fails.
 */
int
setaudit_addr(caddr_t info_p, int len)
{
	STRUCT_DECL(auditinfo_addr, info);
	proc_t *p;
	cred_t	*newcred;
	model_t	model;
	int i;
	int type;
	auditinfo_addr_t *ainfo;

	if (secpolicy_audit_config(CRED()) != 0)
		return (EPERM);

	model = get_udatamodel();
	STRUCT_INIT(info, model);

	if (len < STRUCT_SIZE(info))
		return (EOVERFLOW);

	if (copyin(info_p, STRUCT_BUF(info), STRUCT_SIZE(info)))
		return (EFAULT);

	type = STRUCT_FGET(info, ai_termid.at_type);
	if ((type != AU_IPv4) && (type != AU_IPv6))
		return (EINVAL);

	newcred = cralloc();
	if ((ainfo = crgetauinfo_modifiable(newcred)) == NULL) {
		crfree(newcred);
		return (EINVAL);
	}

	/* grab p_crlock and switch to new cred */
	p = curproc;
	mutex_enter(&p->p_crlock);
	crcopy_to(p->p_cred, newcred);
	p->p_cred = newcred;

	/* Set audit mask, id, termid and session id as specified */
	ainfo->ai_auid = STRUCT_FGET(info, ai_auid);
	ainfo->ai_mask = STRUCT_FGET(info, ai_mask);
#ifdef _LP64
	/* only convert to 64 bit if coming from a 32 bit binary */
	if (model == DATAMODEL_ILP32)
		ainfo->ai_termid.at_port =
		    DEVEXPL(STRUCT_FGET(info, ai_termid.at_port));
	else
		ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#else
	ainfo->ai_termid.at_port = STRUCT_FGET(info, ai_termid.at_port);
#endif
	ainfo->ai_termid.at_type = type;
	bzero(&ainfo->ai_termid.at_addr[0], sizeof (ainfo->ai_termid.at_addr));
	for (i = 0; i < (type/sizeof (int)); i++)
		ainfo->ai_termid.at_addr[i] =
		    STRUCT_FGET(info, ai_termid.at_addr[i]);

	if (ainfo->ai_termid.at_type == AU_IPv6 &&
	    IN6_IS_ADDR_V4MAPPED(((in6_addr_t *)ainfo->ai_termid.at_addr))) {
		ainfo->ai_termid.at_type = AU_IPv4;
		ainfo->ai_termid.at_addr[0] = ainfo->ai_termid.at_addr[3];
		ainfo->ai_termid.at_addr[1] = 0;
		ainfo->ai_termid.at_addr[2] = 0;
		ainfo->ai_termid.at_addr[3] = 0;
	}

	ainfo->ai_asid = STRUCT_FGET(info, ai_asid);

	/* unlock and broadcast the cred changes */
	mutex_exit(&p->p_crlock);
	crset(p, newcred);

	return (0);
}
Example #10
static int
getpinfo_addr(caddr_t data, int len)
{
	STRUCT_DECL(auditpinfo_addr, apinfo);
	proc_t *proc;
	const auditinfo_addr_t	*ainfo;
	model_t	model;
	cred_t	*cr, *newcred;

	model = get_udatamodel();
	STRUCT_INIT(apinfo, model);

	if (len < STRUCT_SIZE(apinfo))
		return (EOVERFLOW);

	if (copyin(data, STRUCT_BUF(apinfo), STRUCT_SIZE(apinfo)))
		return (EFAULT);

	newcred = cralloc();

	mutex_enter(&pidlock);
	if ((proc = prfind(STRUCT_FGET(apinfo, ap_pid))) == NULL) {
		mutex_exit(&pidlock);
		crfree(newcred);
		return (ESRCH);
	}
	mutex_enter(&proc->p_lock);	/* so process doesn't go away */
	mutex_exit(&pidlock);

	audit_update_context(proc, newcred);	/* make sure it's up-to-date */

	mutex_enter(&proc->p_crlock);
	crhold(cr = proc->p_cred);
	mutex_exit(&proc->p_crlock);
	mutex_exit(&proc->p_lock);

	ainfo = crgetauinfo(cr);
	if (ainfo == NULL) {
		crfree(cr);
		return (EINVAL);
	}

	STRUCT_FSET(apinfo, ap_auid, ainfo->ai_auid);
	STRUCT_FSET(apinfo, ap_asid, ainfo->ai_asid);
#ifdef _LP64
	if (model == DATAMODEL_ILP32) {
		dev32_t dev;
		/* convert internal 64 bit form to 32 bit version */
		if (cmpldev(&dev, ainfo->ai_termid.at_port) == 0) {
			crfree(cr);
			return (EOVERFLOW);
		}
		STRUCT_FSET(apinfo, ap_termid.at_port, dev);
	} else
		STRUCT_FSET(apinfo, ap_termid.at_port,
		    ainfo->ai_termid.at_port);
#else
	STRUCT_FSET(apinfo, ap_termid.at_port, ainfo->ai_termid.at_port);
#endif
	STRUCT_FSET(apinfo, ap_termid.at_type, ainfo->ai_termid.at_type);
	STRUCT_FSET(apinfo, ap_termid.at_addr[0], ainfo->ai_termid.at_addr[0]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[1], ainfo->ai_termid.at_addr[1]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[2], ainfo->ai_termid.at_addr[2]);
	STRUCT_FSET(apinfo, ap_termid.at_addr[3], ainfo->ai_termid.at_addr[3]);
	STRUCT_FSET(apinfo, ap_mask, ainfo->ai_mask);

	crfree(cr);

	if (copyout(STRUCT_BUF(apinfo), data, STRUCT_SIZE(apinfo)))
		return (EFAULT);

	return (0);
}
Example #11
/*
 * msgctl system call.
 *
 * gets q lock (via ipc_lookup), releases before return.
 * may call users of msg_lock
 */
static int
msgctl(int msgid, int cmd, void *arg)
{
    STRUCT_DECL(msqid_ds, ds);		/* SVR4 queue work area */
    kmsqid_t		*qp;		/* ptr to associated q */
    int			error;
    struct	cred		*cr;
    model_t	mdl = get_udatamodel();
    struct msqid_ds64	ds64;
    kmutex_t		*lock;
    proc_t			*pp = curproc;

    STRUCT_INIT(ds, mdl);
    cr = CRED();

    /*
     * Perform pre- or non-lookup actions (e.g. copyins, RMID).
     */
    switch (cmd) {
    case IPC_SET:
        if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
            return (set_errno(EFAULT));
        break;

    case IPC_SET64:
        if (copyin(arg, &ds64, sizeof (struct msqid_ds64)))
            return (set_errno(EFAULT));
        break;

    case IPC_RMID:
        if (error = ipc_rmid(msq_svc, msgid, cr))
            return (set_errno(error));
        return (0);
    }

    /*
     * get msqid_ds for this msgid
     */
    if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL)
        return (set_errno(EINVAL));

    switch (cmd) {
    case IPC_SET:
        if (STRUCT_FGET(ds, msg_qbytes) > qp->msg_qbytes &&
                secpolicy_ipc_config(cr) != 0) {
            mutex_exit(lock);
            return (set_errno(EPERM));
        }
        if (error = ipcperm_set(msq_svc, cr, &qp->msg_perm,
                                &STRUCT_BUF(ds)->msg_perm, mdl)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        qp->msg_qbytes = STRUCT_FGET(ds, msg_qbytes);
        qp->msg_ctime = gethrestime_sec();
        break;

    case IPC_STAT:
        if (error = ipcperm_access(&qp->msg_perm, MSG_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }

        if (qp->msg_rcv_cnt)
            qp->msg_perm.ipc_mode |= MSG_RWAIT;
        if (qp->msg_snd_cnt)
            qp->msg_perm.ipc_mode |= MSG_WWAIT;
        ipcperm_stat(&STRUCT_BUF(ds)->msg_perm, &qp->msg_perm, mdl);
        qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
        STRUCT_FSETP(ds, msg_first, NULL); 	/* kernel addr */
        STRUCT_FSETP(ds, msg_last, NULL);
        STRUCT_FSET(ds, msg_cbytes, qp->msg_cbytes);
        STRUCT_FSET(ds, msg_qnum, qp->msg_qnum);
        STRUCT_FSET(ds, msg_qbytes, qp->msg_qbytes);
        STRUCT_FSET(ds, msg_lspid, qp->msg_lspid);
        STRUCT_FSET(ds, msg_lrpid, qp->msg_lrpid);
        STRUCT_FSET(ds, msg_stime, qp->msg_stime);
        STRUCT_FSET(ds, msg_rtime, qp->msg_rtime);
        STRUCT_FSET(ds, msg_ctime, qp->msg_ctime);
        break;

    case IPC_SET64:
        mutex_enter(&pp->p_lock);
        if ((ds64.msgx_qbytes > qp->msg_qbytes) &&
                secpolicy_ipc_config(cr) != 0 &&
                rctl_test(rc_process_msgmnb, pp->p_rctls, pp,
                          ds64.msgx_qbytes, RCA_SAFE) & RCT_DENY) {
            mutex_exit(&pp->p_lock);
            mutex_exit(lock);
            return (set_errno(EPERM));
        }
        mutex_exit(&pp->p_lock);
        if (error = ipcperm_set64(msq_svc, cr, &qp->msg_perm,
                                  &ds64.msgx_perm)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        qp->msg_qbytes = ds64.msgx_qbytes;
        qp->msg_ctime = gethrestime_sec();
        break;

    case IPC_STAT64:
        if (qp->msg_rcv_cnt)
            qp->msg_perm.ipc_mode |= MSG_RWAIT;
        if (qp->msg_snd_cnt)
            qp->msg_perm.ipc_mode |= MSG_WWAIT;
        ipcperm_stat64(&ds64.msgx_perm, &qp->msg_perm);
        qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT);
        ds64.msgx_cbytes = qp->msg_cbytes;
        ds64.msgx_qnum = qp->msg_qnum;
        ds64.msgx_qbytes = qp->msg_qbytes;
        ds64.msgx_lspid = qp->msg_lspid;
        ds64.msgx_lrpid = qp->msg_lrpid;
        ds64.msgx_stime = qp->msg_stime;
        ds64.msgx_rtime = qp->msg_rtime;
        ds64.msgx_ctime = qp->msg_ctime;
        break;

    default:
        mutex_exit(lock);
        return (set_errno(EINVAL));
    }

    mutex_exit(lock);

    /*
     * Do copyout last (after releasing mutex).
     */
    switch (cmd) {
    case IPC_STAT:
        if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
            return (set_errno(EFAULT));
        break;

    case IPC_STAT64:
        if (copyout(&ds64, arg, sizeof (struct msqid_ds64)))
            return (set_errno(EFAULT));
        break;
    }

    return (0);
}
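
The IPC_STAT and IPC_RMID branches above are the ones exercised by ordinary System V message-queue clients. A minimal user-level counterpart (standard API, independent of the kernel source):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

int
main(void)
{
	struct msqid_ds ds;
	int id;

	if ((id = msgget(IPC_PRIVATE, IPC_CREAT | 0600)) == -1) {
		perror("msgget");
		return (1);
	}
	if (msgctl(id, IPC_STAT, &ds) == -1) {	/* kernel IPC_STAT path */
		perror("msgctl(IPC_STAT)");
		return (1);
	}
	(void) printf("qbytes=%lu qnum=%lu\n",
	    (unsigned long)ds.msg_qbytes, (unsigned long)ds.msg_qnum);

	(void) msgctl(id, IPC_RMID, NULL);	/* kernel IPC_RMID path */
	return (0);
}
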
Example #12
/*
 * semctl - Semctl system call.
 */
static int
semctl(int semid, uint_t semnum, int cmd, uintptr_t arg)
{
    ksemid_t		*sp;	/* ptr to semaphore header */
    struct sem		*p;	/* ptr to semaphore */
    unsigned int		i;	/* loop control */
    ushort_t		*vals, *vp;
    size_t			vsize = 0;
    int			error = 0;
    int			retval = 0;
    struct cred		*cr;
    kmutex_t		*lock;
    model_t			mdl = get_udatamodel();
    STRUCT_DECL(semid_ds, sid);
    struct semid_ds64	ds64;

    STRUCT_INIT(sid, mdl);
    cr = CRED();

    /*
     * Perform pre- or non-lookup actions (e.g. copyins, RMID).
     */
    switch (cmd) {
    case IPC_SET:
        if (copyin((void *)arg, STRUCT_BUF(sid), STRUCT_SIZE(sid)))
            return (set_errno(EFAULT));
        break;

    case IPC_SET64:
        if (copyin((void *)arg, &ds64, sizeof (struct semid_ds64)))
            return (set_errno(EFAULT));
        break;

    case SETALL:
        if ((lock = ipc_lookup(sem_svc, semid,
                               (kipc_perm_t **)&sp)) == NULL)
            return (set_errno(EINVAL));
        vsize = sp->sem_nsems * sizeof (*vals);
        mutex_exit(lock);

        /* allocate space to hold all semaphore values */
        vals = kmem_alloc(vsize, KM_SLEEP);

        if (copyin((void *)arg, vals, vsize)) {
            kmem_free(vals, vsize);
            return (set_errno(EFAULT));
        }
        break;

    case IPC_RMID:
        if (error = ipc_rmid(sem_svc, semid, cr))
            return (set_errno(error));
        return (0);
    }

    if ((lock = ipc_lookup(sem_svc, semid, (kipc_perm_t **)&sp)) == NULL) {
        if (vsize != 0)
            kmem_free(vals, vsize);
        return (set_errno(EINVAL));
    }
    switch (cmd) {
    /* Set ownership and permissions. */
    case IPC_SET:

        if (error = ipcperm_set(sem_svc, cr, &sp->sem_perm,
                                &STRUCT_BUF(sid)->sem_perm, mdl)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        sp->sem_ctime = gethrestime_sec();
        mutex_exit(lock);
        return (0);

    /* Get semaphore data structure. */
    case IPC_STAT:

        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }

        ipcperm_stat(&STRUCT_BUF(sid)->sem_perm, &sp->sem_perm, mdl);
        STRUCT_FSETP(sid, sem_base, NULL);	/* kernel addr */
        STRUCT_FSET(sid, sem_nsems, sp->sem_nsems);
        STRUCT_FSET(sid, sem_otime, sp->sem_otime);
        STRUCT_FSET(sid, sem_ctime, sp->sem_ctime);
        STRUCT_FSET(sid, sem_binary, sp->sem_binary);
        mutex_exit(lock);

        if (copyout(STRUCT_BUF(sid), (void *)arg, STRUCT_SIZE(sid)))
            return (set_errno(EFAULT));
        return (0);

    case IPC_SET64:

        if (error = ipcperm_set64(sem_svc, cr, &sp->sem_perm,
                                  &ds64.semx_perm)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        sp->sem_ctime = gethrestime_sec();
        mutex_exit(lock);
        return (0);

    case IPC_STAT64:

        ipcperm_stat64(&ds64.semx_perm, &sp->sem_perm);
        ds64.semx_nsems = sp->sem_nsems;
        ds64.semx_otime = sp->sem_otime;
        ds64.semx_ctime = sp->sem_ctime;

        mutex_exit(lock);
        if (copyout(&ds64, (void *)arg, sizeof (struct semid_ds64)))
            return (set_errno(EFAULT));

        return (0);

    /* Get # of processes sleeping for greater semval. */
    case GETNCNT:
        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        if (semnum >= sp->sem_nsems) {
            mutex_exit(lock);
            return (set_errno(EINVAL));
        }
        retval = sp->sem_base[semnum].semncnt;
        mutex_exit(lock);
        return (retval);

    /* Get pid of last process to operate on semaphore. */
    case GETPID:
        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        if (semnum >= sp->sem_nsems) {
            mutex_exit(lock);
            return (set_errno(EINVAL));
        }
        retval = sp->sem_base[semnum].sempid;
        mutex_exit(lock);
        return (retval);

    /* Get semval of one semaphore. */
    case GETVAL:
        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        if (semnum >= sp->sem_nsems) {
            mutex_exit(lock);
            return (set_errno(EINVAL));
        }
        retval = sp->sem_base[semnum].semval;
        mutex_exit(lock);
        return (retval);

    /* Get all semvals in set. */
    case GETALL:
        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }

        /* allocate space to hold all semaphore values */
        vsize = sp->sem_nsems * sizeof (*vals);
        vals = vp = kmem_alloc(vsize, KM_SLEEP);

        for (i = sp->sem_nsems, p = sp->sem_base; i--; p++, vp++)
            bcopy(&p->semval, vp, sizeof (p->semval));

        mutex_exit(lock);

        if (copyout((void *)vals, (void *)arg, vsize)) {
            kmem_free(vals, vsize);
            return (set_errno(EFAULT));
        }

        kmem_free(vals, vsize);
        return (0);

    /* Get # of processes sleeping for semval to become zero. */
    case GETZCNT:
        if (error = ipcperm_access(&sp->sem_perm, SEM_R, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        if (semnum >= sp->sem_nsems) {
            mutex_exit(lock);
            return (set_errno(EINVAL));
        }
        retval = sp->sem_base[semnum].semzcnt;
        mutex_exit(lock);
        return (retval);

    /* Set semval of one semaphore. */
    case SETVAL:
        if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
            mutex_exit(lock);
            return (set_errno(error));
        }
        if (semnum >= sp->sem_nsems) {
            mutex_exit(lock);
            return (set_errno(EINVAL));
        }
        if ((uint_t)arg > USHRT_MAX) {
            mutex_exit(lock);
            return (set_errno(ERANGE));
        }
        p = &sp->sem_base[semnum];
        if ((p->semval = (ushort_t)arg) != 0) {
            if (p->semncnt) {
                cv_broadcast(&p->semncnt_cv);
            }
        } else if (p->semzcnt) {
            cv_broadcast(&p->semzcnt_cv);
        }
        p->sempid = curproc->p_pid;
        sem_undo_clear(sp, (ushort_t)semnum, (ushort_t)semnum);
        mutex_exit(lock);
        return (0);

    /* Set semvals of all semaphores in set. */
    case SETALL:
        /* Check if semaphore set has been deleted and reallocated. */
        if (sp->sem_nsems * sizeof (*vals) != vsize) {
            error = set_errno(EINVAL);
            goto seterr;
        }
        if (error = ipcperm_access(&sp->sem_perm, SEM_A, cr)) {
            error = set_errno(error);
            goto seterr;
        }
        sem_undo_clear(sp, 0, sp->sem_nsems - 1);
        for (i = 0, p = sp->sem_base; i < sp->sem_nsems;
                (p++)->sempid = curproc->p_pid) {
            if ((p->semval = vals[i++]) != 0) {
                if (p->semncnt) {
                    cv_broadcast(&p->semncnt_cv);
                }
            } else if (p->semzcnt) {
                cv_broadcast(&p->semzcnt_cv);
            }
        }
seterr:
        mutex_exit(lock);
        kmem_free(vals, vsize);
        return (error);

    default:
        mutex_exit(lock);
        return (set_errno(EINVAL));
    }

    /* NOTREACHED */
}
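
Similarly, the SETVAL and GETVAL cases above map directly onto the standard user-level semctl() calls. A minimal counterpart; note that union semun must be declared by the application on most systems:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>

union semun {				/* caller-defined on most systems */
	int		val;
	struct semid_ds	*buf;
	unsigned short	*array;
};

int
main(void)
{
	union semun arg;
	int id;

	if ((id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600)) == -1) {
		perror("semget");
		return (1);
	}
	arg.val = 1;
	if (semctl(id, 0, SETVAL, arg) == -1) {	/* kernel SETVAL case */
		perror("semctl(SETVAL)");
		return (1);
	}
	(void) printf("semval=%d\n", semctl(id, 0, GETVAL));
	(void) semctl(id, 0, IPC_RMID);
	return (0);
}
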
Example #13
/* ARGSUSED */
int
ufs_fioio(
	struct vnode	*vp,		/* any file on the fs */
	struct fioio	*fiou,		/* fioio struct in userland */
	int		flag,		/* flag from VOP_IOCTL() */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	int		error	= 0;
	struct vnode	*vpio	= NULL;	/* vnode for inode open */
	struct inode	*ipio	= NULL;	/* inode for inode open */
	struct file	*fpio	= NULL;	/* file  for inode open */
	struct inode	*ip;		/* inode for file system */
	struct fs	*fs;		/* fs    for file system */
	STRUCT_DECL(fioio, fio);	/* copy of user's fioio struct */

	/*
	 * must be privileged
	 */
	if (secpolicy_fs_config(cr, vp->v_vfsp) != 0)
		return (EPERM);

	STRUCT_INIT(fio, flag & DATAMODEL_MASK);

	/*
	 * get user's copy of fioio struct
	 */
	if (copyin(fiou, STRUCT_BUF(fio), STRUCT_SIZE(fio)))
		return (EFAULT);

	ip = VTOI(vp);
	fs = ip->i_fs;

	/*
	 * check the inode number against the fs's inode number bounds
	 */
	if (STRUCT_FGET(fio, fio_ino) < UFSROOTINO)
		return (ESRCH);
	if (STRUCT_FGET(fio, fio_ino) >= fs->fs_ncg * fs->fs_ipg)
		return (ESRCH);

	rw_enter(&ip->i_ufsvfs->vfs_dqrwlock, RW_READER);

	/*
	 * get the inode
	 */
	error = ufs_iget(ip->i_vfs, STRUCT_FGET(fio, fio_ino), &ipio, cr);

	rw_exit(&ip->i_ufsvfs->vfs_dqrwlock);

	if (error)
		return (error);

	/*
	 * check the generation number
	 */
	rw_enter(&ipio->i_contents, RW_READER);
	if (ipio->i_gen != STRUCT_FGET(fio, fio_gen)) {
		error = ESTALE;
		rw_exit(&ipio->i_contents);
		goto errout;
	}

	/*
	 * check if the inode is free
	 */
	if (ipio->i_mode == 0) {
		error = ENOENT;
		rw_exit(&ipio->i_contents);
		goto errout;
	}
	rw_exit(&ipio->i_contents);

	/*
	 *	Adapted from copen: get a file struct
	 *	Large Files: We open this file descriptor with FOFFMAX flag
	 *	set so that it will be like a large file open.
	 */
	if (error = falloc(NULL, (FREAD|FOFFMAX), &fpio,
	    STRUCT_FADDR(fio, fio_fd)))
		goto errout;

	/*
	 *	Adapted from vn_open: check access and then open the file
	 */
	vpio = ITOV(ipio);
	if (error = VOP_ACCESS(vpio, VREAD, 0, cr, NULL))
		goto errout;

	if (error = VOP_OPEN(&vpio, FREAD, cr, NULL))
		goto errout;

	/*
	 *	Adapted from copen: initialize the file struct
	 */
	fpio->f_vnode = vpio;

	/*
	 * return the fd
	 */
	if (copyout(STRUCT_BUF(fio), fiou, STRUCT_SIZE(fio))) {
		error = EFAULT;
		goto errout;
	}
	setf(STRUCT_FGET(fio, fio_fd), fpio);
	mutex_exit(&fpio->f_tlock);
	return (0);
errout:
	/*
	 * free the file struct and fd
	 */
	if (fpio) {
		setf(STRUCT_FGET(fio, fio_fd), NULL);
		unfalloc(fpio);
	}

	/*
	 * release the hold on the inode
	 */
	if (ipio)
		VN_RELE(ITOV(ipio));
	return (error);
}
Example #14
/* ARGSUSED  */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somewhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				bp->b_private = dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
Example #15
/*
 * smbfs mount vfsop
 * Set up mount info record and attach it to vfs struct.
 */
static int
smbfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	char		*data = uap->dataptr;
	int		error;
	smbnode_t 	*rtnp = NULL;	/* root of this fs */
	smbmntinfo_t 	*smi = NULL;
	dev_t 		smbfs_dev;
	int 		version;
	int 		devfd;
	zone_t		*zone = curproc->p_zone;
	zone_t		*mntzone = NULL;
	smb_share_t 	*ssp = NULL;
	smb_cred_t 	scred;
	int		flags, sec;

	STRUCT_DECL(smbfs_args, args);		/* smbfs mount arguments */

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * get arguments
	 *
	 * uap->datalen might be different from sizeof (args)
	 * in a compatible situation.
	 */
	STRUCT_INIT(args, get_udatamodel());
	bzero(STRUCT_BUF(args), SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE));
	if (copyin(data, STRUCT_BUF(args), MIN(uap->datalen,
	    SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE))))
		return (EFAULT);

	/*
	 * Check mount program version
	 */
	version = STRUCT_FGET(args, version);
	if (version != SMBFS_VERSION) {
		cmn_err(CE_WARN, "mount version mismatch:"
		    " kernel=%d, mount=%d\n",
		    SMBFS_VERSION, version);
		return (EINVAL);
	}

	/*
	 * Deal with re-mount requests.
	 */
	if (uap->flags & MS_REMOUNT) {
		cmn_err(CE_WARN, "MS_REMOUNT not implemented");
		return (ENOTSUP);
	}

	/*
	 * Check for busy
	 */
	mutex_enter(&mvp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get the "share" from the netsmb driver (ssp).
	 * It is returned with a "ref" (hold) for us.
	 * Release this hold: at errout below, or in
	 * smbfs_freevfs().
	 */
	devfd = STRUCT_FGET(args, devfd);
	error = smb_dev2share(devfd, &ssp);
	if (error) {
		cmn_err(CE_WARN, "invalid device handle %d (%d)\n",
		    devfd, error);
		return (error);
	}

	/*
	 * Use "goto errout" from here on.
	 * See: ssp, smi, rtnp, mntzone
	 */

	/*
	 * Determine the zone we're being mounted into.
	 */
	zone_hold(mntzone = zone);		/* start with this assumption */
	if (getzoneid() == GLOBAL_ZONEID) {
		zone_rele(mntzone);
		mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
		ASSERT(mntzone != NULL);
		if (mntzone != zone) {
			error = EBUSY;
			goto errout;
		}
	}

	/*
	 * Stop the mount from going any further if the zone is going away.
	 */
	if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
		error = EBUSY;
		goto errout;
	}

	/*
	 * On a Trusted Extensions client, we may have to force read-only
	 * for read-down mounts.
	 */
	if (is_system_labeled()) {
		void *addr;
		int ipvers = 0;
		struct smb_vc *vcp;

		vcp = SSTOVC(ssp);
		addr = smb_vc_getipaddr(vcp, &ipvers);
		error = smbfs_mount_label_policy(vfsp, addr, ipvers, cr);

		if (error > 0)
			goto errout;

		if (error == -1) {
			/* change mount to read-only to prevent write-down */
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
		}
	}

	/* Prevent unload. */
	atomic_inc_32(&smbfs_mountcount);

	/*
	 * Create a mount record and link it to the vfs struct.
	 * No more possibilities for errors from here on.
	 * Tear-down of this stuff is in smbfs_free_smi()
	 *
	 * Compare with NFS: nfsrootvp()
	 */
	smi = kmem_zalloc(sizeof (*smi), KM_SLEEP);

	mutex_init(&smi->smi_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&smi->smi_statvfs_cv, NULL, CV_DEFAULT, NULL);

	rw_init(&smi->smi_hash_lk, NULL, RW_DEFAULT, NULL);
	smbfs_init_hash_avl(&smi->smi_hash_avl);

	smi->smi_share = ssp;
	ssp = NULL;

	/*
	 * Convert the anonymous zone hold acquired via zone_hold() above
	 * into a zone reference.
	 */
	zone_init_ref(&smi->smi_zone_ref);
	zone_hold_ref(mntzone, &smi->smi_zone_ref, ZONE_REF_SMBFS);
	zone_rele(mntzone);
	mntzone = NULL;

	/*
	 * Initialize option defaults
	 */
	smi->smi_flags	= SMI_LLOCK;
	smi->smi_acregmin = SEC2HR(SMBFS_ACREGMIN);
	smi->smi_acregmax = SEC2HR(SMBFS_ACREGMAX);
	smi->smi_acdirmin = SEC2HR(SMBFS_ACDIRMIN);
	smi->smi_acdirmax = SEC2HR(SMBFS_ACDIRMAX);

	/*
	 * All "generic" mount options have already been
	 * handled in vfs.c:domount() - see mntopts stuff.
	 * Query generic options using vfs_optionisset().
	 */
	if (vfs_optionisset(vfsp, MNTOPT_INTR, NULL))
		smi->smi_flags |= SMI_INT;
	if (vfs_optionisset(vfsp, MNTOPT_ACL, NULL))
		smi->smi_flags |= SMI_ACL;

	/*
	 * Get the mount options that come in as smbfs_args,
	 * starting with args.flags (SMBFS_MF_xxx)
	 */
	flags = STRUCT_FGET(args, flags);
	smi->smi_uid 	= STRUCT_FGET(args, uid);
	smi->smi_gid 	= STRUCT_FGET(args, gid);
	smi->smi_fmode	= STRUCT_FGET(args, file_mode) & 0777;
	smi->smi_dmode	= STRUCT_FGET(args, dir_mode) & 0777;

	/*
	 * Handle the SMBFS_MF_xxx flags.
	 */
	if (flags & SMBFS_MF_NOAC)
		smi->smi_flags |= SMI_NOAC;
	if (flags & SMBFS_MF_ACREGMIN) {
		sec = STRUCT_FGET(args, acregmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acregmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACREGMAX) {
		sec = STRUCT_FGET(args, acregmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acregmax = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMIN) {
		sec = STRUCT_FGET(args, acdirmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acdirmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMAX) {
		sec = STRUCT_FGET(args, acdirmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acdirmax = SEC2HR(sec);
	}

	/*
	 * Get attributes of the remote file system,
	 * i.e. ACL support, named streams, etc.
	 */
	smb_credinit(&scred, cr);
	error = smbfs_smb_qfsattr(smi->smi_share, &smi->smi_fsa, &scred);
	smb_credrele(&scred);
	if (error) {
		SMBVDEBUG("smbfs_smb_qfsattr error %d\n", error);
	}

	/*
	 * We enable XATTR by default (via smbfs_mntopts)
	 * but if the share does not support named streams,
	 * force the NOXATTR option (also clears XATTR).
	 * Caller will set or clear VFS_XATTR after this.
	 */
	if ((smi->smi_fsattr & FILE_NAMED_STREAMS) == 0)
		vfs_setmntopt(vfsp, MNTOPT_NOXATTR, NULL, 0);

	/*
	 * Ditto ACLs (disable if not supported on this share)
	 */
	if ((smi->smi_fsattr & FILE_PERSISTENT_ACLS) == 0) {
		vfs_setmntopt(vfsp, MNTOPT_NOACL, NULL, 0);
		smi->smi_flags &= ~SMI_ACL;
	}

	/*
	 * Assign a unique device id to the mount
	 */
	mutex_enter(&smbfs_minor_lock);
	do {
		smbfs_minor = (smbfs_minor + 1) & MAXMIN32;
		smbfs_dev = makedevice(smbfs_major, smbfs_minor);
	} while (vfs_devismounted(smbfs_dev));
	mutex_exit(&smbfs_minor_lock);

	vfsp->vfs_dev	= smbfs_dev;
	vfs_make_fsid(&vfsp->vfs_fsid, smbfs_dev, smbfsfstyp);
	vfsp->vfs_data	= (caddr_t)smi;
	vfsp->vfs_fstype = smbfsfstyp;
	vfsp->vfs_bsize = MAXBSIZE;
	vfsp->vfs_bcount = 0;

	smi->smi_vfsp	= vfsp;
	smbfs_zonelist_add(smi);	/* undo in smbfs_freevfs */

	/*
	 * Create the root vnode, which we need in unmount
	 * for the call to smbfs_check_table(), etc.
	 * Release this hold in smbfs_unmount.
	 */
	rtnp = smbfs_node_findcreate(smi, "\\", 1, NULL, 0, 0,
	    &smbfs_fattr0);
	ASSERT(rtnp != NULL);
	rtnp->r_vnode->v_type = VDIR;
	rtnp->r_vnode->v_flag |= VROOT;
	smi->smi_root = rtnp;

	/*
	 * NFS does other stuff here too:
	 *   async worker threads
	 *   init kstats
	 *
	 * End of code from NFS nfsrootvp()
	 */
	return (0);

errout:
	vfsp->vfs_data = NULL;
	if (smi != NULL)
		smbfs_free_smi(smi);

	if (mntzone != NULL)
		zone_rele(mntzone);

	if (ssp != NULL)
		smb_share_rele(ssp);

	return (error);
}
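
Finally, note the argument-copyin idiom at the top of smbfs_mount(): the kernel buffer is zeroed first and then at most uap->datalen bytes are copied in, so a shorter smbfs_args from an older mount program still leaves the trailing fields at their zero defaults. The same idiom in isolation (a sketch, not part of the source above):

static int
copyin_args_bounded(const void *uaddr, size_t ulen, void *kbuf, size_t klen)
{
	bzero(kbuf, klen);			/* defaults for absent fields */
	if (copyin(uaddr, kbuf, MIN(ulen, klen)) != 0)
		return (EFAULT);

	return (0);
}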