/*ARGSUSED*/
static int
xcalwd_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int			instance;

	if (secpolicy_sys_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (instance < 0)
		return (ENXIO);

	if (ddi_get_soft_state(xcalwd_statep, instance) == NULL) {
		return (ENXIO);
	}

	return (0);
}
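
A caller only gets past this entry point with the sys_config privilege; anyone else sees EPERM. A minimal user-space sketch of opening the node, assuming a hypothetical /dev/xcalwd device path:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int
main(void)
{
	/* "/dev/xcalwd" is an assumed device path, used only for illustration. */
	int fd = open("/dev/xcalwd", O_RDWR);

	if (fd < 0) {
		/* EPERM here corresponds to the secpolicy_sys_config() check above. */
		(void) fprintf(stderr, "open: %s\n", strerror(errno));
		return (1);
	}
	(void) close(fd);
	return (0);
}
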
Example #2
/*ARGSUSED*/
static int
ipmi_ioctl(dev_t dv, int cmd, intptr_t data, int flags, cred_t *cr, int *rvalp)
{
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req req;
	struct ipmi_recv recv;
	struct ipmi_recv32 recv32;
	struct ipmi_addr addr;
	int error, len;
	model_t model;
	int orig_cmd = 0;
	uchar_t	t_lun;

	if (secpolicy_sys_config(cr, B_FALSE) != 0)
		return (EPERM);

	if ((dev = lookup_ipmidev_by_dev(dv)) == NULL)
		return (ENODEV);

	model = get_udatamodel();
	if (model == DATAMODEL_NATIVE) {
		switch (cmd) {
		case IPMICTL_SEND_COMMAND:
			if (copyin((void *)data, &req, sizeof (req)))
				return (EFAULT);
			break;
		case IPMICTL_RECEIVE_MSG_TRUNC:
		case IPMICTL_RECEIVE_MSG:
			if (copyin((void *)data, &recv, sizeof (recv)))
				return (EFAULT);
			break;
		}
	} else {
		/* Convert 32-bit structures to native. */
		struct ipmi_req32 req32;

		switch (cmd) {
		case IPMICTL_SEND_COMMAND_32:
			if (copyin((void *)data, &req32, sizeof (req32)))
				return (EFAULT);

			req.addr = PTRIN(req32.addr);
			req.addr_len = req32.addr_len;
			req.msgid = req32.msgid;
			req.msg.netfn = req32.msg.netfn;
			req.msg.cmd = req32.msg.cmd;
			req.msg.data_len = req32.msg.data_len;
			req.msg.data = PTRIN(req32.msg.data);

			cmd = IPMICTL_SEND_COMMAND;
			break;

		case IPMICTL_RECEIVE_MSG_TRUNC_32:
		case IPMICTL_RECEIVE_MSG_32:
			if (copyin((void *)data, &recv32, sizeof (recv32)))
				return (EFAULT);

			recv.addr = PTRIN(recv32.addr);
			recv.addr_len = recv32.addr_len;
			recv.msg.data_len = recv32.msg.data_len;
			recv.msg.data = PTRIN(recv32.msg.data);

			orig_cmd = cmd;
			cmd = (cmd == IPMICTL_RECEIVE_MSG_TRUNC_32) ?
			    IPMICTL_RECEIVE_MSG_TRUNC : IPMICTL_RECEIVE_MSG;
			break;
		}
	}

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
		IPMI_LOCK(sc);
		/* Discard any completed requests still sitting on the queue. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))
		    != NULL) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		/* Reject requests with an implausibly large data length. */
		if (req.msg.data_len > IPMI_MAX_RX)
			return (EINVAL);

		kreq = ipmi_alloc_request(dev, req.msgid,
		    IPMI_ADDR(req.msg.netfn, 0), req.msg.cmd,
		    req.msg.data_len, IPMI_MAX_RX);
		/* This struct is the same for 32/64 */
		if (req.msg.data_len > 0 &&
		    copyin(req.msg.data, kreq->ir_request, req.msg.data_len)) {
			ipmi_free_request(kreq);
			return (EFAULT);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;

	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		/* This struct is the same for 32/64 */
		if (copyin(recv.addr, &addr, sizeof (addr)))
			return (EFAULT);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		recv.recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv.msgid = kreq->ir_msgid;
		recv.msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv.msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
		len = kreq->ir_replylen + 1;
		if (recv.msg.data_len < len && cmd == IPMICTL_RECEIVE_MSG) {
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv.msg.data_len, len);
		recv.msg.data_len = (unsigned short)len;

		if (orig_cmd == IPMICTL_RECEIVE_MSG_TRUNC_32 ||
		    orig_cmd == IPMICTL_RECEIVE_MSG_32) {
			/* Update changed fields in 32-bit structure. */
			recv32.recv_type = recv.recv_type;
			recv32.msgid = (int32_t)recv.msgid;
			recv32.msg.netfn = recv.msg.netfn;
			recv32.msg.cmd = recv.msg.cmd;
			recv32.msg.data_len = recv.msg.data_len;

			error = copyout(&recv32, (void *)data, sizeof (recv32));
		} else {
			error = copyout(&recv, (void *)data, sizeof (recv));
		}

		/* This struct is the same for 32/64 */
		if (error == 0)
			error = copyout(&addr, recv.addr, sizeof (addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv.msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv.msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);

		if (error)
			return (EFAULT);

		break;

	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &dev->ipmi_address,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_address, (void *)data,
		    sizeof (dev->ipmi_address))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyin((void *)data, &t_lun, sizeof (t_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		dev->ipmi_lun = t_lun & 0x3;
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		if (copyout(&dev->ipmi_lun, (void *)data,
		    sizeof (dev->ipmi_lun))) {
			IPMI_UNLOCK(sc);
			return (EFAULT);
		}
		IPMI_UNLOCK(sc);
		break;

	case IPMICTL_SET_GETS_EVENTS_CMD:
		break;

	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EINVAL);

	default:
		return (EINVAL);
	}

	return (0);
}
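
To illustrate the send half of the path above: a user-space caller fills in struct ipmi_req and issues IPMICTL_SEND_COMMAND; the reply is collected later with IPMICTL_RECEIVE_MSG or IPMICTL_RECEIVE_MSG_TRUNC, where EAGAIN means no completed request is queued yet. This is a hedged sketch; the header path and the system-interface address setup are assumptions, while the ipmi_req fields match those consumed by ipmi_ioctl() above.

#include <sys/ipmi.h>	/* assumed location of struct ipmi_req and IPMICTL_* */
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/*
 * Sketch only: send a command to the BMC over an already-open descriptor.
 * IPMI_SYSTEM_INTERFACE_ADDR_TYPE is the usual OpenIPMI-style addressing
 * and is assumed to come from the same header.
 */
static int
ipmi_send(int fd, uint8_t netfn, uint8_t cmd, uint8_t *data, uint16_t dlen)
{
	struct ipmi_addr addr;
	struct ipmi_req req;

	(void) memset(&addr, 0, sizeof (addr));
	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;

	(void) memset(&req, 0, sizeof (req));
	req.addr = (unsigned char *)&addr;
	req.addr_len = sizeof (addr);
	req.msgid = 1;
	req.msg.netfn = netfn;
	req.msg.cmd = cmd;
	req.msg.data = data;
	req.msg.data_len = dlen;

	return (ioctl(fd, IPMICTL_SEND_COMMAND, &req));
}
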
Example #3
static int
log_wput(queue_t *q, mblk_t *mp)
{
	log_t *lp = (log_t *)q->q_ptr;
	struct iocblk *iocp;
	mblk_t *mp2;
	cred_t *cr = DB_CRED(mp);
	zoneid_t zoneid;

	/*
	 * Default to global zone if dblk doesn't have a valid cred.
	 * Calls to syslog() go through putmsg(), which does set up
	 * the cred.
	 */
	zoneid = (cr != NULL) ? crgetzoneid(cr) : GLOBAL_ZONEID;

	switch (DB_TYPE(mp)) {
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(q), FLUSHALL);
			qreply(q, mp);
			return (0);
		}
		break;

	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;

		if (lp->log_major != LOG_LOGMIN) {
			/* write-only device */
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (iocp->ioc_count == TRANSPARENT) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (lp->log_flags) {
			miocnak(q, mp, 0, EBUSY);
			return (0);
		}

		freemsg(lp->log_data);
		lp->log_data = mp->b_cont;
		mp->b_cont = NULL;

		switch (iocp->ioc_cmd) {

		case I_CONSLOG:
			log_update(lp, RD(q), SL_CONSOLE, log_console);
			break;

		case I_TRCLOG:
			if (lp->log_data == NULL) {
				miocnak(q, mp, 0, EINVAL);
				return (0);
			}
			log_update(lp, RD(q), SL_TRACE, log_trace);
			break;

		case I_ERRLOG:
			log_update(lp, RD(q), SL_ERROR, log_error);
			break;

		default:
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		miocack(q, mp, 0, 0);
		return (0);

	case M_PROTO:
		if (MBLKL(mp) == sizeof (log_ctl_t) && mp->b_cont != NULL) {
			log_ctl_t *lc = (log_ctl_t *)mp->b_rptr;
			/* This path is used by savecore to log crash-dump messages. */
			if (mp->b_band != 0 &&
			    secpolicy_sys_config(CRED(), B_FALSE) == 0) {
				(void) putq(log_consq, mp);
				return (0);
			}
			if ((lc->pri & LOG_FACMASK) == LOG_KERN)
				lc->pri |= LOG_USER;
			mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, lc->level,
			    lc->flags, lc->pri, mp->b_cont->b_rptr,
			    MBLKL(mp->b_cont) + 1, 0);
			if (mp2 != NULL)
				log_sendmsg(mp2, zoneid);
		}
		break;

	case M_DATA:
		mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, 0, SL_CONSOLE,
		    LOG_USER | LOG_INFO, mp->b_rptr, MBLKL(mp) + 1, 0);
		if (mp2 != NULL)
			log_sendmsg(mp2, zoneid);
		break;
	}

	freemsg(mp);
	return (0);
}
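
The M_PROTO branch above is the shape produced by syslog(3C), which builds a log_ctl_t control part and sends it with putmsg(). A hedged user-space sketch of that message shape follows; /dev/conslog is the conventional console-log node and is treated as an assumption here, as is the exact set of headers.

#include <sys/strlog.h>
#include <sys/log.h>
#include <stropts.h>
#include <syslog.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
post_console_msg(const char *text)
{
	int fd = open("/dev/conslog", O_WRONLY);	/* assumed node */
	log_ctl_t lc;
	struct strbuf ctl, dat;

	if (fd < 0)
		return (-1);

	(void) memset(&lc, 0, sizeof (lc));
	lc.pri = LOG_USER | LOG_INFO;	/* same facility the M_DATA path assigns */
	lc.flags = SL_CONSOLE;
	lc.level = 0;

	ctl.maxlen = 0;
	ctl.len = sizeof (lc);		/* must be exactly sizeof (log_ctl_t) */
	ctl.buf = (char *)&lc;
	dat.maxlen = 0;
	dat.len = strlen(text) + 1;
	dat.buf = (char *)text;

	/* Arrives at log_wput() as M_PROTO with a log_ctl_t control block. */
	if (putmsg(fd, &ctl, &dat, 0) != 0) {
		(void) close(fd);
		return (-1);
	}
	(void) close(fd);
	return (0);
}
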
int
kadmin(int cmd, int fcn, void *mdep, cred_t *credp)
{
	int error = 0;
	char *buf;
	size_t buflen = 0;
	boolean_t invoke_cb = B_FALSE;

	/*
	 * We might be called directly by the kernel's fault-handling code, so
	 * we can't assert that the caller is in the global zone.
	 */

	/*
	 * Make sure that cmd is one of the valid <sys/uadmin.h> command codes
	 * and that we have appropriate privileges for this action.
	 */
	switch (cmd) {
	case A_FTRACE:
	case A_SHUTDOWN:
	case A_REBOOT:
	case A_REMOUNT:
	case A_FREEZE:
	case A_DUMP:
	case A_SDTTEST:
	case A_CONFIG:
		if (secpolicy_sys_config(credp, B_FALSE) != 0)
			return (EPERM);
		break;

	default:
		return (EINVAL);
	}

	/*
	 * Serialize these operations on ualock.  If it is held, the
	 * system should shut down, reboot, or remount shortly, unless there is
	 * an error.  We need a cv rather than just a mutex because proper
	 * functioning of A_REBOOT relies on being able to interrupt blocked
	 * userland callers.
	 *
	 * We only clear ua_shutdown_thread after A_REMOUNT or A_CONFIG.
	 * Other commands should never return.
	 */
	if (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_REMOUNT ||
	    cmd == A_CONFIG) {
		mutex_enter(&ualock);
		while (ua_shutdown_thread != NULL) {
			if (cv_wait_sig(&uacond, &ualock) == 0) {
				/*
				 * If we were interrupted, leave, and handle
				 * the signal (or exit, depending on what
				 * happened)
				 */
				mutex_exit(&ualock);
				return (EINTR);
			}
		}
		ua_shutdown_thread = curthread;
		mutex_exit(&ualock);
	}

	switch (cmd) {
	case A_SHUTDOWN:
	{
		proc_t *p = ttoproc(curthread);

		/*
		 * Release (almost) all of our own resources if we are called
		 * from a user context; if we are calling kadmin() from a
		 * kernel context, we do not release these resources.
		 */
		if (p != &p0) {
			proc_is_exiting(p);
			if ((error = exitlwps(0)) != 0) {
				/*
				 * Another thread in this process also called
				 * exitlwps().
				 */
				mutex_enter(&ualock);
				ua_shutdown_thread = NULL;
				cv_signal(&uacond);
				mutex_exit(&ualock);
				return (error);
			}
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOWAIT;
			sigfillset(&p->p_ignore);
			curthread->t_lwp->lwp_cursig = 0;
			curthread->t_lwp->lwp_extsig = 0;
			if (p->p_exec) {
				vnode_t *exec_vp = p->p_exec;
				p->p_exec = NULLVP;
				mutex_exit(&p->p_lock);
				VN_RELE(exec_vp);
			} else {
				mutex_exit(&p->p_lock);
			}

			pollcleanup();
			closeall(P_FINFO(curproc));
			relvm();

		} else {
			/*
			 * Reset t_cred if not set because much of the
			 * filesystem code depends on CRED() being valid.
			 */
			if (curthread->t_cred == NULL)
				curthread->t_cred = kcred;
		}

		/* indicate shutdown in progress */
		sys_shutdown = 1;

		/*
		 * Communicate that init shouldn't be restarted.
		 */
		zone_shutdown_global();

		killall(ALL_ZONES);
		/*
		 * If we are calling kadmin() from a kernel context then we
		 * do not release these resources.
		 */
		if (ttoproc(curthread) != &p0) {
			VN_RELE(PTOU(curproc)->u_cdir);
			if (PTOU(curproc)->u_rdir)
				VN_RELE(PTOU(curproc)->u_rdir);
			if (PTOU(curproc)->u_cwd)
				refstr_rele(PTOU(curproc)->u_cwd);

			PTOU(curproc)->u_cdir = rootdir;
			PTOU(curproc)->u_rdir = NULL;
			PTOU(curproc)->u_cwd = NULL;
		}

		/*
		 * Give the reboot/halt/poweroff code a chance to do
		 * anything it needs to while we still have filesystems
		 * mounted, such as loading any modules required to
		 * perform the actual poweroff later.
		 */
		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			buf = i_convert_boot_device_name(mdep, NULL, &buflen);
			mdpreboot(cmd, fcn, buf);
		} else
			mdpreboot(cmd, fcn, mdep);

		/*
		 * Allow fsflush to finish running and then prevent it
		 * from ever running again so that vfs_unmountall() and
		 * vfs_syncall() can acquire the vfs locks they need.
		 */
		sema_p(&fsflush_sema);
		(void) callb_execute_class(CB_CL_UADMIN_PRE_VFS, NULL);

		vfs_unmountall();
		(void) VFS_MOUNTROOT(rootvfs, ROOT_UNMOUNT);
		vfs_syncall();

		dump_ereports();
		dump_messages();

		invoke_cb = B_TRUE;

		/* FALLTHROUGH */
	}

	case A_REBOOT:
		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			buf = i_convert_boot_device_name(mdep, NULL, &buflen);
			mdboot(cmd, fcn, buf, invoke_cb);
		} else
			mdboot(cmd, fcn, mdep, invoke_cb);
		/* no return expected */
		break;

	case A_CONFIG:
		switch (fcn) {
		case AD_UPDATE_BOOT_CONFIG:
#ifndef	__sparc
		{
			extern void fastboot_update_config(const char *);

			fastboot_update_config(mdep);
		}
#endif

			break;
		}
		/* Let other threads enter the shutdown path now */
		mutex_enter(&ualock);
		ua_shutdown_thread = NULL;
		cv_signal(&uacond);
		mutex_exit(&ualock);
		break;

	case A_REMOUNT:
		(void) VFS_MOUNTROOT(rootvfs, ROOT_REMOUNT);
		/* Let other threads enter the shutdown path now */
		mutex_enter(&ualock);
		ua_shutdown_thread = NULL;
		cv_signal(&uacond);
		mutex_exit(&ualock);
		break;

	case A_FREEZE:
	{
		/*
		 * This is the entry point for all suspend/resume actions.
		 */
		extern int cpr(int, void *);

		if (modload("misc", "cpr") == -1)
			return (ENOTSUP);
		/* Let the CPR module decide what to do with mdep */
		error = cpr(fcn, mdep);
		break;
	}

	case A_FTRACE:
	{
		switch (fcn) {
		case AD_FTRACE_START:
			(void) FTRACE_START();
			break;
		case AD_FTRACE_STOP:
			(void) FTRACE_STOP();
			break;
		default:
			error = EINVAL;
		}
		break;
	}

	case A_DUMP:
	{
		if (fcn == AD_NOSYNC) {
			in_sync = 1;
			break;
		}

		panic_bootfcn = fcn;
		panic_forced = 1;

		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			panic_bootstr = i_convert_boot_device_name(mdep,
			    NULL, &buflen);
		} else
			panic_bootstr = mdep;

#ifndef	__sparc
		extern void fastboot_update_and_load(int, char *);

		fastboot_update_and_load(fcn, mdep);
#endif

		panic("forced crash dump initiated at user request");
		/*NOTREACHED*/
	}

	case A_SDTTEST:
	{
		DTRACE_PROBE7(test, int, 1, int, 2, int, 3, int, 4, int, 5,
		    int, 6, int, 7);
		break;
	}

	default:
		error = EINVAL;
	}

	return (error);
}
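
kadmin() is reached from user space through uadmin(2), and the EPERM return above is what an unprivileged caller sees. A minimal, hedged sketch of such a caller; A_REBOOT comes from the code above, while AD_BOOT is the standard <sys/uadmin.h> function code and is used here only for illustration.

#include <sys/uadmin.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int
main(void)
{
	/*
	 * A_REBOOT ends in mdboot() and does not return on success; EPERM
	 * means the caller lacks the privilege that secpolicy_sys_config()
	 * checks in kadmin().
	 */
	if (uadmin(A_REBOOT, AD_BOOT, 0) == -1) {
		(void) fprintf(stderr, "uadmin: %s\n", strerror(errno));
		return (1);
	}
	return (0);
}
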
Example #5
/*ARGSUSED*/
static int
fm_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cred, int *rvalp)
{
	char *buf;
	int err;
	uint_t model;
	const fm_subr_t *subr;
	uint32_t vers;
	fm_ioc_data_t fid;
	nvlist_t *invl = NULL, *onvl = NULL;
#ifdef _MULTI_DATAMODEL
	fm_ioc_data32_t fid32;
#endif

	if (getminor(dev) != 0)
		return (ENXIO);

	for (subr = fm_subrs; subr->cmd != cmd; subr++)
		if (subr->cmd == -1)
			return (ENOTTY);

	if (subr->priv && (flag & FWRITE) == 0 &&
	    secpolicy_sys_config(CRED(), 0) != 0)
		return (EPERM);

	model = ddi_model_convert_from(flag & FMODELS);

	switch (model) {
#ifdef _MULTI_DATAMODEL
	case DDI_MODEL_ILP32:
		if (ddi_copyin((void *)data, &fid32,
		    sizeof (fm_ioc_data32_t), flag) != 0)
			return (EFAULT);
		fid.fid_version = fid32.fid_version;
		fid.fid_insz = fid32.fid_insz;
		fid.fid_inbuf = (caddr_t)(uintptr_t)fid32.fid_inbuf;
		fid.fid_outsz = fid32.fid_outsz;
		fid.fid_outbuf = (caddr_t)(uintptr_t)fid32.fid_outbuf;
		break;
#endif /* _MULTI_DATAMODEL */
	case DDI_MODEL_NONE:
	default:
		if (ddi_copyin((void *)data, &fid, sizeof (fm_ioc_data_t),
		    flag) != 0)
			return (EFAULT);
	}

	if (nvlist_lookup_uint32(fm_vers_nvl, subr->version, &vers) != 0 ||
	    fid.fid_version != vers)
		return (ENOTSUP);

	if (fid.fid_insz > FM_IOC_MAXBUFSZ)
		return (ENAMETOOLONG);
	if (fid.fid_outsz > FM_IOC_OUT_MAXBUFSZ)
		return (EINVAL);

	/*
	 * Copy in and unpack the input nvlist.
	 */
	if (fid.fid_insz != 0 && fid.fid_inbuf != (caddr_t)0) {
		buf = kmem_alloc(fid.fid_insz, KM_SLEEP);
		if (ddi_copyin(fid.fid_inbuf, buf, fid.fid_insz, flag) != 0) {
			kmem_free(buf, fid.fid_insz);
			return (EFAULT);
		}
		err = nvlist_unpack(buf, fid.fid_insz, &invl, KM_SLEEP);
		kmem_free(buf, fid.fid_insz);
		if (err != 0)
			return (err);
	}

	err = subr->func(cmd, invl, &onvl);

	if (invl != NULL)
		nvlist_free(invl);

	if (err != 0) {
		if (onvl != NULL)
			nvlist_free(onvl);
		return (err);
	}

	/*
	 * If the output nvlist contains any data, pack it and copyout.
	 */
	if (onvl != NULL) {
		size_t sz;

		if ((err = nvlist_size(onvl, &sz, NV_ENCODE_NATIVE)) != 0) {
			nvlist_free(onvl);
			return (err);
		}
		if (sz > fid.fid_outsz) {
			nvlist_free(onvl);
			return (ENAMETOOLONG);
		}

		buf = kmem_alloc(sz, KM_SLEEP);
		if ((err = nvlist_pack(onvl, &buf, &sz, NV_ENCODE_NATIVE,
		    KM_SLEEP)) != 0) {
			kmem_free(buf, sz);
			nvlist_free(onvl);
			return (err);
		}
		nvlist_free(onvl);
		if (ddi_copyout(buf, fid.fid_outbuf, sz, flag) != 0) {
			kmem_free(buf, sz);
			return (EFAULT);
		}
		kmem_free(buf, sz);
		fid.fid_outsz = sz;

		switch (model) {
#ifdef _MULTI_DATAMODEL
		case DDI_MODEL_ILP32:
			fid32.fid_outsz = (size32_t)fid.fid_outsz;
			if (ddi_copyout(&fid32, (void *)data,
			    sizeof (fm_ioc_data32_t), flag) != 0)
				return (EFAULT);
			break;
#endif /* _MULTI_DATAMODEL */
		case DDI_MODEL_NONE:
		default:
			if (ddi_copyout(&fid, (void *)data,
			    sizeof (fm_ioc_data_t), flag) != 0)
				return (EFAULT);
		}
	}

	return (err);
}
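
On the user side, the fid_* fields above carry a packed nvlist in and receive a packed nvlist back; the driver packs its reply into fid_outbuf and updates fid_outsz. A hedged sketch using libnvpair follows; the header that defines fm_ioc_data_t, the particular FM_IOC_* command, and the open descriptor are assumptions, while the field names come from the code above.

#include <sys/devfm.h>	/* assumed location of fm_ioc_data_t */
#include <libnvpair.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>

/*
 * Pack "invl" and issue the given FM_IOC_* command; on success the reply
 * sits packed in outbuf and can be unpacked with nvlist_unpack().
 */
static int
fm_ioctl_call(int fd, int ioc, uint32_t vers, nvlist_t *invl,
    char *outbuf, size_t outsz)
{
	fm_ioc_data_t fid;
	char *inbuf = NULL;
	size_t insz = 0;
	int err;

	if (invl != NULL) {
		if ((err = nvlist_size(invl, &insz, NV_ENCODE_NATIVE)) != 0)
			return (err);
		if ((inbuf = malloc(insz)) == NULL)
			return (errno);
		if ((err = nvlist_pack(invl, &inbuf, &insz,
		    NV_ENCODE_NATIVE, 0)) != 0) {
			free(inbuf);
			return (err);
		}
	}

	(void) memset(&fid, 0, sizeof (fid));
	fid.fid_version = vers;	/* must match the subr's registered version */
	fid.fid_insz = insz;
	fid.fid_inbuf = inbuf;
	fid.fid_outsz = outsz;
	fid.fid_outbuf = outbuf;

	err = (ioctl(fd, ioc, &fid) == -1) ? errno : 0;
	free(inbuf);
	return (err);
}
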
/*ARGSUSED*/
static int
xcalwd_ioctl(dev_t dev, int cmd, intptr_t arg, int flag,
			cred_t *cred_p, int *rvalp)
{
	int		instance;
	xcalwd_state_t	*tsp;
	int		intvl;
	int		o_intvl;
	boolean_t	curstate;
	timeout_id_t	tid;

	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	instance = getminor(dev);
	if (instance < 0)
		return (ENXIO);

	tsp = ddi_get_soft_state(xcalwd_statep, instance);
	if (tsp == NULL)
		return (ENXIO);

	switch (cmd) {
	case XCALWD_STOPWATCHDOG:
		/*
		 * Cancel any pending timeout and disable the watchdog.
		 */
		tid = 0;
		mutex_enter(&tsp->lock);
		if (tsp->started == B_FALSE) {
			mutex_exit(&tsp->lock);
			return (0);
		}
		tid = tsp->tid;
		tsp->started = B_FALSE;
		tsp->tid = 0;
		mutex_exit(&tsp->lock);
		if (tid != 0)
			(void) untimeout(tid);
		return (0);
	case XCALWD_STARTWATCHDOG:
		if (ddi_copyin((void *)arg, &intvl, sizeof (intvl), flag))
			return (EFAULT);
		if (intvl == 0)
			return (EINVAL);

		mutex_enter(&tsp->lock);
		o_intvl = tsp->intvl;
		mutex_exit(&tsp->lock);

		if (ddi_copyout((const void *)&o_intvl, (void *)arg,
		    sizeof (o_intvl), flag))
			return (EFAULT);

		mutex_enter(&tsp->lock);
		if (tsp->started == B_TRUE) {
			mutex_exit(&tsp->lock);
			return (EINVAL);
		}
		tsp->intvl = intvl;
		tsp->tid = realtime_timeout(xcalwd_timeout,
		    (void *)(uintptr_t)instance,
		    drv_usectohz(1000000) * tsp->intvl);
		tsp->started = B_TRUE;
		mutex_exit(&tsp->lock);
		return (0);
	case XCALWD_KEEPALIVE:
		tid = 0;
		mutex_enter(&tsp->lock);
		tid = tsp->tid;
		tsp->tid = 0;
		mutex_exit(&tsp->lock);
		if (tid != 0)
			(void) untimeout(tid);	/* cancel */

		mutex_enter(&tsp->lock);
		if (tsp->started == B_TRUE)	/* reinstate */
			tsp->tid = realtime_timeout(xcalwd_timeout,
			    (void *)(uintptr_t)instance,
			    drv_usectohz(1000000) * tsp->intvl);
		mutex_exit(&tsp->lock);
		return (0);
	case XCALWD_GETSTATE:
		mutex_enter(&tsp->lock);
		curstate = tsp->started;
		mutex_exit(&tsp->lock);
		if (ddi_copyout((const void *)&curstate, (void *)arg,
		    sizeof (curstate), flag))
			return (EFAULT);
		return (0);
	default:
		return (EINVAL);
	}
	/*NOTREACHED*/
}
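
Putting the xcalwd commands together, a user-space watchdog client starts the timer and then pats it at some fraction of the interval. This is a sketch only: the device path and the header that defines the XCALWD_* ioctls are assumptions, while the semantics (the previous interval copied back out on start, KEEPALIVE cancelling and re-arming the timeout) follow the code above.

#include <sys/xcalwd.h>	/* assumed header defining the XCALWD_* ioctls */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	int fd, arg;
	int interval = 60;	/* seconds */

	/* Assumed device path; opening it requires the sys_config privilege. */
	if ((fd = open("/dev/xcalwd", O_RDWR)) < 0) {
		perror("open");
		return (1);
	}

	/*
	 * The driver copies the requested interval in and returns the
	 * previous interval through the same argument.
	 */
	arg = interval;
	if (ioctl(fd, XCALWD_STARTWATCHDOG, &arg) < 0) {
		perror("XCALWD_STARTWATCHDOG");
		return (1);
	}

	for (;;) {
		/* Cancel and re-arm the pending timeout before it expires. */
		if (ioctl(fd, XCALWD_KEEPALIVE, 0) < 0)
			break;
		(void) sleep(interval / 2);
	}

	(void) ioctl(fd, XCALWD_STOPWATCHDOG, 0);
	(void) close(fd);
	return (0);
}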