Example no. 1
void
hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));

	if (hxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case HXGE_PUT_TCAM:
		hxge_put_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_GET_TCAM:
		hxge_get_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_RTRACE:
		hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
		break;
	}
}
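Every handler in this collection follows the same contract: each M_IOCTL must be answered exactly once, with miocack(9F) on success or miocnak(9F) plus an errno on failure. A minimal hedged sketch of that skeleton is below; xx_ioctl and XX_MYCMD are hypothetical names, not taken from any of the drivers shown here.

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>		/* miocack(), miocnak() */
#include <sys/errno.h>

#define	XX_MYCMD	0x1234	/* hypothetical private ioctl command */

/* Hedged sketch: write-side M_IOCTL handler for a hypothetical driver. */
static void
xx_ioctl(queue_t *wq, mblk_t *mp)
{
	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

	switch (iocp->ioc_cmd) {
	case XX_MYCMD:
		/* ... act on the payload in mp->b_cont here ... */
		miocack(wq, mp, 0, 0);		/* ACK, no data, rval 0 */
		break;
	default:
		miocnak(wq, mp, 0, EINVAL);	/* NAK unknown commands */
		break;
	}
}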
Example no. 2
static void
cvcr_ioctl(queue_t *q, mblk_t *mp)
{
	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
	int		error;

	switch (iocp->ioc_cmd) {
	case CVC_BREAK:
		abort_sequence_enter(NULL);
		miocack(q, mp, 0, 0);
		break;

	case CVC_DISCONNECT:
	case TIOCSWINSZ:
		/*
		 * Generate a SIGHUP or SIGWINCH to the console.  Note in this
		 * case cvc_redir does not free up mp, so we can reuse it for
		 * the ACK/NAK.
		 */
		error = cvc_redir(mp);
		if (error != 0)
			miocnak(q, mp, 0, error);
		else
			miocack(q, mp, 0, 0);
		break;

	default:
		miocnak(q, mp, 0, EINVAL);
		break;
	}
}
Example no. 3
/*
 * vml_iocdata() -
 * Handle the M_IOCDATA messages associated with
 * a request to validate a module list.
 */
static void
vml_iocdata(
	queue_t *qp,	/* pointer to write queue */
	mblk_t *mp)	/* message pointer */
{
	long i;
	int	nmods;
	struct copyresp *csp;
	struct str_mlist *lp;
	STRUCT_HANDLE(str_list, slp);
	struct saddev *sadp;

	csp = (struct copyresp *)mp->b_rptr;
	if (csp->cp_rval) {	/* if there was an error */
		freemsg(mp);
		return;
	}

	ASSERT(csp->cp_cmd == SAD_VML);
	sadp = (struct saddev *)qp->q_ptr;
	switch ((long)csp->cp_private) {
	case GETSTRUCT:
		STRUCT_SET_HANDLE(slp, csp->cp_flag,
		    (struct str_list *)mp->b_cont->b_rptr);
		nmods = STRUCT_FGET(slp, sl_nmods);
		if (nmods <= 0) {
			miocnak(qp, mp, 0, EINVAL);
			break;
		}
		sadp->sa_addr = (caddr_t)(uintptr_t)nmods;

		mcopyin(mp, (void *)GETLIST, nmods * sizeof (struct str_mlist),
		    STRUCT_FGETP(slp, sl_modlist));
		qreply(qp, mp);
		break;

	case GETLIST:
		lp = (struct str_mlist *)mp->b_cont->b_rptr;
		for (i = 0; i < (long)sadp->sa_addr; i++, lp++) {
			lp->l_name[FMNAMESZ] = '\0';
			if (fmodsw_find(lp->l_name, FMODSW_LOAD) == NULL) {
				miocack(qp, mp, 0, 1);
				return;
			}
		}
		miocack(qp, mp, 0, 0);
		break;

	default:
		cmn_err(CE_WARN, "vml_iocdata: invalid cp_private value: %p",
		    (void *)csp->cp_private);
		freemsg(mp);
		break;
	} /* switch (cp_private) */
}
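vml_iocdata() is only the back half of the transparent-ioctl copy protocol: some earlier M_IOCTL handler issued the first mcopyin(9F) with a state tag, and every M_IOCDATA that returns carries that tag in cp_private and the copy status in cp_rval. A hedged sketch of the front half follows; xx_wput_ioctl and xx_req_t are hypothetical, while GETSTRUCT mirrors the tag used above.

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/stropts.h>	/* TRANSPARENT */
#include <sys/strsun.h>		/* mcopyin(), miocnak() */
#include <sys/errno.h>

#define	GETSTRUCT	1	/* state tag for the first copyin leg */

typedef struct xx_req {		/* hypothetical request layout */
	int	xr_count;
} xx_req_t;

static void
xx_wput_ioctl(queue_t *qp, mblk_t *mp)
{
	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

	if (iocp->ioc_count != TRANSPARENT) {
		miocnak(qp, mp, 0, EINVAL);
		return;
	}

	/*
	 * Turn the M_IOCTL into an M_COPYIN request.  With a NULL user
	 * address, mcopyin() takes the address from the transparent
	 * ioctl's b_cont; GETSTRUCT tags the eventual M_IOCDATA so the
	 * handler above knows which leg of the exchange completed.
	 */
	mcopyin(mp, (void *)GETSTRUCT, sizeof (xx_req_t), NULL);
	qreply(qp, mp);
}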
Example no. 4
/*ARGSUSED*/
static void
hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	ssize_t		size;
	rtrace_t	*rtp;
	mblk_t		*nmp;
	uint32_t	i, j;
	uint32_t	start_blk;
	uint32_t	base_entry;
	uint32_t	num_entries;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));

	size = 1024;
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
		HXGE_DEBUG_MSG((hxgep, STR_CTL,
		    "malformed M_IOCTL MBLKL = %d size = %d",
		    MBLKL(mp->b_cont), size));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	nmp = mp->b_cont;
	rtp = (rtrace_t *)nmp->b_rptr;
	start_blk = rtp->next_idx;
	num_entries = rtp->last_idx;
	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));

	rtp->next_idx = hpi_rtracebuf.next_idx;
	rtp->last_idx = hpi_rtracebuf.last_idx;
	rtp->wrapped = hpi_rtracebuf.wrapped;
	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
		rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
		rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
		rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
	miocack(wq, mp, (int)size, 0);
}
Example no. 5
static void
kb8042_iocdatamsg(queue_t *qp, mblk_t *mp)
{
	struct copyresp	*csp;

	csp = (struct copyresp *)mp->b_rptr;
	if (csp->cp_rval) {
		freemsg(mp);
		return;
	}

	switch (csp->cp_cmd) {
	default:
		miocack(qp, mp, 0, 0);
		break;
	}
}
Example no. 6
/*
 * cvc_ioctl()
 *	handle normal console ioctls.
 */
static void
cvc_ioctl(register queue_t *q, register mblk_t *mp)
{
	register cvc_t			*cp = q->q_ptr;
	int				datasize;
	int				error = 0;

	/*
	 * Let ttycommon_ioctl take the first shot at processing the ioctl.  If
	 * it fails because it can't allocate memory, schedule processing of the
	 * ioctl later when a proper buffer is available.  The mblk that
	 * couldn't be processed will have been stored in the tty structure by
	 * ttycommon_ioctl.
	 */
	datasize = ttycommon_ioctl(&cp->cvc_tty, q, mp, &error);
	if (datasize != 0) {
		if (cp->cvc_wbufcid) {
			unbufcall(cp->cvc_wbufcid);
		}
		cp->cvc_wbufcid = bufcall(datasize, BPRI_HI, cvc_reioctl, cp);
		return;
	}

	/*
	 * ttycommon_ioctl didn't do anything, but there's nothing we really
	 * support either with the exception of TCSBRK, which is supported
	 * only to appear a bit more like a serial device for software that
	 * expects TCSBRK to work.
	 */
	if (error != 0) {
		struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

		if (iocp->ioc_cmd == TCSBRK) {
			miocack(q, mp, 0, 0);
		} else {
			miocnak(q, mp, 0, EINVAL);
		}
	} else {
		qreply(q, mp);
	}
}
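The bufcall() path above depends on a companion callback (cvc_reioctl(), not shown) that retries the ioctl once a buffer can be allocated. ttycommon_ioctl(9F) parks the unprocessed mblk in the tty_common_t's t_iocpending field, so the callback only needs to pull it back out and run the ioctl routine again. A hedged sketch of such a callback, for a hypothetical driver state xx_state_t with handler xx_ioctl(), could look like this:

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/tty.h>		/* tty_common_t, t_iocpending */

typedef struct xx_state {	/* hypothetical per-instance state */
	tty_common_t	xx_tty;
	bufcall_id_t	xx_wbufcid;
} xx_state_t;

static void xx_ioctl(queue_t *q, mblk_t *mp);	/* handler being retried */

/* bufcall(9F) callback: retry the ioctl parked by ttycommon_ioctl(). */
static void
xx_reioctl(void *arg)
{
	xx_state_t *xx = arg;
	queue_t *q;
	mblk_t *mp;

	xx->xx_wbufcid = 0;
	if ((q = xx->xx_tty.t_writeq) == NULL)
		return;
	if ((mp = xx->xx_tty.t_iocpending) != NULL) {
		xx->xx_tty.t_iocpending = NULL;
		xx_ioctl(q, mp);
	}
}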
Example no. 7
/*
 * Set the q_ptr of 'q' to the conn_t pointer passed in.
 */
static void
ip_helper_share_conn(queue_t *q, mblk_t *mp, cred_t *crp)
{
	conn_t *connp = *((conn_t **)mp->b_cont->b_rptr);

	/*
	 * This operation is allowed only on helper streams with kcred
	 */

	if (kcred != crp || msgdsize(mp->b_cont) != sizeof (void *)) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	connp->conn_helper_info->iphs_minfo = q->q_ptr;
	connp->conn_helper_info->iphs_rq = RD(q);
	connp->conn_helper_info->iphs_wq = WR(q);
	WR(q)->q_ptr = RD(q)->q_ptr = (void *)connp;
	connp->conn_rq = RD(q);
	connp->conn_wq = WR(q);
	miocack(q, mp, 0, 0);
}
Example no. 8
static int
zap_m_ioctl(struct zt_chan *chan, queue_t *q, mblk_t *mp)
{
	struct iocblk *ioc = (struct iocblk *)mp->b_rptr;
	mblk_t *dp = mp->b_cont;
	int err;

	switch (ioc->ioc_cmd) {
	case ZT_GET_BLOCKSIZE:
	case ZT_SET_BLOCKSIZE:
	case ZT_FLUSH:
	case ZT_SYNC:
	case ZT_GET_PARAMS:
	case ZT_SET_PARAMS:
	case ZT_HOOK:
	case ZT_SPANSTAT:
	case ZT_MAINT:
	case ZT_GETCONF:
	case ZT_SETCONF:
	case ZT_CONFLINK:
	case ZT_CONFDIAG:
	case ZT_GETGAINS:
	case ZT_SETGAINS:
	case ZT_SPANCONFIG:
	case ZT_CHANCONFIG:
	case ZT_CONFMUTE:
	case ZT_SENDTONE:
	case ZT_SETTONEZONE:
	case ZT_GETTONEZONE:
	case ZT_DEFAULTZONE:
	case ZT_LOADZONE:
	case ZT_FREEZONE:
	case ZT_SET_BUFINFO:
	case ZT_GET_BUFINFO:
	case ZT_GET_DIALPARAMS:
	case ZT_SET_DIALPARAMS:
	case ZT_DIAL:
	case ZT_AUDIOMODE:
	case ZT_ECHOCANCEL:
	case ZT_CHANNO:
	case ZT_DIALING:
	case ZT_HDLCRAWMODE:
	case ZT_HDLCFCSMODE:
	case ZT_SPECIFY:
	{
		int channo = *(int *) dp->b_rptr;

		if ((channo < 1) || (channo > ZT_MAX_CHANNELS)) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		if ((err = zt_specchan_open(NULL, qstream(q)->sd_file,
		    channo, 0)) == 0) {
			RD(q)->q_ptr = WR(q)->q_ptr = &chans[channo];
			miocack(q, mp, 0, 0);
		} else {
			miocnak(q, mp, 0, (err < 0 ? -err : err));
		}
		return (0);
	}
	case ZT_SETLAW:
	case ZT_SETLINEAR:
	case ZT_HDLCPPP:
	case ZT_SETCADENCE:
	case ZT_SETTXBITS:
	case ZT_CHANDIAG:
	case ZT_GETRXBITS:
	case ZT_SFCONFIG:
	case ZT_TIMERCONFIG:
	case ZT_TIMERACK:
	case ZT_GETCONFMUTE:
	case ZT_ECHOTRAIN:
	case ZT_ONHOOKTRANSFER:
	case ZT_TIMERPING:
	case ZT_TIMERPONG:
	case ZT_SIGFREEZE:
	case ZT_GETSIGFREEZE:
		mi_copyout(q, mp, NULL, sizeof(int));
		return (0);
	case ZT_INDIRECT:
	case ZT_DYNAMIC_CREATE:
	case ZT_DYNAMIC_DESTROY:
	case ZT_TONEDETECT:
	case ZT_SETPOLARITY:
	case ZT_STARTUP:
	case ZT_SHUTDOWN:
		break;		/* not handled here */
	}
	return (0);
}
Example no. 9
/*
 * Pass on M_IOCTL messages passed to the DLD, and support
 * private IOCTLs for debugging and ndd.
 */
void
ixgbe_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		miocnak(q, mp, 0, EINVAL);
		return;
	}
	mutex_exit(&ixgbe->gen_lock);

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = ixgbe_loopback_ioctl(ixgbe, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}
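The enum ioc_reply protocol lets subordinate handlers such as ixgbe_loopback_ioctl() prepare a reply without sending it, keeping the single point of reply in ixgbe_m_ioctl(). The hedged sketch below shows the shape of such a subordinate handler; XX_GET_FLAG is a hypothetical command, and the enum is restated here only to keep the fragment self-contained (it approximates the driver-private definition).

#include <sys/types.h>
#include <sys/stream.h>

#define	XX_GET_FLAG	0x1001	/* hypothetical private command */

enum ioc_reply {		/* approximation of the driver-private enum */
	IOC_INVAL = -1,		/* bad: caller NAKs with EINVAL/ioc_error */
	IOC_DONE,		/* reply already sent */
	IOC_ACK,		/* caller sends a plain ACK */
	IOC_REPLY		/* mp holds a prepared reply; caller qreply()s it */
};

static enum ioc_reply
xx_misc_ioctl(struct iocblk *iocp, mblk_t *mp)
{
	switch (iocp->ioc_cmd) {
	case XX_GET_FLAG:
		/* Reuse the caller-supplied data block for the answer. */
		if (iocp->ioc_count != sizeof (uint32_t) ||
		    mp->b_cont == NULL)
			return (IOC_INVAL);
		*(uint32_t *)mp->b_cont->b_rptr = 1;
		iocp->ioc_count = sizeof (uint32_t);
		return (IOC_REPLY);
	default:
		return (IOC_INVAL);
	}
}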
Example no. 10
/*
 * telmodwput:
 * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon
 * to process something.  M_CTL's are data from the daemon bound for the
 * network.  We forward them immediately.  There are two classes of ioctl's
 * we must handle here also.  One is ioctl's forwarded by ptem which we
 * ignore.  The other is ioctl's issued by the daemon to control us.
 * Process them appropriately.  M_PROTO's we pass along, figuring they
 * are TPI operations for TCP.  M_FLUSH requires careful processing, since
 * telnet cannot tolerate flushing its protocol requests.  Also the flushes
 * can be running either daemon<->TCP or application<->telmod.  We must
 * carefully deal with this.
 */
static void
telmodwput(
	queue_t *q,	/* Pointer to the write queue */
	mblk_t *mp)	/* Pointer to current message block */
{
	struct telmod_info	*tmip;
	struct iocblk *ioc;
	mblk_t *savemp;
	int rw;
	int error;

	tmip = (struct telmod_info *)q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_DATA:
		if (!canputnext(q) || (tmip->flags & TEL_STOPPED) ||
			(q->q_first)) {
			noenable(q);
			(void) putq(q, mp);
			break;
		}
		/*
		 * This routine parses data coming from the ptm side.
		 * Insert a null character if a carriage return
		 * is not followed by line feed unless we are in binary mode.
		 * Also, duplicate IAC if found in the data.
		 */
		(void) snd_parse(q, mp);
		break;

	case M_CTL:
		if (((mp->b_wptr - mp->b_rptr) == 1) &&
			(*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(q, mp);
		break;

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/*
		 * This ioctl is issued by user level daemon to
		 * request one more message block to process protocol
		 */
		case TEL_IOC_GETBLK:
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags |= TEL_GETBLK;
			qenable(RD(q));
			enableok(RD(q));

			miocack(q, mp, 0, 0);
			break;

		/*
		 * This ioctl is issued by user level daemon to reenable the
		 * read and write queues. This is issued during startup time
		 * after setting up the mux links and also after processing
		 * the protocol.  It is also issued each time an
		 * unrecognized telnet option is forwarded to the daemon.
		 */
		case TEL_IOC_ENABLE:

			/*
			 * Send negative ack if TEL_STOPPED flag is not set
			 */
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags &= ~TEL_STOPPED;
			if (mp->b_cont) {
				(void) putbq(RD(q), mp->b_cont);
				mp->b_cont = 0;
			}

			qenable(RD(q));
			enableok(RD(q));
			qenable(q);
			enableok(q);

			miocack(q, mp, 0, 0);
			break;

		/*
		 * Set binary/normal mode for input and output
		 * according to the instructions from the daemon.
		 */
		case TEL_IOC_MODE:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			tmip->flags |= *(mp->b_cont->b_rptr) &
			    (TEL_BINARY_IN|TEL_BINARY_OUT);
			miocack(q, mp, 0, 0);
			break;

#ifdef DEBUG
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
		case TIOCSWINSZ:
			miocnak(q, mp, 0, EINVAL);
			break;
#endif
		case CRYPTPASSTHRU:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (*(mp->b_cont->b_rptr) == 0x01)
				tmip->flags |= TEL_IOCPASSTHRU;
			else
				tmip->flags &= ~TEL_IOCPASSTHRU;

			miocack(q, mp, 0, 0);
			break;

		default:
			if (tmip->flags & TEL_IOCPASSTHRU) {
				putnext(q, mp);
			} else {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				"telmodwput: unexpected ioctl type 0x%x",
					ioc->ioc_cmd);
#endif
				miocnak(q, mp, 0, EINVAL);
			}
			break;
		}
		break;

	case M_FLUSH:
		/*
		 * Flushing is tricky:  We try to flush all we can, but certain
		 * data cannot be flushed.  Telnet protocol sequences cannot
		 * be flushed.  So, TCP's queues cannot be flushed since we
		 * cannot tell what might be telnet protocol data.  Then we
		 * must take care to create and forward out-of-band data
		 * indicating the flush to the far side.
		 */
		rw = *mp->b_rptr;
		if (rw & FLUSHR) {
			/*
			 * We cannot flush our read queue, since there may
			 * be telnet protocol bits in the queue, awaiting
			 * processing.  However, once it leaves this module
			 * it's guaranteed that all protocol data is in
			 * M_CTL, so we do flush read data beyond us, expecting
			 * them (actually logindmux) to do FLUSHDATAs also.
			 */
			*mp->b_rptr = rw & ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		if (rw & FLUSHW) {
			/*
			 * Since all telnet protocol data comes from the
			 * daemon, stored as M_CTL messages, flushq will
			 * do exactly what's needed:  Flush bytes which do
			 * not have telnet protocol data.
			 */
			flushq(q, FLUSHDATA);
		}
		break;

	case M_PCPROTO:
		putnext(q, mp);
		break;

	case M_PROTO:
		/* We may receive T_DISCON_REQ from the mux */
		if (!canputnext(q) || q->q_first != NULL)
			(void) putq(q, mp);
		else
			putnext(q, mp);
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodwput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
		break;
	}
}
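telmodwput() deviates from the stock M_FLUSH handling on purpose, since telnet protocol data must never be flushed. For contrast, the stock pattern for a module's write side is to flush its own queues and pass the M_FLUSH along, leaving the driver at the stream end to loop the message back up. A hedged sketch of that standard pattern (xx_wput_flush is a hypothetical helper, not part of telmod):

#include <sys/stream.h>

/* Hedged sketch: canonical write-side M_FLUSH handling for a module. */
static void
xx_wput_flush(queue_t *q, mblk_t *mp)
{
	if (*mp->b_rptr & FLUSHW)
		flushq(q, FLUSHALL);		/* flush our write queue */
	if (*mp->b_rptr & FLUSHR)
		flushq(RD(q), FLUSHALL);	/* flush our read queue */
	putnext(q, mp);				/* driver will loop it back */
}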
Example no. 11
/*
 * function to handle dlpi streams message from GLDv3 mac layer
 */
void
oce_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	struct  iocblk *iocp;
	int cmd;
	uint32_t payload_length;
	int ret;

	iocp = (struct iocblk *)voidptr(mp->b_rptr);
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	DEV_LOCK(dev);
	if (dev->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		DEV_UNLOCK(dev);
		return;
	}
	DEV_UNLOCK(dev);

	switch (cmd) {

	case OCE_ISSUE_MBOX: {
		ret = oce_issue_mbox(dev, wq, mp, &payload_length);
		miocack(wq, mp, payload_length, ret);
		break;
	}
	case OCE_QUERY_DRIVER_DATA: {
		struct oce_driver_query *drv_query =
		    (struct oce_driver_query *)(void *)mp->b_cont->b_rptr;

		/* if the driver version does not match bail */
		if (drv_query->version != OCN_VERSION_SUPPORTED) {
			oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
			    "One Connect version mismatch");
			miocnak(wq, mp, 0, ENOTSUP);
			break;
		}

		/* fill the return values */
		bcopy(OCE_MOD_NAME, drv_query->driver_name,
		    (sizeof (OCE_MOD_NAME) > 32) ?
		    31 : sizeof (OCE_MOD_NAME));
		drv_query->driver_name[31] = '\0';

		bcopy(OCE_VERSION, drv_query->driver_version,
		    (sizeof (OCE_VERSION) > 32) ? 31 :
		    sizeof (OCE_VERSION));
		drv_query->driver_version[31] = '\0';

		if (dev->num_smac == 0) {
			drv_query->num_smac = 1;
			bcopy(dev->mac_addr, drv_query->smac_addr[0],
			    ETHERADDRL);
		} else {
			drv_query->num_smac = dev->num_smac;
			bcopy(dev->unicast_addr, drv_query->smac_addr[0],
			    ETHERADDRL);
		}

		bcopy(dev->mac_addr, drv_query->pmac_addr, ETHERADDRL);

		payload_length = sizeof (struct oce_driver_query);
		miocack(wq, mp, payload_length, 0);
		break;
	}

	default:
		miocnak(wq, mp, 0, ENOTSUP);
		break;
	}
} /* oce_m_ioctl */
Example no. 12
/*
 * apush_iocdata() -
 * Handle the M_IOCDATA messages associated with
 * the autopush feature.
 */
static void
apush_iocdata(
	queue_t *qp,	/* pointer to write queue */
	mblk_t *mp)	/* message pointer */
{
	int i, ret;
	struct copyresp *csp;
	struct strapush *sap;
	struct autopush *ap;
	struct saddev *sadp;
	uint_t size;

	csp = (struct copyresp *)mp->b_rptr;
	if (csp->cp_rval) {	/* if there was an error */
		freemsg(mp);
		return;
	}
	if (mp->b_cont)
		/* sap needed only if mp->b_cont is set */
		sap = (struct strapush *)mp->b_cont->b_rptr;
	switch (SAD_CMD(csp->cp_cmd)) {
	case SAD_CMD(SAD_SAP):
		switch ((long)csp->cp_private) {
		case GETSTRUCT:
			switch (sap->sap_cmd) {
			case SAP_ONE:
			case SAP_RANGE:
			case SAP_ALL:
				if ((sap->sap_npush == 0) ||
				    (sap->sap_npush > MAXAPUSH) ||
				    (sap->sap_npush > nstrpush)) {

					/* invalid number of modules to push */

					miocnak(qp, mp, 0, EINVAL);
					break;
				}
				if (ret = valid_major(sap->sap_major)) {
					miocnak(qp, mp, 0, ret);
					break;
				}
				if ((sap->sap_cmd == SAP_RANGE) &&
				    (sap->sap_lastminor <= sap->sap_minor)) {

					/* bad range */

					miocnak(qp, mp, 0, ERANGE);
					break;
				}

				/*
				 * Validate that the specified list of
				 * modules exist.
				 */
				for (i = 0; i < sap->sap_npush; i++) {
					sap->sap_list[i][FMNAMESZ] = '\0';
					if (fmodsw_find(sap->sap_list[i],
					    FMODSW_LOAD) == NULL) {
						miocnak(qp, mp, 0, EINVAL);
						return;
					}
				}

				mutex_enter(&sad_lock);
				if (ap_hfind(sap->sap_major, sap->sap_minor,
				    sap->sap_lastminor, sap->sap_cmd)) {
					mutex_exit(&sad_lock);

					/* already configured */

					miocnak(qp, mp, 0, EEXIST);
					break;
				}
				if ((ap = ap_alloc()) == NULL) {
					mutex_exit(&sad_lock);

					/* no autopush structures */

					miocnak(qp, mp, 0, ENOSR);
					break;
				}
				ap->ap_cnt++;
				ap->ap_common = sap->sap_common;
				if (SAD_VER(csp->cp_cmd) > 0)
					ap->ap_anchor = sap->sap_anchor;
				else
					ap->ap_anchor = 0;
				for (i = 0; i < ap->ap_npush; i++)
					(void) strcpy(ap->ap_list[i],
					    sap->sap_list[i]);
				ap_hadd(ap);
				mutex_exit(&sad_lock);
				miocack(qp, mp, 0, 0);
				break;

			case SAP_CLEAR:
				if (ret = valid_major(sap->sap_major)) {
					miocnak(qp, mp, 0, ret);
					break;
				}
				mutex_enter(&sad_lock);
				if ((ap = ap_hfind(sap->sap_major,
				    sap->sap_minor, sap->sap_lastminor,
				    sap->sap_cmd)) == NULL) {
					mutex_exit(&sad_lock);

					/* not configured */

					miocnak(qp, mp, 0, ENODEV);
					break;
				}
				if ((ap->ap_type == SAP_RANGE) &&
				    (sap->sap_minor != ap->ap_minor)) {
					mutex_exit(&sad_lock);

					/* starting minors do not match */

					miocnak(qp, mp, 0, ERANGE);
					break;
				}
				if ((ap->ap_type == SAP_ALL) &&
				    (sap->sap_minor != 0)) {
					mutex_exit(&sad_lock);

					/* SAP_ALL must have minor == 0 */

					miocnak(qp, mp, 0, EINVAL);
					break;
				}
				ap_hrmv(ap);
				if (--(ap->ap_cnt) <= 0)
					ap_free(ap);
				mutex_exit(&sad_lock);
				miocack(qp, mp, 0, 0);
				break;

			default:
				miocnak(qp, mp, 0, EINVAL);
				break;
			} /* switch (sap_cmd) */
			break;

		default:
			cmn_err(CE_WARN,
			    "apush_iocdata: cp_private bad in SAD_SAP: %p",
			    (void *)csp->cp_private);
			freemsg(mp);
			break;
		} /* switch (cp_private) */
		break;

	case SAD_CMD(SAD_GAP):
		switch ((long)csp->cp_private) {

		case GETSTRUCT: {
			if (ret = valid_major(sap->sap_major)) {
				miocnak(qp, mp, 0, ret);
				break;
			}
			mutex_enter(&sad_lock);
			if ((ap = ap_hfind(sap->sap_major, sap->sap_minor,
			    sap->sap_lastminor, SAP_ONE)) == NULL) {
				mutex_exit(&sad_lock);

				/* not configured */

				miocnak(qp, mp, 0, ENODEV);
				break;
			}

			sap->sap_common = ap->ap_common;
			if (SAD_VER(csp->cp_cmd) > 0)
				sap->sap_anchor = ap->ap_anchor;
			for (i = 0; i < ap->ap_npush; i++)
				(void) strcpy(sap->sap_list[i], ap->ap_list[i]);
			for (; i < MAXAPUSH; i++)
				bzero(sap->sap_list[i], FMNAMESZ + 1);
			mutex_exit(&sad_lock);

			if (SAD_VER(csp->cp_cmd) == 1)
				size = STRAPUSH_V1_LEN;
			else
				size = STRAPUSH_V0_LEN;

			sadp = (struct saddev *)qp->q_ptr;
			mcopyout(mp, (void *)GETRESULT, size, sadp->sa_addr,
			    NULL);
			qreply(qp, mp);
			break;
			}
		case GETRESULT:
			miocack(qp, mp, 0, 0);
			break;

		default:
			cmn_err(CE_WARN,
			    "apush_iocdata: cp_private bad case SAD_GAP: %p",
			    (void *)csp->cp_private);
			freemsg(mp);
			break;
		} /* switch (cp_private) */
		break;

	default:	/* can't happen */
		ASSERT(0);
		freemsg(mp);
		break;
	} /* switch (cp_cmd) */
}
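The SAD_GAP/GETRESULT leg above is the copyout half of the same protocol: fill the data block, let mcopyout(9F) turn the message into an M_COPYOUT aimed at the saved user address, and simply ACK the M_IOCDATA that comes back tagged GETRESULT. A hedged sketch of that leg with hypothetical names (xx_send_result, xx_reply_t) follows; uaddr stands for the user address stashed when the ioctl first arrived.

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>		/* mcopyout() */

#define	GETRESULT	2	/* state tag for the copyout leg */

typedef struct xx_reply {	/* hypothetical reply layout */
	int	xr_value;
} xx_reply_t;

static void
xx_send_result(queue_t *qp, mblk_t *mp, caddr_t uaddr)
{
	xx_reply_t *rp = (xx_reply_t *)mp->b_cont->b_rptr;

	rp->xr_value = 42;	/* fill in the answer in place */

	/*
	 * Leave the data in mp->b_cont (dp == NULL) and aim the
	 * M_COPYOUT at the saved user address; the M_IOCDATA that
	 * returns with cp_private == GETRESULT is then just ACKed.
	 */
	mcopyout(mp, (void *)GETRESULT, sizeof (xx_reply_t), uaddr, NULL);
	qreply(qp, mp);
}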
Example no. 13
static void
kb8042_ioctlmsg(struct kb8042 *kb8042, queue_t *qp, mblk_t *mp)
{
	struct iocblk	*iocp;
	mblk_t		*datap;
	int		error;
	int		tmp;

	iocp = (struct iocblk *)mp->b_rptr;

	switch (iocp->ioc_cmd) {

	case CONSOPENPOLLEDIO:
		error = miocpullup(mp, sizeof (struct cons_polledio *));
		if (error != 0) {
			miocnak(qp, mp, 0, error);
			return;
		}

		/*
		 * We are given an appropriate-sized data block,
		 * and return a pointer to our structure in it.
		 */
		*(struct cons_polledio **)mp->b_cont->b_rptr =
		    &kb8042->polledio;
		mp->b_datap->db_type = M_IOCACK;
		iocp->ioc_error = 0;
		qreply(qp, mp);
		break;

	case CONSCLOSEPOLLEDIO:
		miocack(qp, mp, 0, 0);
		break;

	case CONSSETABORTENABLE:
		if (iocp->ioc_count != TRANSPARENT) {
			miocnak(qp, mp, 0, EINVAL);
			return;
		}

		kb8042->debugger.enabled = *(intptr_t *)mp->b_cont->b_rptr;
		miocack(qp, mp, 0, 0);
		break;

	/*
	 * Valid only in TR_UNTRANS_MODE mode.
	 */
	case CONSSETKBDTYPE:
		error = miocpullup(mp, sizeof (int));
		if (error != 0) {
			miocnak(qp, mp, 0, error);
			return;
		}
		tmp =  *(int *)mp->b_cont->b_rptr;
		if (tmp != KB_PC && tmp != KB_USB) {
			miocnak(qp, mp, 0, EINVAL);
			break;
		}
		kb8042->simulated_kbd_type = tmp;
		miocack(qp, mp, 0, 0);
		break;

	case KIOCLAYOUT:
		if (kb8042->w_kblayout == -1) {
			miocnak(qp, mp, 0, EINVAL);
			return;
		}

		if ((datap = allocb(sizeof (int), BPRI_HI)) == NULL) {
			miocnak(qp, mp, 0, ENOMEM);
			return;
		}

		if (kb8042->simulated_kbd_type == KB_USB)
			*(int *)datap->b_wptr = KBTRANS_USBKB_DEFAULT_LAYOUT;
		else
			*(int *)datap->b_wptr = kb8042->w_kblayout;

		datap->b_wptr += sizeof (int);
		if (mp->b_cont)
			freemsg(mp->b_cont);
		mp->b_cont = datap;
		iocp->ioc_count = sizeof (int);
		mp->b_datap->db_type = M_IOCACK;
		iocp->ioc_error = 0;
		qreply(qp, mp);
		break;

	case KIOCSLAYOUT:
		if (iocp->ioc_count != TRANSPARENT) {
			miocnak(qp, mp, 0, EINVAL);
			return;
		}

		kb8042->w_kblayout = *(intptr_t *)mp->b_cont->b_rptr;
		miocack(qp, mp, 0, 0);
		break;

	case KIOCCMD:
		error = miocpullup(mp, sizeof (int));
		if (error != 0) {
			miocnak(qp, mp, 0, error);
			return;
		}

		kb8042_type4_cmd(kb8042, *(int *)mp->b_cont->b_rptr);
		miocack(qp, mp, 0, 0);
		break;

	default:
#ifdef DEBUG1
		cmn_err(CE_NOTE, "!kb8042_ioctlmsg %x", iocp->ioc_cmd);
#endif
		miocnak(qp, mp, 0, EINVAL);
		return;
	}
}
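The KIOCLAYOUT branch above assembles the ACK by hand: allocb() a data block, link it as b_cont, set ioc_count, flip db_type, and qreply(). The strsun helper mioc2ack(9F) bundles those steps; a hedged sketch using it (xx_reply_int is a hypothetical wrapper) looks like this:

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>		/* mioc2ack(), miocnak() */
#include <sys/errno.h>

/* Hedged sketch: ACK an ioctl with a freshly allocated int payload. */
static void
xx_reply_int(queue_t *qp, mblk_t *mp, int value)
{
	mblk_t *datap;

	if ((datap = allocb(sizeof (int), BPRI_HI)) == NULL) {
		miocnak(qp, mp, 0, ENOMEM);
		return;
	}
	*(int *)datap->b_wptr = value;
	datap->b_wptr += sizeof (int);

	/* Frees any old b_cont, links datap, sets ioc_count and M_IOCACK. */
	mioc2ack(mp, datap, sizeof (int), 0);
	qreply(qp, mp);
}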
Example no. 14
static int
log_wput(queue_t *q, mblk_t *mp)
{
	log_t *lp = (log_t *)q->q_ptr;
	struct iocblk *iocp;
	mblk_t *mp2;
	cred_t *cr = DB_CRED(mp);
	zoneid_t zoneid;

	/*
	 * Default to global zone if dblk doesn't have a valid cred.
	 * Calls to syslog() go through putmsg(), which does set up
	 * the cred.
	 */
	zoneid = (cr != NULL) ? crgetzoneid(cr) : GLOBAL_ZONEID;

	switch (DB_TYPE(mp)) {
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(q), FLUSHALL);
			qreply(q, mp);
			return (0);
		}
		break;

	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;

		if (lp->log_major != LOG_LOGMIN) {
			/* write-only device */
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (iocp->ioc_count == TRANSPARENT) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (lp->log_flags) {
			miocnak(q, mp, 0, EBUSY);
			return (0);
		}

		freemsg(lp->log_data);
		lp->log_data = mp->b_cont;
		mp->b_cont = NULL;

		switch (iocp->ioc_cmd) {

		case I_CONSLOG:
			log_update(lp, RD(q), SL_CONSOLE, log_console);
			break;

		case I_TRCLOG:
			if (lp->log_data == NULL) {
				miocnak(q, mp, 0, EINVAL);
				return (0);
			}
			log_update(lp, RD(q), SL_TRACE, log_trace);
			break;

		case I_ERRLOG:
			log_update(lp, RD(q), SL_ERROR, log_error);
			break;

		default:
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		miocack(q, mp, 0, 0);
		return (0);

	case M_PROTO:
		if (MBLKL(mp) == sizeof (log_ctl_t) && mp->b_cont != NULL) {
			log_ctl_t *lc = (log_ctl_t *)mp->b_rptr;
			/* This code is used by savecore to log dump msgs */
			if (mp->b_band != 0 &&
			    secpolicy_sys_config(CRED(), B_FALSE) == 0) {
				(void) putq(log_consq, mp);
				return (0);
			}
			if ((lc->pri & LOG_FACMASK) == LOG_KERN)
				lc->pri |= LOG_USER;
			mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, lc->level,
			    lc->flags, lc->pri, mp->b_cont->b_rptr,
			    MBLKL(mp->b_cont) + 1, 0);
			if (mp2 != NULL)
				log_sendmsg(mp2, zoneid);
		}
		break;

	case M_DATA:
		mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, 0, SL_CONSOLE,
		    LOG_USER | LOG_INFO, mp->b_rptr, MBLKL(mp) + 1, 0);
		if (mp2 != NULL)
			log_sendmsg(mp2, zoneid);
		break;
	}

	freemsg(mp);
	return (0);
}
Example no. 15
/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;
	size_t		size;
	int		i;

	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
		if (hxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    hxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (hxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_SET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;

	case LB_GET_INFO_SIZE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);

			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				lb_props[i++] = lb_mac10g;

				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
		}

		break;
	}
}
Example no. 16
/*
 * wput(9E) is symmetric for master and slave sides, so this handles both
 * without splitting the codepath.  (The only exception to this is the
 * processing of zcons ioctls, which is restricted to the master side.)
 *
 * zc_wput() looks at the other side; if there is no process holding that
 * side open, it frees the message.  This prevents processes from hanging
 * if no one is holding open the console.  Otherwise, it putnext's high
 * priority messages, putnext's normal messages if possible, and otherwise
 * enqueues the messages; in the case that something is enqueued, wsrv(9E)
 * will take care of eventually shuttling I/O to the other side.
 */
static void
zc_wput(queue_t *qp, mblk_t *mp)
{
	unsigned char type = mp->b_datap->db_type;
	zc_state_t *zcs;
	struct iocblk *iocbp;
	file_t *slave_filep;
	struct snode *slave_snodep;
	int slave_fd;

	ASSERT(qp->q_ptr);

	DBG1("entering zc_wput, %s side", zc_side(qp));

	/*
	 * Process zcons ioctl messages if qp is the master console's write
	 * queue.
	 */
	zcs = (zc_state_t *)qp->q_ptr;
	if (zcs->zc_master_rdq != NULL && qp == WR(zcs->zc_master_rdq) &&
	    type == M_IOCTL) {
		iocbp = (struct iocblk *)(void *)mp->b_rptr;
		switch (iocbp->ioc_cmd) {
		case ZC_HOLDSLAVE:
			/*
			 * Hold the slave's vnode and increment the refcount
			 * of the snode.  If the vnode is already held, then
			 * indicate success.
			 */
			if (iocbp->ioc_count != TRANSPARENT) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}
			if (zcs->zc_slave_vnode != NULL) {
				miocack(qp, mp, 0, 0);
				return;
			}

			/*
			 * The process that passed the ioctl must be running in
			 * the global zone.
			 */
			if (curzone != global_zone) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}

			/*
			 * The calling process must pass a file descriptor for
			 * the slave device.
			 */
			slave_fd =
			    (int)(intptr_t)*(caddr_t *)(void *)mp->b_cont->
			    b_rptr;
			slave_filep = getf(slave_fd);
			if (slave_filep == NULL) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}
			if (ZC_STATE_TO_SLAVEDEV(zcs) !=
			    slave_filep->f_vnode->v_rdev) {
				releasef(slave_fd);
				miocack(qp, mp, 0, EINVAL);
				return;
			}

			/*
			 * Get a reference to the slave's vnode.  Also bump the
			 * reference count on the associated snode.
			 */
			ASSERT(vn_matchops(slave_filep->f_vnode,
			    spec_getvnodeops()));
			zcs->zc_slave_vnode = slave_filep->f_vnode;
			VN_HOLD(zcs->zc_slave_vnode);
			slave_snodep = VTOCS(zcs->zc_slave_vnode);
			mutex_enter(&slave_snodep->s_lock);
			++slave_snodep->s_count;
			mutex_exit(&slave_snodep->s_lock);
			releasef(slave_fd);
			miocack(qp, mp, 0, 0);
			return;
		case ZC_RELEASESLAVE:
			/*
			 * Release the master's handle on the slave's vnode.
			 * If there isn't a handle for the vnode, then indicate
			 * success.
			 */
			if (iocbp->ioc_count != TRANSPARENT) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}
			if (zcs->zc_slave_vnode == NULL) {
				miocack(qp, mp, 0, 0);
				return;
			}

			/*
			 * The process that passed the ioctl must be running in
			 * the global zone.
			 */
			if (curzone != global_zone) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}

			/*
			 * The process that passed the ioctl must have provided
			 * a file descriptor for the slave device.  Make sure
			 * this is correct.
			 */
			slave_fd =
			    (int)(intptr_t)*(caddr_t *)(void *)mp->b_cont->
			    b_rptr;
			slave_filep = getf(slave_fd);
			if (slave_filep == NULL) {
				miocack(qp, mp, 0, EINVAL);
				return;
			}
			if (zcs->zc_slave_vnode->v_rdev !=
			    slave_filep->f_vnode->v_rdev) {
				releasef(slave_fd);
				miocack(qp, mp, 0, EINVAL);
				return;
			}

			/*
			 * Decrement the snode's reference count and release the
			 * vnode.
			 */
			ASSERT(vn_matchops(slave_filep->f_vnode,
			    spec_getvnodeops()));
			slave_snodep = VTOCS(zcs->zc_slave_vnode);
			mutex_enter(&slave_snodep->s_lock);
			--slave_snodep->s_count;
			mutex_exit(&slave_snodep->s_lock);
			VN_RELE(zcs->zc_slave_vnode);
			zcs->zc_slave_vnode = NULL;
			releasef(slave_fd);
			miocack(qp, mp, 0, 0);
			return;
		default:
			break;
		}
	}

	if (zc_switch(RD(qp)) == NULL) {
		DBG1("wput to %s side (no one listening)", zc_side(qp));
		switch (type) {
		case M_FLUSH:
			handle_mflush(qp, mp);
			break;
		case M_IOCTL:
			miocnak(qp, mp, 0, 0);
			break;
		default:
			freemsg(mp);
			break;
		}
		return;
	}

	if (type >= QPCTL) {
		DBG1("(hipri) wput, %s side", zc_side(qp));
		switch (type) {
		case M_READ:		/* supposedly from ldterm? */
			DBG("zc_wput: tossing M_READ\n");
			freemsg(mp);
			break;
		case M_FLUSH:
			handle_mflush(qp, mp);
			break;
		default:
			/*
			 * Put this to the other side.
			 */
			ASSERT(zc_switch(RD(qp)) != NULL);
			putnext(zc_switch(RD(qp)), mp);
			break;
		}
		DBG1("done (hipri) wput, %s side", zc_side(qp));
		return;
	}

	/*
	 * Only putnext if there isn't already something in the queue.
	 * otherwise things would wind up out of order.
	 */
	if (qp->q_first == NULL && bcanputnext(RD(zc_switch(qp)), mp->b_band)) {
		DBG("wput: putting message to other side\n");
		putnext(RD(zc_switch(qp)), mp);
	} else {
		DBG("wput: putting msg onto queue\n");
		(void) putq(qp, mp);
	}
	DBG1("done wput, %s side", zc_side(qp));
}
Example no. 17
/*
 * Handle write-side M_IOCTL messages.
 */
static void
pfioctl(queue_t *wq, mblk_t *mp)
{
	struct	epacketfilt	*pfp = (struct epacketfilt *)wq->q_ptr;
	struct	Pf_ext_packetfilt	*upfp;
	struct	packetfilt	*opfp;
	ushort_t	*fwp;
	int	arg;
	int	maxoff = 0;
	int	maxoffreg = 0;
	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
	int	error;

	switch (iocp->ioc_cmd) {
	case PFIOCSETF:
		/*
		 * Verify argument length. Since the size of packet filter
		 * got increased (ENMAXFILTERS was bumped up to 2047), to
		 * maintain backwards binary compatibility, we need to
		 * check for both possible sizes.
		 */
		switch (iocp->ioc_count) {
		case sizeof (struct Pf_ext_packetfilt):
			error = miocpullup(mp,
			    sizeof (struct Pf_ext_packetfilt));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				return;
			}
			upfp = (struct Pf_ext_packetfilt *)mp->b_cont->b_rptr;
			if (upfp->Pf_FilterLen > PF_MAXFILTERS) {
				miocnak(wq, mp, 0, EINVAL);
				return;
			}

			bcopy(upfp, pfp, sizeof (struct Pf_ext_packetfilt));
			pfp->pf_FilterEnd = &pfp->pf_Filter[pfp->pf_FilterLen];
			break;

		case sizeof (struct packetfilt):
			error = miocpullup(mp, sizeof (struct packetfilt));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				return;
			}
			opfp = (struct packetfilt *)mp->b_cont->b_rptr;
			/* this strange comparison keeps gcc from complaining */
			if (opfp->Pf_FilterLen - 1 >= ENMAXFILTERS) {
				miocnak(wq, mp, 0, EINVAL);
				return;
			}

			pfp->pf.Pf_Priority = opfp->Pf_Priority;
			pfp->pf.Pf_FilterLen = (unsigned int)opfp->Pf_FilterLen;

			bcopy(opfp->Pf_Filter, pfp->pf.Pf_Filter,
			    sizeof (opfp->Pf_Filter));
			pfp->pf_FilterEnd = &pfp->pf_Filter[pfp->pf_FilterLen];
			break;

		default:
			miocnak(wq, mp, 0, EINVAL);
			return;
		}

		/*
		 * Find and record maximum byte offset that the
 * filter uses.  We use this when executing the
		 * filter to determine how much of the packet
		 * body to pull up.  This code depends on the
		 * filter encoding.
		 */
		for (fwp = pfp->pf_Filter; fwp < pfp->pf_FilterEnd; fwp++) {
			arg = *fwp & ((1 << ENF_NBPA) - 1);
			switch (arg) {
			default:
				if ((arg -= ENF_PUSHWORD) > maxoff)
					maxoff = arg;
				break;

			case ENF_LOAD_OFFSET:
				/* Point to the offset */
				fwp++;
				if (*fwp > maxoffreg)
					maxoffreg = *fwp;
				break;

			case ENF_PUSHLIT:
			case ENF_BRTR:
			case ENF_BRFL:
				/* Skip over the literal. */
				fwp++;
				break;

			case ENF_PUSHZERO:
			case ENF_PUSHONE:
			case ENF_PUSHFFFF:
			case ENF_PUSHFF00:
			case ENF_PUSH00FF:
			case ENF_NOPUSH:
			case ENF_POP:
				break;
			}
		}

		/*
		 * Convert word offset to length in bytes.
		 */
		pfp->pf_PByteLen = (maxoff + maxoffreg + 1) * sizeof (ushort_t);
		miocack(wq, mp, 0, 0);
		break;

	default:
		putnext(wq, mp);
		break;
	}
}