Example #1
0
STATIC void
spm_free_priv(queue_t *q)
{
	uint t;
	spm_t *s = PRIV(q);

	ensure(s, return);
	if (s->rbid)
		unbufcall(xchg(&s->rbid, 0));
	if (s->wbid)
		unbufcall(xchg(&s->wbid, 0));
	if ((*(s->prev) = s->next))
		s->next->prev = s->prev;
	s->next = NULL;
	s->prev = NULL;
	if ((t = xchg(&s->wtim, 0)))
		untimeout(t);
	if ((t = xchg(&s->rtim, 0)))
		untimeout(t);
	noenable(s->wq);
	noenable(s->rq);
	assure(s->refcnt == 0);
	printd(("spm: unlinked module private structure\n"));
	kmem_cache_free(spm_priv_cachep, s);
	printd(("spm: freed module private structure\n"));
	return;
}
Example #2
0
/************************************************************************
 *
 * Function Name: ip2xinet_lwsrv
 * Title: IP2XINET Lower Write Service routine
 *
 * Description:
 *      Send all of the messages on this queue down to the next driver.
 *      If we discover that we can't do a put, then stop the Linux
 *      devices from sending us stuff.
 *
 ************************************************************************/
STATIC streamscall int
ip2xinet_lwsrv(queue_t *q)
{
	mblk_t *mp;
	int allsent = 1;
	int i;
	struct ip2xinet_dev *dev = ip2xinet_devs;
	struct ip2xinet_priv *privp;

	while ((mp = getq(q))) {
		/* M_PROTO's should be last on the list.  Anything else belongs ahead of them,
		   so we can just go ahead and put it down. */
		if (mp->b_datap->db_type == M_PROTO) {
			if (canputnext(q)) {
				putnext(q, mp);
			} else {
				noenable(q);
				if (!putbq(q, mp))
					freemsg(mp);	/* FIXME */
				enableok(q);
				allsent = 0;
				break;
			}
		} else {
			putnext(q, mp);
		}
	}

	/* Handle the flow control.  If we were able to send everything then it is ok for the
	   kernel to send us more stuff.  Otherwise it is not ok.  Go through all of the devices
	   and set the appropriate state. */
	spin_lock(&ip2xinet_lock);
	for (i = 0; i < NUMIP2XINET; i++, dev++) {
		privp = &dev->priv;
		if (privp->state == 1 && ip2xinet_status.ip2x_dlstate == DL_IDLE) {
			if (allsent) {
				netif_start_queue(&dev->dev);	/* kernel can transmit */
			} else {
				netif_stop_queue(&dev->dev);	/* We are flow controlled. */
			}
		}
	}
	spin_unlock(&ip2xinet_lock);

	return (0);
}
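
The loop above is the standard STREAMS flow-control idiom for a write service procedure: drain with getq(), test canputnext(), and requeue under noenable() with putbq() when the downstream queue is full, so that a later back-enable reschedules the service routine. A minimal sketch of just that idiom, written against the Solaris-style DDI rather than this particular Linux driver (the xx_wsrv name is illustrative):

#include <sys/types.h>
#include <sys/stream.h>

/*
 * Minimal write service procedure: forward queued messages while the
 * next queue can accept them; otherwise requeue the current message in
 * order and stop.  noenable() keeps a back-enable from rescheduling us
 * while we are requeueing; enableok() restores normal scheduling.
 */
static int
xx_wsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (!canputnext(q)) {
			noenable(q);
			if (!putbq(q, mp))
				freemsg(mp);	/* putbq rarely fails for a just-dequeued block */
			enableok(q);
			break;			/* back-enable will run us again */
		}
		putnext(q, mp);
	}
	return (0);
}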
Example #3
0
static void
recover(queue_t *q, mblk_t *mp, size_t size)
{
	bufcall_id_t bid;
	timeout_id_t tid;
	struct	telmod_info	*tmip = (struct telmod_info *)q->q_ptr;

	ASSERT(mp->b_datap->db_type < QPCTL);
	noenable(q);
	(void) putbq(q, mp);

	/*
	 * Make sure there is at most one outstanding request per queue.
	 */
	if (q->q_flag & QREADR) {
		if (tmip->rtimoutid || tmip->rbufcid) {
			return;
		}
	} else {
		if (tmip->wtimoutid || tmip->wbufcid) {
			return;
		}
	}
	if (!(bid = qbufcall(RD(q), size, BPRI_MED, telmod_buffer, q))) {
		tid = qtimeout(RD(q), telmod_timer, q, SIMWAIT);
		if (q->q_flag & QREADR)
			tmip->rtimoutid = tid;
		else
			tmip->wtimoutid = tid;
	} else	{
		if (q->q_flag & QREADR)
			tmip->rbufcid = bid;
		else
			tmip->wbufcid = bid;
	}
}
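
recover() depends on matching qbufcall/qtimeout callbacks (telmod_buffer and telmod_timer, not shown here) that clear the saved id and reschedule the queue. The sketch below shows the usual shape of such callbacks; the names xx_buffer, xx_timer, and struct xx_info are hypothetical, and the separate read-side/write-side ids that telmod keeps are collapsed into a single pair:

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

struct xx_info {
	bufcall_id_t	xx_bufcid;	/* outstanding qbufcall, if any */
	timeout_id_t	xx_timoutid;	/* outstanding qtimeout, if any */
};

/* qbufcall callback: a buffer of the requested size may now be available. */
static void
xx_buffer(void *arg)
{
	queue_t *q = arg;
	struct xx_info *xp = q->q_ptr;

	xp->xx_bufcid = 0;	/* request no longer outstanding */
	enableok(q);
	qenable(q);		/* re-run the service procedure to retry */
}

/* qtimeout callback: the fallback timer fired because qbufcall itself failed. */
static void
xx_timer(void *arg)
{
	queue_t *q = arg;
	struct xx_info *xp = q->q_ptr;

	xp->xx_timoutid = 0;
	enableok(q);
	qenable(q);
}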
Example #4
0
/*
 * This routine is called from the read put/service procedure and parses
 * the message block to check for telnet protocol by detecting an IAC.
 * The routine processes the data part of the message block first and
 * then sends the protocol that follows the IAC to the telnet daemon.  The
 * routine also processes CR/LF by eliminating the LF/NULL that follows a CR.
 *
 * Since the code to do this with streams mblks is complicated, some
 * explanations are in order.  If an IAC is found, a dupb() is done,
 * and the pointers are adjusted to create two streams messages.  The
 * (possibly empty) first message contains preceding data, and the
 * second begins with the IAC and contains the rest of the streams
 * message.
 *
 * The variables:
 * datamp:	Points to the head of a chain of mblks containing data
 *		which requires no expansion, and can be forwarded directly
 *		to the pty.
 * prevmp:	Points to the last mblk on the datamp chain, used to add
 *		to the chain headed by datamp.
 * newmp:	When an M_CTL header is required, this pointer references
 *		that "header" mblk.
 * protomp:	When an IAC is discovered, a dupb() is done on the first mblk
 *		containing an IAC.  protomp points to this dup'ed mblk.
 *		This mblk is eventually forwarded to the daemon.
 */
static int
rcv_parse(queue_t *q, mblk_t *mp)
{
	mblk_t	*protomp, *newmp, *datamp, *prevmp;
	unsigned char *tmp;
	size_t	msgsize;

	struct telmod_info    *tmip = (struct telmod_info *)q->q_ptr;

	datamp = mp;
	prevmp = protomp = 0;

	while (mp) {
		/*
		 * If the mblk is empty, just continue scanning.
		 */
		if (mp->b_rptr == mp->b_wptr) {
			prevmp = mp;
			mp = mp->b_cont;
			continue;
		}
		/*
		 * First check to see if we have received CR and are checking
		 * for a following LF/NULL.  If so, do what's necessary to
		 * trim the LF/NULL.  This case is for when the LF/NULL is
		 * at the beginning of a subsequent mblk.
		 */
		if (!(tmip->flags & TEL_BINARY_IN) &&
		    (tmip->flags & TEL_CRRCV)) {
			if ((*mp->b_rptr == '\n') || (*mp->b_rptr == NULL)) {
				if (mp->b_wptr == (mp->b_rptr + 1)) {
					tmip->flags &= ~TEL_CRRCV;
					if (prevmp) {
						prevmp->b_cont = mp->b_cont;
						freeb(mp);
						mp = prevmp->b_cont;
						continue;
					} else {
						datamp = mp->b_cont;
						freeb(mp);
						if (datamp == NULL) {
							/*
							 * Message contained
							 * only a '\0' after
							 * a '\r' in a previous
							 * message, so we can
							 * read more, even
							 * though we have
							 * nothing to putnext.
							 */
							return (1);
						} else {
							mp = datamp;
							continue;
						}
					}
				}
				mp->b_rptr += 1;
			}
			tmip->flags &= ~TEL_CRRCV;
		}
		tmp = mp->b_rptr;
		/*
		 * Now scan through the entire message block, for IACs
		 * and CR characters, which need processing.
		 */
		while (tmp < mp->b_wptr) {

			if (tmp[0] == IAC) {
				/*
				 * Telnet protocol - parse it now
				 * process data part of mblk
				 * before sending the protocol.
				 */
				if (tmp > mp->b_rptr) {
					if ((protomp = dupb(mp)) == NULL) {
						msgsize = msgdsize(datamp);
						recover(q, datamp, msgsize);
						return (0);
					}
					ASSERT(tmp >= mp->b_datap->db_base);
					ASSERT(tmp <= mp->b_datap->db_lim);
					ASSERT(tmp >=
					    protomp->b_datap->db_base);
					ASSERT(tmp <= protomp->b_datap->db_lim);
					mp->b_wptr = tmp;
					protomp->b_rptr = tmp;
					protomp->b_cont = mp->b_cont;
					mp->b_cont = 0;

					if (prevmp)
						prevmp->b_cont = mp;

				} else {
					protomp = mp;

					if (prevmp)
						prevmp->b_cont = 0;
					else
						datamp = 0;
				}
				if (datamp) {
					putnext(q, datamp);
				}
				/*
				 * create a 1 byte M_CTL message block with
				 * protomp and send it down.
				 */

				if ((newmp = allocb(sizeof (char),
					BPRI_MED)) == NULL) {
					/*
					 * Save the dup'ed mp containing
					 * the protocol information which
					 * we couldn't get an M_CTL header
					 * for.
					 */
					msgsize = msgdsize(protomp);
					recover(q, protomp, msgsize);
					return (0);
				}
				newmp->b_datap->db_type = M_CTL;
				newmp->b_wptr = newmp->b_rptr + 1;
				*(newmp->b_rptr) = M_CTL_MAGIC_NUMBER;
				newmp->b_cont = protomp;
				noenable(q);
				tmip->flags |= TEL_STOPPED;
				putnext(q, newmp);

				return (0);
			}
			if (!(tmip->flags & TEL_BINARY_IN)) {
				/*
				 * Set TEL_CRRCV flag if last character is CR
				 */
				if ((tmp == (mp->b_wptr - 1)) &&
					(tmp[0] == '\r')) {
					tmip->flags |= TEL_CRRCV;
					break;
				}

				/*
				 * If CR is followed by LF/NULL, get rid of
				 * LF/NULL and realign the message block.
				 */
				if ((tmp[0] == '\r') && ((tmp[1] == '\n') ||
				    (tmp[1] == NULL))) {
					/*
					 * If CR is in the middle of a block,
					 * we need to get rid of LF and join
					 * the two pieces together.
					 */
					if (mp->b_wptr > (tmp + 2)) {
						bcopy(tmp + 2, tmp + 1,
						    (mp->b_wptr - tmp - 2));
						mp->b_wptr -= 1;
					} else {
						mp->b_wptr = tmp + 1;
					}

					if (prevmp)
						prevmp->b_cont = mp;
				}
			}
			tmp++;
		}
		prevmp = mp;
		mp = mp->b_cont;
	}
	putnext(q, datamp);

	return (1);
}
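
The core of the IAC handling above is the dupb() split: duplicate the block that contains the IAC, terminate the original at the IAC, start the duplicate there, and move the rest of the chain onto the duplicate. A compact sketch of just that split, with a hypothetical helper name and none of the surrounding recovery logic:

#include <sys/types.h>
#include <sys/stream.h>

/*
 * Split a data block at marker position "tmp" (which must lie inside
 * mp's data).  On return, mp holds the bytes before the marker and the
 * returned block starts at the marker and carries the rest of the
 * original message chain.  Returns NULL if dupb() fails, in which case
 * the caller must recover (requeue and bufcall) as rcv_parse() does.
 */
static mblk_t *
split_at(mblk_t *mp, unsigned char *tmp)
{
	mblk_t *second;

	if ((second = dupb(mp)) == NULL)
		return (NULL);
	mp->b_wptr = tmp;		/* first part ends just before the marker */
	second->b_rptr = tmp;		/* second part begins at the marker */
	second->b_cont = mp->b_cont;	/* tail of the chain follows the marker */
	mp->b_cont = NULL;
	return (second);
}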
Example #5
0
/*
 * telmodwput:
 * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon
 * to process something.  M_CTL's are data from the daemon bound for the
 * network.  We forward them immediately.  There are two classes of ioctl's
 * we must handle here also.  One is ioctl's forwarded by ptem which we
 * ignore.  The other is ioctl's issued by the daemon to control us.
 * Process them appropriately.  M_PROTO's we pass along, figuring they
 * are TPI operations for TCP.  M_FLUSH requires careful processing, since
 * telnet cannot tolerate flushing its protocol requests.  Also the flushes
 * can be running either daemon<->TCP or application<->telmod.  We must
 * carefully deal with this.
 */
static void
telmodwput(
	queue_t *q,	/* Pointer to the write queue */
	mblk_t *mp)	/* Pointer to current message block */
{
	struct telmod_info	*tmip;
	struct iocblk *ioc;
	mblk_t *savemp;
	int rw;
	int error;

	tmip = (struct telmod_info *)q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_DATA:
		if (!canputnext(q) || (tmip->flags & TEL_STOPPED) ||
			(q->q_first)) {
			noenable(q);
			(void) putq(q, mp);
			break;
		}
		/*
		 * This routine parses data coming from the ptm side.
		 * Insert a null character if a carriage return
		 * is not followed by a line feed, unless we are in binary mode.
		 * Also, duplicate any IAC found in the data.
		 */
		(void) snd_parse(q, mp);
		break;

	case M_CTL:
		if (((mp->b_wptr - mp->b_rptr) == 1) &&
			(*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(q, mp);
		break;

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/*
		 * This ioctl is issued by the user-level daemon to
		 * request one more message block for protocol processing.
		 */
		case TEL_IOC_GETBLK:
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags |= TEL_GETBLK;
			qenable(RD(q));
			enableok(RD(q));

			miocack(q, mp, 0, 0);
			break;

		/*
		 * This ioctl is issued by the user-level daemon to reenable the
		 * read and write queues.  This is issued during startup time
		 * after setting up the mux links, and also after processing
		 * the protocol.  It is also issued each time an
		 * unrecognized telnet option is forwarded to the daemon.
		 */
		case TEL_IOC_ENABLE:

			/*
			 * Send negative ack if TEL_STOPPED flag is not set
			 */
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags &= ~TEL_STOPPED;
			if (mp->b_cont) {
				(void) putbq(RD(q), mp->b_cont);
				mp->b_cont = 0;
			}

			qenable(RD(q));
			enableok(RD(q));
			qenable(q);
			enableok(q);

			miocack(q, mp, 0, 0);
			break;

		/*
		 * Set binary/normal mode for input and output
		 * according to the instructions from the daemon.
		 */
		case TEL_IOC_MODE:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			tmip->flags |= *(mp->b_cont->b_rptr) &
			    (TEL_BINARY_IN|TEL_BINARY_OUT);
			miocack(q, mp, 0, 0);
			break;

#ifdef DEBUG
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
		case TIOCSWINSZ:
			miocnak(q, mp, 0, EINVAL);
			break;
#endif
		case CRYPTPASSTHRU:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (*(mp->b_cont->b_rptr) == 0x01)
				tmip->flags |= TEL_IOCPASSTHRU;
			else
				tmip->flags &= ~TEL_IOCPASSTHRU;

			miocack(q, mp, 0, 0);
			break;

		default:
			if (tmip->flags & TEL_IOCPASSTHRU) {
				putnext(q, mp);
			} else {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				"telmodwput: unexpected ioctl type 0x%x",
					ioc->ioc_cmd);
#endif
				miocnak(q, mp, 0, EINVAL);
			}
			break;
		}
		break;

	case M_FLUSH:
		/*
		 * Flushing is tricky:  We try to flush all we can, but certain
		 * data cannot be flushed.  Telnet protocol sequences cannot
		 * be flushed.  So, TCP's queues cannot be flushed since we
		 * cannot tell what might be telnet protocol data.  Then we
		 * must take care to create and forward out-of-band data
		 * indicating the flush to the far side.
		 */
		rw = *mp->b_rptr;
		if (rw & FLUSHR) {
			/*
			 * We cannot flush our read queue, since there may
			 * be telnet protocol bits in the queue, awaiting
			 * processing.  However, once it leaves this module
			 * it's guaranteed that all protocol data is in
			 * M_CTL, so we do flush read data beyond us, expecting
			 * them (actually logindmux) to do FLUSHDATAs also.
			 */
			*mp->b_rptr = rw & ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		if (rw & FLUSHW) {
			/*
			 * Since all telnet protocol data comes from the
			 * daemon, stored as M_CTL messages, flushq will
			 * do exactly what's needed:  Flush bytes which do
			 * not have telnet protocol data.
			 */
			flushq(q, FLUSHDATA);
		}
		break;

	case M_PCPROTO:
		putnext(q, mp);
		break;

	case M_PROTO:
		/* We may receive T_DISCON_REQ from the mux */
		if (!canputnext(q) || q->q_first != NULL)
			(void) putq(q, mp);
		else
			putnext(q, mp);
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodwput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
		break;
	}
}
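
For contrast, the canonical M_FLUSH handling in a module's write put procedure is much simpler than telmod's, because an ordinary module can safely flush its own write queue and pass the message along unchanged; telmod deviates only because queued telnet protocol data must survive a flush. A minimal sketch of the canonical form (a generic module, not telmod):

#include <sys/types.h>
#include <sys/stream.h>

/*
 * Canonical write-side M_FLUSH handling for a module: flush our own
 * write queue if asked, then let the message continue downstream.  The
 * driver at the end of the Stream is responsible for turning the
 * message around for the read side.
 */
static void
xx_wput_flush(queue_t *q, mblk_t *mp)
{
	if (*mp->b_rptr & FLUSHW)
		flushq(q, FLUSHALL);
	putnext(q, mp);
}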
Example #6
0
/*
 * telmodrsrv:
 * Mostly we end up here because of M_DATA processing delayed due to flow
 * control or lack of memory.  XXX.sparker: TLI primitives here?
 */
static void
telmodrsrv(queue_t *q)
{
	mblk_t	*mp, *newmp;
	struct telmod_info    *tmip = (struct telmod_info *)q->q_ptr;
	union T_primitives *tip;

	while ((mp = getq(q)) != NULL) {
		if (((tmip->flags & TEL_STOPPED) &&
		    !(tmip->flags & TEL_GETBLK)) || !canputnext(q)) {
			(void) putbq(q, mp);
			return;
		}
		switch (mp->b_datap->db_type) {

		case M_DATA:
is_mdata:
			if (tmip->flags & TEL_GETBLK) {
				if ((newmp = allocb(sizeof (char),
				    BPRI_MED)) == NULL) {
					recover(q, mp, msgdsize(mp));
					return;
				}
				newmp->b_datap->db_type = M_CTL;
				newmp->b_wptr = newmp->b_rptr + 1;
				*(newmp->b_rptr) = M_CTL_MAGIC_NUMBER;
				newmp->b_cont = mp;
				tmip->flags &= ~TEL_GETBLK;
				noenable(q);
				tmip->flags |= TEL_STOPPED;

				putnext(q, newmp);

				break;
			}
			if (!rcv_parse(q, mp)) {
				return;
			}
			break;

		case M_PROTO:

			tip = (union T_primitives *)mp->b_rptr;

			/*
			 * Unless the M_PROTO message indicates data, clear
			 * TEL_GETBLK so that we stop passing our messages
			 * up to the telnet daemon.
			 */
			if (tip->type != T_DATA_IND &&
			    tip->type != T_EXDATA_IND)
				tmip->flags &= ~TEL_GETBLK;

			switch (tip->type) {
			case T_ORDREL_IND:
			case T_DISCON_IND:
			/* Make into M_HANGUP and putnext */
				ASSERT(mp->b_cont == NULL);
				mp->b_datap->db_type = M_HANGUP;
				mp->b_wptr = mp->b_rptr;
				if (mp->b_cont) {
					freemsg(mp->b_cont);
					mp->b_cont = NULL;
				}
				/*
				 * If we haven't already, send T_UNBIND_REQ
				 * to prevent TCP from going into "BOUND"
				 * state and locking up the port.
				 */
				if (tip->type == T_DISCON_IND &&
				    tmip->unbind_mp != NULL) {
					putnext(q, mp);
					qreply(q, tmip->unbind_mp);
					tmip->unbind_mp = NULL;
				} else {
					putnext(q, mp);
				}
				break;

			case T_DATA_IND: /* conform to TPI, but never happens */
			case T_EXDATA_IND:
				newmp = mp->b_cont;
				freeb(mp);
				mp = newmp;
				if (mp) {
					ASSERT(mp->b_datap->db_type == M_DATA);
					if (msgdsize(mp) != 0) {
						goto is_mdata;
					}
					freemsg(mp);
				}
				break;

			/*
			 * We only get T_OK_ACK when we issue the unbind, and
			 * it can be ignored safely.
			 */
			case T_OK_ACK:
				ASSERT(tmip->unbind_mp == NULL);
				freemsg(mp);
				break;

			default:
#ifdef DEBUG
				cmn_err(CE_NOTE,
				    "telmodrsrv: unexpected TLI primitive "
				    "msg type 0x%x", tip->type);
#endif
				freemsg(mp);
			}
			break;

		case M_SETOPTS:
			putnext(q, mp);
			break;

		default:
#ifdef DEBUG
			cmn_err(CE_NOTE,
			    "telmodrsrv: unexpected msg type 0x%x",
			    mp->b_datap->db_type);
#endif
			freemsg(mp);
		}
	}
}
Example #7
0
/*
 * telmodrput:
 * Be sure to preserve data order.  If the daemon is waiting for additional
 * data (TEL_GETBLK state) forward new data.  Otherwise, apply normal
 * telnet protocol processing to M_DATA.  Take notice of TLI messages
 * indicating connection tear-down, and change them into M_HANGUP's.
 */
static void
telmodrput(queue_t *q, mblk_t *mp)
{
	mblk_t	*newmp;
	struct telmod_info    *tmip = (struct telmod_info *)q->q_ptr;
	union T_primitives *tip;

	if ((mp->b_datap->db_type < QPCTL) &&
	    ((q->q_first) || ((tmip->flags & TEL_STOPPED) &&
	    !(tmip->flags & TEL_GETBLK)) || !canputnext(q))) {
		(void) putq(q, mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:

		/*
		 * If the user-level daemon requests one more
		 * block of data (needs more data for protocol processing)
		 * create a M_CTL message block with the mp.
		 */
is_mdata:
		if (tmip->flags & TEL_GETBLK) {
			if ((newmp = allocb(sizeof (char), BPRI_MED)) == NULL) {
				recover(q, mp, msgdsize(mp));
				return;
			}
			newmp->b_datap->db_type = M_CTL;
			newmp->b_wptr = newmp->b_rptr + 1;
			*(newmp->b_rptr) = M_CTL_MAGIC_NUMBER;
			newmp->b_cont = mp;
			tmip->flags &= ~TEL_GETBLK;
			noenable(q);
			tmip->flags |= TEL_STOPPED;

			putnext(q, newmp);

			break;
		}
		/*
		 * call the protocol parsing routine which processes
		 * the data part of the message block first. Then it
		 * handles protocol and CR/LF processing.
		 * If an error is found inside allocb/dupb, recover
		 * routines inside rcv_parse will queue up the
		 * original message block in its service queue.
		 */
		(void) rcv_parse(q, mp);
		break;

	case M_FLUSH:
		/*
		 * Since M_FLUSH came from TCP, we mark it bound for
		 * daemon, not tty.  This only happens when TCP expects
		 * to do a connection reset.
		 */
		mp->b_flag |= MSGMARK;
		if (*mp->b_rptr & FLUSHR)
			flushq(q, FLUSHALL);
		putnext(q, mp);
		break;

	case M_PCSIG:
	case M_ERROR:
		if (tmip->flags & TEL_GETBLK)
			tmip->flags &= ~TEL_GETBLK;
		/* FALLTHRU */
	case M_IOCACK:
	case M_IOCNAK:
	case M_SETOPTS:
		putnext(q, mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		if (tmip->flags & TEL_GETBLK)
			tmip->flags &= ~TEL_GETBLK;

		tip = (union T_primitives *)mp->b_rptr;
		switch (tip->type) {

		case T_ORDREL_IND:
		case T_DISCON_IND:
			/* Make into M_HANGUP and putnext */
			ASSERT(mp->b_cont == NULL);
			mp->b_datap->db_type = M_HANGUP;
			mp->b_wptr = mp->b_rptr;
			if (mp->b_cont) {
				freemsg(mp->b_cont);
				mp->b_cont = NULL;
			}
			/*
			 * If we haven't already, send T_UNBIND_REQ to prevent
			 * TCP from going into "BOUND" state and locking up the
			 * port.
			 */
			if (tip->type == T_DISCON_IND && tmip->unbind_mp !=
			    NULL) {
				putnext(q, mp);
				qreply(q, tmip->unbind_mp);
				tmip->unbind_mp = NULL;
			} else {
				putnext(q, mp);
			}
			break;

		case T_EXDATA_IND:
		case T_DATA_IND:	/* conform to TPI, but never happens */
			newmp = mp->b_cont;
			freeb(mp);
			mp = newmp;
			if (mp) {
				ASSERT(mp->b_datap->db_type == M_DATA);
				if (msgdsize(mp) != 0) {
					goto is_mdata;
				}
				freemsg(mp);
			}
			break;

		/*
		 * We only get T_OK_ACK when we issue the unbind, and it can
		 * be ignored safely.
		 */
		case T_OK_ACK:
			ASSERT(tmip->unbind_mp == NULL);
			freemsg(mp);
			break;

		default:
#ifdef DEBUG
			cmn_err(CE_NOTE,
			    "telmodrput: unexpected TLI primitive msg "
			    "type 0x%x", tip->type);
#endif
			freemsg(mp);
		}
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodrput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
	}
}
Example #8
0
/*ARGSUSED*/
static int
telmodopen(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	struct telmod_info	*tmip;
	mblk_t *bp;
	union T_primitives *tp;
	int	error;

	if (sflag != MODOPEN)
		return (EINVAL);

	if (q->q_ptr != NULL) {
		/* It's already attached. */
		return (0);
	}
	/*
	 * Allocate state structure.
	 */
	tmip = kmem_zalloc(sizeof (*tmip), KM_SLEEP);

	/*
	 * Cross-link.
	 */
	q->q_ptr = tmip;
	WR(q)->q_ptr = tmip;

	noenable(q);
	tmip->flags |= TEL_STOPPED;
	qprocson(q);

	/*
	 * Since TCP operates in the TLI-inspired brain-dead fashion,
	 * the connection will revert to bound state if the connection
	 * is reset by the client.  We must send a T_UNBIND_REQ in
	 * that case so the port doesn't get "wedged" (preventing
	 * inetd from being able to restart the listener).  Allocate
	 * it here, so that we don't need to worry about allocb()
	 * failures later.
	 */
	while ((tmip->unbind_mp = allocb(sizeof (union T_primitives),
	    BPRI_HI)) == NULL) {
		bufcall_id_t id = qbufcall(q, sizeof (union T_primitives),
		    BPRI_HI, dummy_callback, NULL);
		if (!qwait_sig(q)) {
			qunbufcall(q, id);
			error = EINTR;
			goto fail;
		}
		qunbufcall(q, id);
	}
	tmip->unbind_mp->b_wptr = tmip->unbind_mp->b_rptr +
	    sizeof (struct T_unbind_req);
	tmip->unbind_mp->b_datap->db_type = M_PROTO;
	tp = (union T_primitives *)tmip->unbind_mp->b_rptr;
	tp->type = T_UNBIND_REQ;
	/*
	 * Send an M_PROTO msg of type T_DATA_REQ (this is unique for the
	 * read queue, since only the write queue can get T_DATA_REQ).
	 * The readstream routine in the telnet daemon will do a getmsg()
	 * until it receives this proto message.
	 */
	while ((bp = allocb(sizeof (union T_primitives), BPRI_HI)) == NULL) {
		bufcall_id_t id = qbufcall(q, sizeof (union T_primitives),
		    BPRI_HI, dummy_callback, NULL);
		if (!qwait_sig(q)) {
			qunbufcall(q, id);
			error = EINTR;
			goto fail;
		}
		qunbufcall(q, id);
	}
	bp->b_datap->db_type = M_PROTO;
	bp->b_wptr = bp->b_rptr + sizeof (union T_primitives);
	tp = (union T_primitives *)bp->b_rptr;
	tp->type = T_DATA_REQ;
	tp->data_req.MORE_flag = 0;

	putnext(q, bp);
	return (0);

fail:
	qprocsoff(q);
	if (tmip->unbind_mp != NULL) {
		freemsg(tmip->unbind_mp);
	}
	kmem_free(tmip, sizeof (struct telmod_info));
	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
	return (error);
}
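
Both while loops above are instances of the open-time allocation idiom: try allocb(), and on failure register a qbufcall() and qwait_sig() until either the buffer may be available or a signal arrives. A sketch of that idiom factored into a helper; open_allocb_wait and xx_dummy_callback are illustrative names, not part of telmod:

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Does nothing itself; its execution is enough to wake qwait_sig(). */
static void
xx_dummy_callback(void *arg)
{
}

/*
 * Allocate "size" bytes during open, waiting interruptibly for memory.
 * Returns the message block, or NULL if the wait was interrupted by a
 * signal (the caller should then fail the open, e.g. with EINTR).
 */
static mblk_t *
open_allocb_wait(queue_t *q, size_t size)
{
	mblk_t *bp;

	while ((bp = allocb(size, BPRI_HI)) == NULL) {
		bufcall_id_t id = qbufcall(q, size, BPRI_HI,
		    xx_dummy_callback, NULL);

		if (!qwait_sig(q)) {
			qunbufcall(q, id);
			return (NULL);
		}
		qunbufcall(q, id);
	}
	return (bp);
}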
Example #9
0
/*
 * cvc_wsrv()
 *	cvc_wsrv handles mblks that have been queued by cvc_wput either because
 *	the IOSRAM path was selected or the queue contained preceding mblks.  To
 *	optimize processing (particularly if the IOSRAM path is selected), all
 *	mblks are pulled off of the queue and chained together.  Then, if there
 *	are any mblks on the chain, they are either forwarded to cvcredir or
 *	sent for IOSRAM processing as appropriate given current circumstances.
 *	IOSRAM processing may not be able to handle all of the data in the
 *	chain, in which case the remaining data is placed back on the queue and
 *	a timeout routine is registered to reschedule cvc_wsrv in the future.
 *	Automatic scheduling of the queue is disabled (noenable(q)) while
 *	cvc_wsrv is running to avoid superfluous calls.
 */
static int
cvc_wsrv(queue_t *q)
{
	mblk_t *total_mp = NULL;
	mblk_t *mp;

	if (cvc_stopped == 1 || cvc_suspended == 1) {
		return (0);
	}

	rw_enter(&cvclock, RW_READER);
	noenable(q);

	/*
	 * If there's already a timeout registered for scheduling this routine
	 * in the future, it's a safe bet that we don't want to run right now.
	 */
	if (cvc_timeout_id != (timeout_id_t)-1) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Start by linking all of the queued M_DATA mblks into a single chain
	 * so we can flush as much as possible to IOSRAM (if we choose that
	 * route).
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Technically, certain IOCTLs are supposed to be processed only
		 * after all preceding data has completely "drained".  In an
		 * attempt to support that, we delay processing of those IOCTLs
		 * until this point.  It is still possible that an IOCTL will be
		 * processed before all preceding data is drained, for instance
		 * in the case where not all of the preceding data would fit
		 * into IOSRAM and we have to place it back on the queue.
		 * However, since none of these IOCTLs really appear to have any
		 * relevance for cvc, and we weren't supporting delayed
		 * processing at _all_ previously, this partial implementation
		 * should suffice.  (Fully implementing the delayed IOCTL
		 * processing would be unjustifiably difficult given the nature
		 * of the underlying IOSRAM console protocol.)
		 */
		if (mp->b_datap->db_type == M_IOCTL) {
			cvc_ioctl(q, mp);
			continue;
		}

		/*
		 * We know that only M_IOCTL and M_DATA blocks are placed on our
		 * queue.  Since this block isn't an M_IOCTL, it must be M_DATA.
		 */
		if (total_mp != NULL) {
			linkb(total_mp, mp);
		} else {
			total_mp = mp;
		}
	}

	/*
	 * Do we actually have anything to do?
	 */
	if (total_mp == NULL) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Yes, we do, so send the data to either cvcredir or IOSRAM as
	 * appropriate.  In the latter case, we might not be able to transmit
	 * everything right now, so re-queue the remainder.
	 */
	if (cvcoutput_q != NULL && !via_iosram) {
		CVC_DBG0(CVC_DBG_NETWORK_WR, "Sending to cvcredir.");
		/*
		 * XXX - should canputnext be called here?  Starfire's cvc
		 * doesn't do that, and it appears to work anyway.
		 */
		(void) putnext(cvcoutput_q, total_mp);
	} else {
		CVC_DBG0(CVC_DBG_IOSRAM_WR, "Send to IOSRAM.");
		cvc_send_to_iosram(&total_mp);
		if (total_mp != NULL) {
			(void) putbq(q, total_mp);
		}
	}

	/*
	 * If there is still data queued at this point, make sure the queue
	 * gets scheduled again after an appropriate delay (which has been
	 * somewhat arbitrarily selected as half of the SC's input polling
	 * frequency).
	 */
	enableok(q);
	if (q->q_first != NULL) {
		if (cvc_timeout_id == (timeout_id_t)-1) {
			cvc_timeout_id = timeout(cvc_flush_queue,
			    NULL, drv_usectohz(CVC_IOSRAM_POLL_USECS / 2));
		}
	}
	rw_exit(&cvclock);
	return (0);
}
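
The first half of cvc_wsrv() is simply "drain the queue into a single chain", done with getq() and linkb() so that everything can be handed to IOSRAM (or cvcredir) in one operation. A sketch of just that step as a stand-alone helper (drain_to_chain is an illustrative name):

#include <sys/types.h>
#include <sys/stream.h>

/*
 * Pull every queued message off q and link them, in order, into one
 * message chain.  Returns the head of the chain, or NULL if the queue
 * was empty.
 */
static mblk_t *
drain_to_chain(queue_t *q)
{
	mblk_t *chain = NULL;
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (chain == NULL)
			chain = mp;
		else
			linkb(chain, mp);	/* append to the end of the chain */
	}
	return (chain);
}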
Example #10
0
static int
pckt_r_msg(queue_t *q, mblk_t *mp)
{
	mblk_t *bp, *fp;
	struct pckt *p = PCKT_PRIV(q);

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		/* The pseudo-terminal device, pty(4), slave side reverses the sense of the
		   M_FLUSH(9) flush bits so that they can be used directly by the master side
		   Stream head.  This is similar to the pipemod(4) module.  To provide an
		   encapsulated message that contains flush bits exactly as they were issued by
		   the user of the slave side of the pty(4), pckt reverses the FLUSHR and FLUSHW
		   bits before packetizing the M_FLUSH(9) message.  Also, because every Stream
		   must respond to M_FLUSH(9) by flushing queues, pckt also passes the M_FLUSH(9)
		   message to the Stream head.  However, to preserve packetized messages that may
		   be sitting on the Stream head read queue, the read side is not flushed and the
		   FLUSHR bit in any M_FLUSH(9) message passed to the Stream head will be cleared.
		   The result is as follows, depending on the value of the M_FLUSH(9) bits:

		   FLUSHR:  The bits are set to FLUSHW and the message is packetized.  No
		            M_FLUSH(9) message is sent to the Stream head.
		   FLUSHW:  The bits are set to FLUSHR and the message is packetized.  An
		            M_FLUSH(9) message containing the FLUSHW flag is sent to the
		            Stream head.
		   FLUSHRW: The bits are left as FLUSHRW and the message is packetized.  An
		            M_FLUSH(9) message containing only the FLUSHW flag is sent to the
		            Stream head.  */
		if (!(bp = allocb(1, BPRI_MED)))
			goto bufcall;
		if ((mp->b_rptr[0] & FLUSHW)) {
			if (!(fp = copyb(mp))) {
				freeb(bp);
				goto bufcall;
			}
			fp->b_rptr[0] &= ~FLUSHR;
			putnext(q, fp);
		} else {
			fp = NULL;
		}
		switch (mp->b_rptr[0] & (FLUSHR | FLUSHW)) {
		case FLUSHR:
			mp->b_rptr[0] &= ~FLUSHR;
			mp->b_rptr[0] |= FLUSHW;
			break;
		case FLUSHW:
			mp->b_rptr[0] &= ~FLUSHW;
			mp->b_rptr[0] |= FLUSHR;
			break;
		}
		goto finish_it;
	case M_IOCTL:
		/* The M_IOCTL(9) message is packetized as normal on 32-bit systems. On 64-bit
		   systems, where the user process that pushed the pckt module on the master side
		   of the pseudo-terminal, pty(4), device is a 32-bit process, the iocblk(9)
		   structure contained in the message block is transformed by the pckt module into
		   a 32-bit representation of the iocblk(9) structure (struct iocblk32) before
		   being packetized.  */
		if ((p->flags & FILP32)) {
			struct iocblk *ioc;
			struct iocblk32 ioc32 = { 0, };

			/* Need to convert from native to ILP32. */
			if ((bp = allocb(1, BPRI_MED)) == NULL)
				goto bufcall;
			ioc = (typeof(ioc)) mp->b_rptr;
			ioc32.ioc_cmd32 = ioc->ioc_cmd;
			ioc32.ioc_cr32 = (uint32_t) (long) ioc->ioc_cr;
			ioc32.ioc_id32 = ioc->ioc_id;
			ioc32.ioc_count32 = ioc->ioc_count;
			ioc32.ioc_error32 = ioc->ioc_error;
			ioc32.ioc_rval32 = ioc->ioc_rval;
			ioc32.ioc_filler32[0] = ioc->ioc_filler[0];
			ioc32.ioc_filler32[1] = ioc->ioc_filler[1];
			ioc32.ioc_filler32[2] = ioc->ioc_filler[2];
			ioc32.ioc_filler32[3] = ioc->ioc_filler[3];
			mp->b_wptr = mp->b_wptr - sizeof(*ioc) + sizeof(ioc32);
			*(struct iocblk32 *) mp->b_rptr = ioc32;
			goto finish_it;
		}
		goto pass_it;
	case M_READ:
		/* The M_READ(9) message is packetized as normal on 32-bit systems.  On 64-bit
		   systems, where the user process that pushed the pckt module on the master side
		   of the pseudo-terminal, pty(4), device is a 32-bit process, the size_t count
		   contained in the message block is transformed by the pckt module into a 32-bit
		   representation of the size_t (size32_t) before being packetized.  */
		if ((p->flags & FILP32)) {
			uint32_t size32;

			/* Need to convert from native to ILP32. */
			if ((bp = allocb(1, BPRI_MED)) == NULL)
				goto bufcall;
			size32 = *(size_t *) mp->b_rptr;
			*(uint32_t *) mp->b_rptr = size32;
			mp->b_wptr = mp->b_wptr - sizeof(size_t) + sizeof(uint32_t);
			goto finish_it;
		}
		goto pass_it;
	case M_PROTO:
	case M_PCPROTO:
	case M_STOP:
	case M_STOPI:
	case M_START:
	case M_STARTI:
	case M_DATA:
	      pass_it:
		/* Problem: UnixWare says 4 bytes.  Solaris says 1 byte.  The user must determine
		   the size and alignment of the message type by the length of the control part.
		   We'll go with 1 byte.  On little endian it should line up. */
		if ((bp = allocb(1, BPRI_MED))) {
		      finish_it:
			bp->b_datap->db_type = M_PROTO;
			bp->b_wptr[0] = mp->b_datap->db_type;
			bp->b_wptr++;
			mp->b_datap->db_type = M_DATA;
			bp->b_cont = mp;
			putnext(q, bp);
			return (1);
		}
	      bufcall:
		noenable(q);
		if (!(p->bufcall = bufcall(1, BPRI_MED, pckt_enable, (long) q)))
			qenable(q);	/* spin through service procedure */
		return (0);
	default:
		putnext(q, mp);
		return (1);
	}
}
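
The pass_it/finish_it tail above is the packetizing step itself: allocate a one-byte M_PROTO header recording the original message type, demote the original message to M_DATA, and send the pair upstream as a single message. A sketch of that step in isolation (packetize is a hypothetical name; the real module must also handle the bufcall fallback shown above when allocb() fails):

#include <sys/types.h>
#include <sys/stream.h>

/*
 * Wrap mp in a one-byte M_PROTO header whose single byte records mp's
 * original message type, then send the pair upstream.  Returns 1 on
 * success and 0 if the header block could not be allocated (the caller
 * must then arrange recovery).
 */
static int
packetize(queue_t *q, mblk_t *mp)
{
	mblk_t *bp;

	if ((bp = allocb(1, BPRI_MED)) == NULL)
		return (0);
	bp->b_datap->db_type = M_PROTO;
	*bp->b_wptr++ = mp->b_datap->db_type;	/* record the original type */
	mp->b_datap->db_type = M_DATA;
	bp->b_cont = mp;
	putnext(q, bp);
	return (1);
}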
Example #11
0
static streamscall __hot_put int
ptem_wput(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		/* free zero-length messages */
		if (msgdsize(mp) != 0) {
			if ((p->flags & PTEM_OUTPUT_STOPPED)
			    || (q->q_first != NULL)
			    || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band))) {
				/* Note, the only reason for putq() failing is the lack of a queue
				   band, in which case the band is empty and no loss of order will
				   result from putting it to the next queue. */
				if (putq(q, mp))
					return (0);
			}
			putnext(q, mp);
			return (0);
		}
		freemsg(mp);
		return (0);
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;

		/* The Stream head is set to recognize all transparent terminal input-output
		   controls and pass them downstream as though they were I_STR input-output
		   controls.  There is also the opportunity to register input-output controls with
		   the Stream head using the TIOC_REPLY message. */
		if (unlikely(ioc->ioc_count == TRANSPARENT))
			goto do_it;

		switch (ioc->ioc_cmd) {
		case TCSETAW:
		case TCSETAF:
		case TCSETSW:
		case TCSETSF:
		case TCSBRK:
			/* These need to wait for the output to drain before being processed;
			   queue them. */
			putq(q, mp);
			break;
		default:
			/* Process others immediately, regardless of whether there is any data or
			   other messages in queue. */
			goto do_it;
		}
		break;
	}
	case M_DELAY:
	case M_READ:
		freemsg(mp);
		break;
	case M_STOP:
		if (canenable(q)) {
			noenable(q);
			p->flags |= PTEM_OUTPUT_STOPPED;
		}
		putnext(q, mp);
		break;
	case M_START:
		if (!canenable(q)) {
			p->flags &= ~PTEM_OUTPUT_STOPPED;
			enableok(q);
			qenable(q);
		}
		putnext(q, mp);
		break;
	case M_STOPI:
	case M_STARTI:
		/* We have no read side queue, so we cannot queue in this direction.  Tell the
		   master, so that pckt(4) can tell the master not to send anything more. */
		putnext(q, mp);
		break;
	default:
	      do_it:
		if (ptem_w_msg(q, mp) && !putq(q, mp))
			freemsg(mp);
		break;
	}
	return (0);
}
Example #12
0
/*
 * Returns 1 if the caller should put the message (bp) back on the queue
 */
static int
mouse8042_initiate_reset(queue_t *q, mblk_t *mp, struct mouse_state *state)
{
	mutex_enter(&state->reset_mutex);
	/*
	 * If we're in the middle of a reset, put the message back on the queue
	 * for processing later.
	 */
	if (state->reset_state != MSE_RESET_IDLE) {
		/*
		 * We noenable the queue again here in case it was backenabled
		 * by an upper-level module.
		 */
		noenable(q);

		mutex_exit(&state->reset_mutex);
		return (1);
	}

	/*
	 * Drop the reset state lock before allocating the response message and
	 * grabbing the 8042 exclusive-access lock (since those operations
	 * may take an extended period of time to complete).
	 */
	mutex_exit(&state->reset_mutex);

	if (state->reply_mp == NULL)
		state->reply_mp = allocb(2, BPRI_MED);
	if (state->reset_ack_mp == NULL)
		state->reset_ack_mp = allocb(1, BPRI_MED);

	if (state->reply_mp == NULL || state->reset_ack_mp == NULL) {
		/*
		 * Allocation failed -- set up a bufcall to enable the queue
		 * whenever there is enough memory to allocate the response
		 * message.
		 */
		state->bc_id = qbufcall(q, (state->reply_mp == NULL) ? 2 : 1,
		    BPRI_MED, (void (*)(void *))qenable, q);

		if (state->bc_id == 0) {
			/*
			 * If the qbufcall failed, we cannot proceed, so use the
			 * message we were sent to respond with an error.
			 */
			*mp->b_rptr = MSEERROR;
			mp->b_wptr = mp->b_rptr + 1;
			qreply(q, mp);
			return (0);
		}

		return (1);
	} else {
		/* Bufcall completed successfully (or wasn't needed) */
		state->bc_id = 0;
	}

	/*
	 * Gain exclusive access to the 8042 for the duration of the reset.
	 * The unlock will occur when the reset has either completed or timed
	 * out.
	 */
	(void) ddi_get8(state->ms_handle,
	    state->ms_addr + I8042_LOCK);

	mutex_enter(&state->reset_mutex);

	state->reset_state = MSE_RESET_PRE;
	noenable(q);

	state->reset_tid = qtimeout(q,
	    mouse8042_reset_timeout,
	    state,
	    drv_usectohz(
	    MOUSE8042_RESET_TIMEOUT_USECS));

	ddi_put8(state->ms_handle,
	    state->ms_addr +
	    I8042_INT_OUTPUT_DATA, MSERESET);

	mp->b_rptr++;

	mutex_exit(&state->reset_mutex);
	return (1);
}
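
The allocation-failure path above registers qenable itself as the qbufcall callback, so the queue is simply rescheduled once a block of the needed size may be allocatable. A sketch of that fallback as a hypothetical helper (requeue_until_memory is not part of the mouse8042 driver; the caller is expected to leave, or return, the message on its queue):

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/*
 * Arrange for q to be re-enabled when "size" bytes may be allocatable.
 * Returns 1 if the bufcall was registered and 0 if even the bufcall
 * failed (the caller must then fail the request outright).
 */
static int
requeue_until_memory(queue_t *q, size_t size)
{
	bufcall_id_t id;

	id = qbufcall(q, size, BPRI_MED, (void (*)(void *))qenable, q);
	return (id != 0);
}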
Example #13
0
/**
 * sl_w_ioctl: - process M_IOCTL message
 * @q: active queue (upper write queue)
 * @mp: the message
 *
 * Linking of streams: streams are linked under the multiplexing driver by opening an upper stream
 * and then linking a signalling link stream under the multiplexing driver.  Then the SL_SETLINK
 * input-output control is used with the multiplexer id to set the global-PPA and CLEI associated
 * with the signalling link.  The SL_GETLINK input-output control can be used at a later date to
 * determine the multiplexer id for a given signalling link stream.
 */
static int
sl_w_ioctl(queue_t *q, mblk_t *mp)
{
	struct iocblk *ioc = (struct iocblk *) mp->b_rptr;

	switch (ioc->ioc_cmd) {
	case I_LINK:
	case I_PLINK:
	{
		struct linkblk *l;

		if (!mp->b_cont) {
			mi_copy_done(q, mp, EINVAL);
			return (0);
		}

		l = (struct linkblk *) mp->b_cont->b_rptr;

		if (!(bot = kmem_alloc(sizeof(*bot), KM_NOSLEEP))) {
			mi_copy_done(q, mp, ENOMEM);
			return (0);
		}

		write_lock_str(&mux_lock, flags);
		bot->next = mux_links;
		bot->prev = &mux_links;
		mux_links = bot;
		bot->dev = l->l_index;
		bot->rq = RD(l->l_qbot);
		bot->wq = l->l_qbot;
		bot->other = NULL;
		noenable(bot->rq);
		l->l_qbot->q_ptr = RD(l->l_qbot)->q_ptr = (void *) bot;
		write_unlock_str(&mux_lock, flags);
		mi_copy_done(q, mp, 0);
		return (0);
	}
	case I_UNLINK:
	case I_PUNLINK:
	{
		struct linkblk *l;

		if (!mp->b_cont) {
			mi_copy_done(q, mp, EINVAL);
			return (0);
		}

		l = (struct linkblk *) mp->b_cont->b_rptr;

		write_lock_str(&mux_lock, flags);
		for (bot = mux_links; bot; bot = bot->next)
			if (bot->dev == l->l_index)
				break;
		if (!bot) {
			write_unlock_str(&mux_lock, flags);
			mi_copy_done(q, mp, EINVAL);
			return (0);
		}
		/* Note that the lower multiplex driver put and service procedures must be prepared
		   to be invoked even after the M_IOCACK for the I_UNLINK or I_PUNLINK ioctl has
		   been returned.  This is because the setq(9) back to the Stream head is not
		   performed until after the acknowledgement has been received.  We set q->q_ptr to
		   a null multiplex structure to keep the lower Stream functioning until the setq(9)
		   is performed. */
		l->l_qbot->q_ptr = RD(l->l_qbot)->q_ptr = &no_mux;
		if ((*bot->prev = bot->next))
			bot->next->prev = bot->prev;
		bot->next = NULL;
		bot->prev = &bot->next;
		bot->other = NULL;
		kmem_free(bot, sizeof(*bot));
		/* hang up all upper streams that feed this lower stream */
		for (top = mux_opens; top; top = top->next) {
			if (top->other == bot) {
				putnextctl(top->rq, M_HANGUP);
				top->other = NULL;
			}
		}
		write_unlock_str(&mux_lock, flags);
		mi_copy_done(q, mp, 0);
		return (0);
	}
	case SL_SETLINK:
	{
		struct sl_mux_ppa *sm;

		/* This input-output control is used to set the global-PPA and CLEI associated with
		   a lower multiplex stream.  The argument is an sl_mux_ppa structure that contains
		   the multiplex id, the 32-bit PPA, and a CLEI string of up to 32 characters in
		   length. */
		mi_copyin(q, mp, NULL, sizeof(struct sl_mux_ppa));
		return (0);
	}
	case SL_GETLINK:
	{
		/* This input-output control is used to obtain the multiplex-id associated with a
		   lower multiplex stream.  The argument is an sl_mux_ppa structure that contains a
		   32-bit PPA or CLEI string of up to 32 characters in length.  It returns the
		   multiplex id in the same structure. */
		mi_copyin(q, mp, NULL, sizeof(struct sl_mux_ppa));
		return (0);
	}
	default:
		if (mux->other && mux->other->rq) {
			if (bcanputnext(mux->other->rq, mp->b_band)) {
				putnext(mux->other->rq, mp);
				return (0);
			}
			return (-EBUSY);
		}
		break;
	}
	mi_copy_done(q, mp, EINVAL);
	return (0);
}
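
The I_LINK/I_PLINK case above shows the essential bookkeeping: the linkblk attached to the M_IOCTL names the newly linked lower stream's queue pair (l_qbot is its highest write queue) and the multiplexer id (l_index) that later I_UNLINK and SL_GETLINK-style ioctls refer to. A stripped-down sketch of that handling, using the standard mioc helpers rather than the mi_ routines above; xx_link and the omitted driver-private state are assumptions:

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/stream.h>
#include <sys/strsun.h>

/*
 * Handle I_LINK/I_PLINK in a multiplexing driver's upper write put
 * procedure.  A real driver records l->l_index and the lower queue
 * pair in its own lower-stream table under a lock; that bookkeeping is
 * omitted here.
 */
static void
xx_link(queue_t *q, mblk_t *mp)
{
	struct linkblk *l;

	if (mp->b_cont == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}
	l = (struct linkblk *)mp->b_cont->b_rptr;
	l->l_qbot->q_ptr = NULL;		/* driver-private state would go here */
	RD(l->l_qbot)->q_ptr = NULL;
	noenable(RD(l->l_qbot));		/* hold off the lower read side for now */
	miocack(q, mp, 0, 0);
}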