Example #1
File: zcons.c Project: andreiw/polaris
/*ARGSUSED1*/
static int
zc_close(queue_t *rqp, int flag, cred_t *credp)
{
	queue_t *wqp;
	mblk_t	*bp;
	zc_state_t *zcs;

	zcs = (zc_state_t *)rqp->q_ptr;

	if (rqp == zcs->zc_master_rdq) {
		DBG("Closing master side");

		zcs->zc_master_rdq = NULL;
		zcs->zc_state &= ~ZC_STATE_MOPEN;

		/*
		 * qenable slave side write queue so that it can flush
		 * its messages as master's read queue is going away
		 */
		if (zcs->zc_slave_rdq != NULL) {
			qenable(WR(zcs->zc_slave_rdq));
		}

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

	} else if (rqp == zcs->zc_slave_rdq) {

		DBG("Closing slave side");
		zcs->zc_state &= ~ZC_STATE_SOPEN;
		zcs->zc_slave_rdq = NULL;

		wqp = WR(rqp);
		while ((bp = getq(wqp)) != NULL) {
			if (zcs->zc_master_rdq != NULL)
				putnext(zcs->zc_master_rdq, bp);
			else if (bp->b_datap->db_type == M_IOCTL)
				miocnak(wqp, bp, 0, 0);
			else
				freemsg(bp);
		}

		/*
		 * Qenable master side write queue so that it can flush its
		 * messages as slave's read queue is going away.
		 */
		if (zcs->zc_master_rdq != NULL)
			qenable(WR(zcs->zc_master_rdq));

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;
	}

	return (0);
}
Example #2
/*ARGSUSED*/
static int
sdp_gen_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	qprocson(q);
	qenable(q);
	return (0);
}
Example #3
static streamscall int
sl_lwsrv(queue_t *q)
{
	struct sl *mux = SL_PRIV(q);

	if (mux->other && mux->other->wq)
		qenable(mux->other->wq);
	return (0);
}
Example #4
/* ARGSUSED */
static void
cvc_flush_queue(void *notused)
{
	rw_enter(&cvclock, RW_WRITER);
	if (cvcinput_q != NULL) {
		qenable(WR(cvcinput_q));
	}

	cvc_timeout_id = (timeout_id_t)-1;
	rw_exit(&cvclock);
}
Example #5
/* 
 *  PUTP and SRVP routines
 */
static streamscall void
pckt_enable(long arg)
{
	queue_t *q = (queue_t *) arg;
	bcid_t bc;
	struct pckt *p;

	if ((p = PCKT_PRIV(q)) && (bc = xchg(&p->bufcall, 0))) {
		qenable(q);
		enableok(q);
	}
}
Example #6
STATIC void streamscall
spm_r_timeout(caddr_t data)
{
	queue_t *q = (queue_t *) data;
	spm_t *s = PRIV(q);

	if (!xchg(&s->rtim, 0))
		return;
	enableok(q);
	qenable(q);
	return;
}
Example #7
File: dm2s.c Project: andreiw/polaris
/*
 * dm2s_bufcall_rcv - Bufcall callback routine.
 *
 * It simply enables the read side queue so that the service procedure
 * can retry the receive operation.
 */
void
dm2s_bufcall_rcv(void *arg)
{
	dm2s_t *dm2sp = (dm2s_t *)arg;

	DPRINTF(DBG_DRV, ("dm2s_bufcall_rcv: called\n"));
	mutex_enter(&dm2sp->ms_lock);
	dm2sp->ms_rbufcid = 0;
	if (dm2sp->ms_rq != NULL) {
		qenable(dm2sp->ms_rq);
	}
	mutex_exit(&dm2sp->ms_lock);
}
Example #8
File: dm2s.c Project: andreiw/polaris
/*
 * dm2s_wq_timeout - Timeout callback for the write.
 *
 * It simply enables the write side queue so that the service procedure
 * can retry the transmission operation.
 */
void
dm2s_wq_timeout(void *arg)
{
	dm2s_t *dm2sp = (dm2s_t *)arg;

	DPRINTF(DBG_DRV, ("dm2s_wq_timeout: called\n"));
	mutex_enter(&dm2sp->ms_lock);
	dm2sp->ms_wq_timeoutid = 0;
	if (dm2sp->ms_wq != NULL) {
		qenable(dm2sp->ms_wq);
	}
	mutex_exit(&dm2sp->ms_lock);
}
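
The two dm2s callbacks above (Examples #7 and #8) only make sense next to the service-procedure side that arms them. Below is a minimal sketch of that transmit side, assumed rather than taken from dm2s.c: xx_state_t, xx_try_transmit(), XX_RETRY_USECS and the other xx_ names are hypothetical, while getq(9F), putbq(9F), timeout(9F), drv_usectohz(9F) and qenable(9F) are standard Solaris STREAMS DDI calls. A real driver, like dm2s, would also hold a per-instance lock around the timeout id.

/* Hypothetical per-instance state; only the fields used here are shown. */
typedef struct xx_state {
	queue_t		*xx_wq;			/* write-side queue */
	timeout_id_t	xx_wq_timeoutid;	/* pending retry timeout */
} xx_state_t;

#define	XX_RETRY_USECS	10000			/* hypothetical retry delay */

static int xx_try_transmit(xx_state_t *, mblk_t *);	/* hypothetical */
static void xx_wq_timeout(void *);

/* Write service procedure: drain the queue, arming a retry on failure. */
static int
xx_wsrv(queue_t *wq)
{
	xx_state_t *xsp = (xx_state_t *)wq->q_ptr;
	mblk_t *mp;

	while ((mp = getq(wq)) != NULL) {
		if (xx_try_transmit(xsp, mp) != 0) {
			/* Transient failure: requeue and retry later. */
			(void) putbq(wq, mp);
			if (xsp->xx_wq_timeoutid == 0) {
				xsp->xx_wq_timeoutid = timeout(xx_wq_timeout,
				    xsp, drv_usectohz(XX_RETRY_USECS));
			}
			break;
		}
	}
	return (0);
}

/* Timeout callback: clear the id and reschedule the service procedure. */
static void
xx_wq_timeout(void *arg)
{
	xx_state_t *xsp = arg;

	xsp->xx_wq_timeoutid = 0;
	if (xsp->xx_wq != NULL)
		qenable(xsp->xx_wq);
}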
Example #9
/*
 * rsrv(9E) is symmetric for master and slave, so zc_rsrv() handles both
 * without splitting up the codepath.
 *
 * Enable the write side of the partner.  This triggers the partner to send
 * messages queued on its write side to this queue's read side.
 */
static void
zc_rsrv(queue_t *qp)
{
	zc_state_t *zcs;
	zcs = (zc_state_t *)qp->q_ptr;

	/*
	 * Care must be taken here, as either of the master or slave side
	 * qptr could be NULL.
	 */
	ASSERT(qp == zcs->zc_master_rdq || qp == zcs->zc_slave_rdq);
	if (zc_switch(qp) == NULL) {
		DBG("zc_rsrv: other side isn't listening\n");
		return;
	}
	qenable(WR(zc_switch(qp)));
}
Example #10
File: telmod.c Project: andreiw/polaris
static void
telmod_buffer(void *arg)
{
	queue_t *q = arg;
	struct	telmod_info	*tmip = (struct telmod_info *)q->q_ptr;

	ASSERT(tmip);

	if (q->q_flag & QREADR) {
		ASSERT(tmip->rbufcid);
		tmip->rbufcid = 0;
	} else {
		ASSERT(tmip->wbufcid);
		tmip->wbufcid = 0;
	}
	enableok(q);
	qenable(q);
}
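
telmod_buffer() above is the recovery half of a common STREAMS allocation-retry pattern; the half that arms it lives in a put or service procedure. The sketch below is an illustration under assumptions, not telmod source: struct xx_info, xx_rbufcid, XX_HDRLEN and xx_buffer() (a callback shaped just like telmod_buffer()) are hypothetical, while allocb(9F), bufcall(9F), noenable(9F), putbq(9F), putnext(9F) and qenable(9F) are standard DDI. Because noenable() only blocks the automatic scheduling done by putq()/putbq(), the explicit qenable() on bufcall failure still gets the service procedure to spin and retry.

#define	XX_HDRLEN	4		/* hypothetical header size */

struct xx_info {
	bufcall_id_t	xx_rbufcid;	/* outstanding bufcall id, if any */
};

static void xx_buffer(void *);		/* same shape as telmod_buffer() */

/* Read service procedure that needs a header mblk for every message. */
static int
xx_rsrv(queue_t *q)
{
	struct xx_info *xip = (struct xx_info *)q->q_ptr;
	mblk_t *mp, *bp;

	while ((mp = getq(q)) != NULL) {
		if ((bp = allocb(XX_HDRLEN, BPRI_MED)) == NULL) {
			/*
			 * No buffer: park the message, keep putq() from
			 * rescheduling us, and wait for memory to return.
			 */
			(void) putbq(q, mp);
			noenable(q);
			xip->xx_rbufcid = bufcall(XX_HDRLEN, BPRI_MED,
			    xx_buffer, q);
			if (xip->xx_rbufcid == 0)
				qenable(q);	/* no bufcall; just retry */
			return (0);
		}
		/* ... fill in the header in bp, then pass both blocks on ... */
		bp->b_cont = mp;
		putnext(q, bp);
	}
	return (0);
}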
Example #11
/* ARGSUSED */
static int
cvc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	static char	been_here = 0;

	if (cmd == DDI_RESUME) {
		cvc_suspended = 0;
		if (cvcinput_q != NULL) {
			qenable(WR(cvcinput_q));
		}
		return (DDI_SUCCESS);
	}

	mutex_enter(&cvcmutex);
	if (!been_here) {
		been_here = 1;
		mutex_init(&cvc_iosram_input_mutex, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&cvclock, NULL, RW_DRIVER, NULL);
		cvc_instance = ddi_get_instance(devi);
	} else {
#if defined(DEBUG)
		cmn_err(CE_NOTE,
		    "cvc_attach: called multiple times!! (instance = %d)",
		    ddi_get_instance(devi));
#endif /* DEBUG */
		mutex_exit(&cvcmutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&cvcmutex);

	if (ddi_create_minor_node(devi, "cvc", S_IFCHR,
	    0, NULL, NULL) == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (-1);
	}
	cvcdip = devi;
	cvcinput_q = NULL;
	cvcoutput_q = NULL;

	CVC_DBG0(CVC_DBG_ATTACH, "Attached");

	return (DDI_SUCCESS);
}
Example #12
/**
 * ch_srvp: - service procedure.
 * @q: queue to service
 *
 * Simple draining service procedure.
 */
static streamscall __hot_get int
ch_srvp(queue_t *q)
{
	mblk_t *mp;

	if (likely((mp = getq(q)))) {
		do {
			if (ch_msg(q, mp)) {
				if (putbq(q, mp)) {
					swerr();
					freemsg(mp);
				}
				break;
			}
		} while (unlikely((mp = getq(q))));
	}
	if (_WR(q)->q_first)
		qenable(_WR(q));
	return (0);
}
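
ch_srvp() above drains its queue unconditionally and only reflects backlog onto its own write side. The more common shape of a write service procedure, and the one that an explicit qenable() of the partner queue (as in zc_rsrv(), Example #9) is meant to wake up, also checks downstream flow control before forwarding. This is a generic sketch, not taken from any of the listed drivers; canputnext(9F), getq(9F), putbq(9F) and putnext(9F) are standard STREAMS calls, and a band-aware version would use bcanputnext(9F) instead.

/* Generic flow-control-aware write service procedure. */
static int
xx_drain_wsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (!canputnext(q)) {
			/*
			 * Downstream is flow controlled.  Put the message
			 * back; back-enabling (or an explicit qenable() from
			 * the partner) will run this routine again later.
			 */
			(void) putbq(q, mp);
			break;
		}
		putnext(q, mp);
	}
	return (0);
}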
Example #13
static void
mouse8042_reset_timeout(void *argp)
{
	struct mouse_state *state = (struct mouse_state *)argp;
	mblk_t *mp;

	mutex_enter(&state->reset_mutex);

	/*
	 * If the interrupt handler hasn't completed the reset handling
	 * (reset_state would be IDLE or FAILED in that case), then
	 * drop the 8042 lock, and send a faked retry reply upstream,
	 * then enable the queue for further message processing.
	 */
	if (state->reset_state != MSE_RESET_IDLE &&
	    state->reset_state != MSE_RESET_FAILED) {

		state->reset_tid = 0;
		state->reset_state = MSE_RESET_IDLE;
		cv_signal(&state->reset_cv);

		(void) ddi_get8(state->ms_handle, state->ms_addr +
		    I8042_UNLOCK);

		mp = state->reply_mp;
		*mp->b_wptr++ = MSERESEND;
		state->reply_mp = NULL;

		if (state->ms_rqp != NULL)
			putnext(state->ms_rqp, mp);
		else
			freemsg(mp);

		ASSERT(state->ms_wqp != NULL);

		enableok(state->ms_wqp);
		qenable(state->ms_wqp);
	}

	mutex_exit(&state->reset_mutex);
}
Example #14
/*
 * Restart queuing of high-priority messages on the write stream when flow
 * control has failed
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_wcmn_high_qenable(queue_t *q, krw_t rw)
{
	mblk_t	*mp;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	if (oplmsu_queue_flag == 1) {
		return;
	}

	/* Handle high priority message */
	while (mp = oplmsu_wcmn_high_getq(WR(q))) {
		if (mp->b_datap->db_type & M_FLUSH) {
			oplmsu_wcmn_flush_hndl(q, mp, rw);
			continue;
		}

		if (oplmsu_wcmn_through_hndl(q, mp, MSU_HIGH, rw) == FAILURE) {
			return;
		}
	}
	qenable(WR(q));	/* enable upper write queue */
}
Example #15
static int
pckt_r_msg(queue_t *q, mblk_t *mp)
{
	mblk_t *bp, *fp;
	struct pckt *p = PCKT_PRIV(q);

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		/* The pseudo-terminal device, pty(4), slave side reverses the sense of the
		   M_FLUSH(9) flush bits so that they can be used directly by the master side
		   Stream head.  This is similar to the pipemod(4) module.  To provide an
		   encapsulated message containing the flush bits exactly as they were issued
		   by the user of the slave side of the pty(4), pckt reverses the FLUSHR and
		   FLUSHW bits before packetizing the M_FLUSH(9) message.  Also, because every
		   Stream must respond to M_FLUSH(9) by flushing queues, pckt passes the
		   M_FLUSH(9) message to the Stream head as well.  However, to preserve
		   packetized messages that may be sitting on the Stream head read queue, the
		   read side is not flushed and the FLUSHR bit in any M_FLUSH(9) message passed
		   to the Stream head is cleared.  The result, depending on the M_FLUSH(9)
		   bits, is:

		   FLUSHR:  the bits are set to FLUSHW and the message is packetized; no
			    M_FLUSH(9) message is sent to the Stream head.
		   FLUSHW:  the bits are set to FLUSHR and the message is packetized; an
			    M_FLUSH(9) message containing the FLUSHW flag is sent to the
			    Stream head.
		   FLUSHRW: the bits remain FLUSHRW and the message is packetized; an
			    M_FLUSH(9) message containing only the FLUSHW flag is sent to
			    the Stream head.  */
		if (!(bp = allocb(1, BPRI_MED)))
			goto bufcall;
		if ((mp->b_rptr[0] & FLUSHW)) {
			if (!(fp = copyb(mp))) {
				freeb(bp);
				goto bufcall;
			}
			fp->b_rptr[0] &= ~FLUSHR;
			putnext(q, fp);
		} else {
			fp = NULL;
		}
		switch (mp->b_rptr[0] & (FLUSHR | FLUSHW)) {
		case FLUSHR:
			mp->b_rptr[0] &= ~FLUSHR;
			mp->b_rptr[0] |= FLUSHW;
			break;
		case FLUSHW:
			mp->b_rptr[0] &= ~FLUSHW;
			mp->b_rptr[0] |= FLUSHR;
			break;
		}
		goto finish_it;
	case M_IOCTL:
		/* The M_IOCTL(9) message is packetized as normal on 32-bit systems. On 64-bit
		   systems, where the user process that pushed the pckt module on the master side
		   of the pseudo-terminal, pty(4), device is a 32-bit process, the iocblk(9)
		   structure contained in the message block is transformed by the pckt module into
		   a 32-bit representation of the iocblk(9) structure (struct iocblk32) before
		   being packetized.  */
		if ((p->flags & FILP32)) {
			struct iocblk *ioc;
			struct iocblk32 ioc32 = { 0, };

			/* Need to convert from native to ILP32. */
			if ((bp = allocb(1, BPRI_MED)) == NULL)
				goto bufcall;
			ioc = (typeof(ioc)) mp->b_rptr;
			ioc32.ioc_cmd32 = ioc->ioc_cmd;
			ioc32.ioc_cr32 = (uint32_t) (long) ioc->ioc_cr;
			ioc32.ioc_id32 = ioc->ioc_id;
			ioc32.ioc_count32 = ioc->ioc_count;
			ioc32.ioc_error32 = ioc->ioc_error;
			ioc32.ioc_rval32 = ioc->ioc_rval;
			ioc32.ioc_filler32[0] = ioc->ioc_filler[0];
			ioc32.ioc_filler32[1] = ioc->ioc_filler[1];
			ioc32.ioc_filler32[2] = ioc->ioc_filler[2];
			ioc32.ioc_filler32[3] = ioc->ioc_filler[3];
			mp->b_wptr = mp->b_wptr - sizeof(*ioc) + sizeof(ioc32);
			*(struct iocblk32 *) mp->b_rptr = ioc32;
			goto finish_it;
		}
		goto pass_it;
	case M_READ:
		/* The M_READ(9) message is packetized as normal on 32-bit systems.  On 64-bit
		   systems, where the user process that pushed the pckt module on the master side
		   of the pseudo-terminal, pty(4), device is a 32-bit process, the size_t count
		   contained in the message block is transformed by the pckt module into a 32-bit
		   representation of the size_t (size32_t) before being packetized.  */
		if ((p->flags & FILP32)) {
			uint32_t size32;

			/* Need to convert from native to ILP32. */
			if ((bp = allocb(1, BPRI_MED)) == NULL)
				goto bufcall;
			size32 = *(size_t *) mp->b_rptr;
			*(uint32_t *) mp->b_rptr = size32;
			mp->b_wptr = mp->b_wptr - sizeof(size_t) + sizeof(uint32_t);
			goto finish_it;
		}
		goto pass_it;
	case M_PROTO:
	case M_PCPROTO:
	case M_STOP:
	case M_STOPI:
	case M_START:
	case M_STARTI:
	case M_DATA:
	      pass_it:
		/* Problem: UnixWare says 4 bytes.  Solaris says 1 byte.  The user must determine
		   the size and alignment of the message type by the length of the control part.
		   We'll go with 1 byte.  On little endian it should line up. */
		if ((bp = allocb(1, BPRI_MED))) {
		      finish_it:
			bp->b_datap->db_type = M_PROTO;
			bp->b_wptr[0] = mp->b_datap->db_type;
			bp->b_wptr++;
			mp->b_datap->db_type = M_DATA;
			bp->b_cont = mp;
			putnext(q, bp);
			return (1);
		}
	      bufcall:
		noenable(q);
		if (!(p->bufcall = bufcall(1, BPRI_MED, pckt_enable, (long) q)))
			qenable(q);	/* spin through service procedure */
		return (0);
	default:
		putnext(q, mp);
		return (1);
	}
}
Example #16
File: ip_to_dlpi.c Project: iHaD/openss7
STATIC streamscall int
ip2xinet_lrput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	union DL_primitives *dp;
	struct ip2xinet_priv *privptr;
	struct net_device *dev;
	int i;

	spin_lock(&ip2xinet_lock);

	/* use the first open ip device */
	for (i = 0; i < NUMIP2XINET; i++) {
		privptr = &ip2xinet_devs[i].priv;

		if (privptr->state == 1)
			break;
	}
	if (i == NUMIP2XINET)
		i = 0;		/* All devices closed, pick the 1st one */
	/* send data up to ip through the 1st open device */
	dev = &ip2xinet_devs[i].dev;

	switch (mp->b_datap->db_type) {
	case M_CTL:
		freemsg(mp);
		break;

	case M_DATA:
		/* NOTE: We don't expect any M_DATA messages from xinet */
		freemsg(mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		dp = (union DL_primitives *) mp->b_rptr;

#if 0
#ifdef DEBUG
		printk("ip2xinet_lrput: %s size=%d\n", x25dbdlpmsg(dp->dl_primitive),
		       x25dbmsgsize(mp));
#endif
#endif

		switch (dp->dl_primitive) {
		case DL_BIND_ACK:

			/* if we're in BNDPND and receive a BIND_ACK we go to IDLE */
			ip2xinet_status.ip2x_dlstate = DL_IDLE;

			/* If we're DL_IDLE, then dev is open and the kernel can transmit */
			for (i = 0; i < NUMIP2XINET; i++) {
				privptr = &ip2xinet_devs[i].priv;

				if (privptr->state == 1)
					netif_start_queue(&(ip2xinet_devs[i].dev));
			}
			freemsg(mp);	/* Frees bind_ack no longer needed */
			break;

		case DL_INFO_ACK:

			/* NOTE: currently we don't send info_req to xinet */

			freemsg(mp);
			break;

		case DL_ERROR_ACK:
			switch (ip2xinet_status.ip2x_dlstate) {
			case DL_ATTACH_PENDING:
				/* if we receive ERROR_ACK and we're in ATTACH_PEND go into
				   UNATTACHED */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;

			case DL_BIND_PENDING:
				/* if we're in BNDPND and receive an ERR ack we go to UNBND, */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;

			case DL_UNBIND_PENDING:
				/* If we're in UNBIND_PEND and we receive ERROR_ACK we go into IDLE 
				 */
				ip2xinet_status.ip2x_dlstate = DL_IDLE;
				freemsg(mp);
				break;

			case DL_DETACH_PENDING:
				/* If we're in DETACH_PEND and receive an ERROR_ACK we go into
				   UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;
			default:
				freemsg(mp);
				break;

			}
			break;

		case DL_UNITDATA_IND:
			/* if we're in IDLE we can get DL_UNITDATA_IND with data and call the guy
			   who would normally receive data from interrupt handler. */

			/* Check state: can't transmit if dev is closed :-) Note: we have to check
			   both the dlpi state and dev->start because during a close the DLPI state 
			   could remain DL_IDLE if we couldn't allocate mblk for UNBIND_REQ. There
			   are many ways in which the dev->start could be 1 but dlpi state - not
			   DL_IDLE. */
			if (ip2xinet_status.ip2x_dlstate == DL_IDLE && privptr->state == 1)
			{
				mblk_t *newmp;
				unsigned char *buf;
				int len, tmplen;
				struct ethhdr *eth;
				struct sk_buff *skb;

				newmp = unlinkb(mp);

				freemsg(mp);
				mp = newmp;

				/* 1st pass through.  figure out the len */
				for (len = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont)
					len += (newmp->b_wptr - newmp->b_rptr);

				/* ALLOCATE skb of length len+2, COPY from mp chain to skb */

				skb = dev_alloc_skb(len + 2);
				if (!skb) {
					printk("ip2xinet rx: failed to allocate an skb\n");
					freemsg(mp);
					break;
				}
				skb_reserve(skb, 2);	/* align IP on 16B boundary */
				/* The packet has been retrieved from the transmission medium.
				   Build an skb around it, so upper layers can handle it */
				buf = skb_put(skb, len);
				for (newmp = mp, tmplen = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont) {
					bcopy(newmp->b_rptr, buf + tmplen,
					      newmp->b_wptr - newmp->b_rptr);
					tmplen += (newmp->b_wptr - newmp->b_rptr);
				}
				eth = (struct ethhdr *) buf;

				/* I am not sure it's necessary, but just in case... */

				memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
				memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
				eth->h_proto = 0x8;	/* ETH_P_IP in network order */
				eth->h_source[ETH_ALEN - 1] ^= 0x01;	/* say src is us xor 1 */

				/* send it to ip2xinet_rx for handling */
				ip2xinet_rx(dev, skb);
			}
			freemsg(mp);
			break;
		case DL_UDERROR_IND:
			freemsg(mp);
			break;

		case DL_OK_ACK:
			switch (dp->ok_ack.dl_correct_primitive) {

			case DL_ATTACH_REQ:
				/* if we're in ATTACH_PEND and we received OK_ACK1 change state to
				   UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				/* We just completed building up the X.25 stack below us. If IP is
				   already above us, we need to send down the bind that we would
				   normally do when IP opens us.  This allows us to restart the
				   X.25 stack without restarting TCP/IP. */
				if (ip2xinet_num_ip_opened != 0)
					ip2xinet_send_down_bind(WR(q));
				break;

			case DL_UNBIND_REQ:
				/* If we're in UNBIND_PEND and receive OK_ACK1 we go to UNBND. */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;

			case DL_DETACH_REQ:
				/* If we're in DETACH_PEND and receive OK_ACK1 we go to UNATT */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;

			default:
				freemsg(mp);
				break;
			}
			break;

		default:
			printk("ip2xinet_lrput: bad prim=0x%lx", (ulong) dp->dl_primitive);
			freemsg(mp);
			break;
		}
		break;

	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
		}
		if (mp->b_rptr[0] & FLUSHW) {
			mp->b_rptr[0] &= ~FLUSHR;
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(WR(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(WR(q), FLUSHDATA);
			qenable(WR(q));
			if (!putq(WR(q), mp)) {
				mp->b_band = 0;
				putq(WR(q), mp);
			}
		} else
			freemsg(mp);
		break;

	case M_HANGUP:
		/* send it to the guy that linked us up, what he does is his problem. */
		if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	case M_IOCACK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a success.  Rejoice :-) */
			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	case M_IOCNAK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a failure.  From looking at xinet code this is
			   impossible, so ignore it. */

			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	default:
		printk("ip2xinet_lrput: bad type=%d", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}

	spin_unlock(&ip2xinet_lock);
	return (0);
}
Example #17
/*ARGSUSED1*/
static int
zc_close(queue_t *rqp, int flag, cred_t *credp)
{
	queue_t *wqp;
	mblk_t	*bp;
	zc_state_t *zcs;
	major_t major;
	minor_t minor;

	zcs = (zc_state_t *)rqp->q_ptr;

	if (rqp == zcs->zc_master_rdq) {
		DBG("Closing master side");

		zcs->zc_master_rdq = NULL;
		zcs->zc_state &= ~ZC_STATE_MOPEN;

		/*
		 * qenable slave side write queue so that it can flush
		 * its messages as master's read queue is going away
		 */
		if (zcs->zc_slave_rdq != NULL) {
			qenable(WR(zcs->zc_slave_rdq));
		}

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

	} else if (rqp == zcs->zc_slave_rdq) {

		DBG("Closing slave side");
		zcs->zc_state &= ~ZC_STATE_SOPEN;
		zcs->zc_slave_rdq = NULL;

		wqp = WR(rqp);
		while ((bp = getq(wqp)) != NULL) {
			if (zcs->zc_master_rdq != NULL)
				putnext(zcs->zc_master_rdq, bp);
			else if (bp->b_datap->db_type == M_IOCTL)
				miocnak(wqp, bp, 0, 0);
			else
				freemsg(bp);
		}

		/*
		 * Qenable master side write queue so that it can flush its
		 * messages as slave's read queue is going away.
		 */
		if (zcs->zc_master_rdq != NULL)
			qenable(WR(zcs->zc_master_rdq));

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

		/*
		 * Clear the sad configuration so that reopening doesn't fail
		 * to set up sad configuration.
		 */
		major = ddi_driver_major(zcs->zc_devinfo);
		minor = ddi_get_instance(zcs->zc_devinfo) << 1 | ZC_SLAVE_MINOR;
		(void) kstr_autopush(CLR_AUTOPUSH, &major, &minor, NULL, NULL,
		    NULL);
	}

	return (0);
}
Example #18
/*
 * ptemwput - Module write queue put procedure.
 *
 * This is called from the module or stream head upstream.
 *
 * XXX:	This routine is quite lazy about handling allocation failures,
 *	basically just giving up and reporting failure.  It really ought to
 *	set up bufcalls and only fail when it's absolutely necessary.
 */
static void
ptemwput(queue_t *q, mblk_t *mp)
{
	struct ptem *ntp = (struct ptem *)q->q_ptr;
	struct iocblk *iocp;	/* outgoing ioctl structure */
	struct copyresp *resp;
	unsigned char type = mp->b_datap->db_type;

	if (type >= QPCTL) {
		switch (type) {

		case M_IOCDATA:
			resp = (struct copyresp *)mp->b_rptr;
			if (resp->cp_rval) {
				/*
				 * Just free message on failure.
				 */
				freemsg(mp);
				break;
			}

			/*
			 * Only need to copy data for the SET case.
			 */
			switch (resp->cp_cmd) {

				case TIOCSWINSZ:
					ptioc(q, mp, WRSIDE);
					break;

				case JWINSIZE:
				case TIOCGWINSZ:
					mioc2ack(mp, NULL, 0, 0);
					qreply(q, mp);
					break;

				default:
					freemsg(mp);
			}
			break;

		case M_FLUSH:
			if (*mp->b_rptr & FLUSHW) {
			    if ((ntp->state & IS_PTSTTY) &&
					(*mp->b_rptr & FLUSHBAND))
				flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
			    else
				flushq(q, FLUSHDATA);
			}
			putnext(q, mp);
			break;

		case M_READ:
			freemsg(mp);
			break;

		case M_STOP:
			/*
			 * Set the output flow control state.
			 */
			ntp->state |= OFLOW_CTL;
			putnext(q, mp);
			break;

		case M_START:
			/*
			 * Relieve the output flow control state.
			 */
			ntp->state &= ~OFLOW_CTL;
			putnext(q, mp);
			qenable(q);
			break;
		default:
			putnext(q, mp);
			break;
		}
		return;
	}
	/*
	 * If our queue is nonempty or flow control persists
	 * downstream or module in stopped state, queue this message.
	 */
	if (q->q_first != NULL || !bcanputnext(q, mp->b_band)) {
		/*
		 * Exception: ioctls, except for those defined to
		 * take effect after output has drained, should be
		 * processed immediately.
		 */
		switch (type) {

		case M_IOCTL:
			iocp = (struct iocblk *)mp->b_rptr;
			switch (iocp->ioc_cmd) {
			/*
			 * Queue these.
			 */
			case TCSETSW:
			case TCSETSF:
			case TCSETAW:
			case TCSETAF:
			case TCSBRK:
				break;

			/*
			 * Handle all others immediately.
			 */
			default:
				(void) ptemwmsg(q, mp);
				return;
			}
			break;

		case M_DELAY: /* tty delays not supported */
			freemsg(mp);
			return;

		case M_DATA:
			if ((mp->b_wptr - mp->b_rptr) < 0) {
				/*
				 * Free all bad length messages.
				 */
				freemsg(mp);
				return;
			} else if ((mp->b_wptr - mp->b_rptr) == 0) {
				if (!(ntp->state & IS_PTSTTY)) {
					freemsg(mp);
					return;
				}
			}
		}
		(void) putq(q, mp);
		return;
	}
	/*
	 * fast path into ptemwmsg to dispose of mp.
	 */
	if (!ptemwmsg(q, mp))
		(void) putq(q, mp);
}
Example #19
File: ip_to_dlpi.c Project: iHaD/openss7
STATIC streamscall int
ip2xinet_uwput(queue_t *q, mblk_t *mp)
{

	int i;

	spin_lock(&ip2xinet_lock);

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
		} else
			freemsg(mp);
		break;

	case M_IOCTL:
		/* Process at least the I_LINK, I_UNLINK */

		/* THINKME: Failure to correctly process I_LINK/I_UNLINK while returning correctly
		   a nack to stream head will leave us in a possibly totally screwed up DLPI state
		   from which we have to somehow recover.  The possible problematic states are
		   DL_UNBOUND, any DL_PENDING states.  Note: if we stay in UNATTACHED on I_LINK
		   failure or in IDLE on I_UNLINK failure we're ok as long as the private data
		   structure stuff is consistent with the state */

	{
		struct iocblk *iocp;
		mblk_t *nmp;
		dl_attach_req_t *attach;
		struct linkblk *lp;

		iocp = (struct iocblk *) mp->b_rptr;

#if 0
#ifdef DEBUG
		pkt_debug(X25DBIOCTL) KPRINTF("%s size %d\n", x25dbiocmsg(iocp->ioc_cmd),
					      x25dbmsgsize(mp));
#endif
#endif

		switch ((unsigned) iocp->ioc_cmd) {
		case I_LINK:
			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			iocp->ioc_count = 0;

			lp = (struct linkblk *) mp->b_cont->b_rptr;
			/* Use only one xinet queue for all devices */
			ip2xinet_status.lowerq = lp->l_qbot;
			ip2xinet_status.index = lp->l_index;

			/* Only one read q to get data from xinet */
			ip2xinet_status.readq = RD(q);

			/* These are dummy ones to indicate the queues are being used */
			ip2xinet_status.lowerq->q_ptr = (char *) &ip2xinet_numopen;
			RD(ip2xinet_status.lowerq)->q_ptr = (char *) &ip2xinet_numopen;

			if ((nmp = allocb(sizeof(union DL_primitives), BPRI_LO)) == NULL) {
				iocp->ioc_error = ENOSR;
				mp->b_datap->db_type = M_IOCNAK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				spin_unlock(&ip2xinet_lock);
				printk("pktioctl: I_LINK failed: allocb failed");
				return (0);
			}

			/* Setup and send an ATTACH */
			nmp->b_datap->db_type = M_PROTO;
			nmp->b_wptr += DL_ATTACH_REQ_SIZE;

			attach = (dl_attach_req_t *) nmp->b_rptr;
			attach->dl_primitive = DL_ATTACH_REQ;
			attach->dl_ppa = ip2xinet_status.myminor;
			ip2xinet_status.ip2x_dlstate = DL_ATTACH_PENDING;

			/* experience shows that an I_LINKed queue needs to be enabled so that the
			   service routine will be run. */
			qenable(ip2xinet_status.lowerq);
			if (!putq(ip2xinet_status.lowerq, nmp)) {
				nmp->b_band = 0;
				putq(ip2xinet_status.lowerq, nmp);
			}

			/* all went well */
			mp->b_datap->db_type = M_IOCACK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
			break;

		case I_UNLINK:
		{
			struct linkblk *lp;

			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			iocp->ioc_count = 0;
			lp = (struct linkblk *) mp->b_cont->b_rptr;

			/* Ignore the DLPI state, the stack is being torn down regardless.  */
			ip2xinet_status.ip2x_dlstate = UNLINKED;
			/* can't transmit any more */
			for (i = 0; i < NUMIP2XINET; i++) {
				struct ip2xinet_priv *privptr = &ip2xinet_devs[i].priv;

				if (privptr->state == 1)
					netif_stop_queue(&(ip2xinet_devs[i].dev));
			}

			flushq(q, FLUSHALL);
			flushq(RD(lp->l_qbot), FLUSHALL);

			ip2xinet_status.readq = NULL;
			ip2xinet_status.lowerq = NULL;
			mp->b_datap->db_type = M_IOCACK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}

			break;
		}

		default:
			iocp->ioc_error = EINVAL;
			mp->b_datap->db_type = M_IOCNAK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
			break;
		}

	}
		break;

	case M_DATA:
	case M_PCPROTO:
	case M_PROTO:
	default:
		printk("ip2xinet_uwput: unexpected type=0x%x", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}
	spin_unlock(&ip2xinet_lock);
	return (0);
}
Example #20
static uint_t
mouse8042_intr(caddr_t arg)
{
	unsigned char    mdata;
	mblk_t *mp;
	struct mouse_state *state = (struct mouse_state *)arg;
	int rc;

	mutex_enter(&state->ms_mutex);

	rc = DDI_INTR_UNCLAIMED;

	for (;;) {

		if (ddi_get8(state->ms_handle,
		    state->ms_addr + I8042_INT_INPUT_AVAIL) == 0) {
			break;
		}

		mdata = ddi_get8(state->ms_handle,
		    state->ms_addr + I8042_INT_INPUT_DATA);

		rc = DDI_INTR_CLAIMED;

		/*
		 * If we're not ready for this data, discard it.
		 */
		if (!state->ready)
			continue;

		mutex_enter(&state->reset_mutex);
		if (state->reset_state != MSE_RESET_IDLE) {

			if (mdata == MSEERROR || mdata == MSERESET) {
				state->reset_state = MSE_RESET_FAILED;
			} else {
				state->reset_state =
				    mouse8042_reset_fsm(state->reset_state,
				    mdata);
			}

			if (state->reset_state == MSE_RESET_ACK) {

			/*
			 * We received an ACK from the mouse, so
			 * send it upstream immediately so that
			 * consumers depending on the immediate
			 * ACK don't time out.
			 */
				if (state->reset_ack_mp != NULL) {

					mp = state->reset_ack_mp;

					state->reset_ack_mp = NULL;

					if (state->ms_rqp != NULL) {
						*mp->b_wptr++ = MSE_ACK;
						putnext(state->ms_rqp, mp);
					} else
						freemsg(mp);
				}

				if (state->ms_wqp != NULL) {
					enableok(state->ms_wqp);
					qenable(state->ms_wqp);
				}

			} else if (state->reset_state == MSE_RESET_IDLE ||
			    state->reset_state == MSE_RESET_FAILED) {

			/*
			 * If we transitioned back to the idle reset state (or
			 * the reset failed), disable the timeout, release the
			 * 8042 exclusive-access lock, then send the response
			 * to the upper-level modules. Finally, enable the
			 * queue and schedule queue service procedures so that
			 * upper-level modules can process the response.
			 * Otherwise, if we're still in the middle of the
			 * reset sequence, do not send the data up (since the
			 * response is sent at the end of the sequence, or
			 * on timeout/error).
			 */

				mutex_exit(&state->reset_mutex);
				(void) quntimeout(state->ms_wqp,
				    state->reset_tid);
				mutex_enter(&state->reset_mutex);

				(void) ddi_get8(state->ms_handle,
				    state->ms_addr + I8042_UNLOCK);

				state->reset_tid = 0;
				if (state->reply_mp != NULL) {
					mp = state->reply_mp;
					if (state->reset_state ==
					    MSE_RESET_FAILED) {
						*mp->b_wptr++ = mdata;
					} else {
						*mp->b_wptr++ = MSE_AA;
						*mp->b_wptr++ = MSE_00;
					}
					state->reply_mp = NULL;
				} else {
					mp = NULL;
				}

				state->reset_state = MSE_RESET_IDLE;
				cv_signal(&state->reset_cv);

				if (mp != NULL) {
					if (state->ms_rqp != NULL)
						putnext(state->ms_rqp, mp);
					else
						freemsg(mp);
				}

				if (state->ms_wqp != NULL) {
					enableok(state->ms_wqp);
					qenable(state->ms_wqp);
				}
			}

			mutex_exit(&state->reset_mutex);
			mutex_exit(&state->ms_mutex);
			return (rc);
		}
		mutex_exit(&state->reset_mutex);

		if (state->ms_rqp != NULL && (mp = allocb(1, BPRI_MED))) {
			*mp->b_wptr++ = mdata;
			putnext(state->ms_rqp, mp);
		}
	}
	mutex_exit(&state->ms_mutex);

	return (rc);
}
Example #21
/*
 * ptemrput - Module read queue put procedure.
 *
 * This is called from the module or driver downstream.
 */
static void
ptemrput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;	/* M_IOCTL data */
	struct copyresp *resp;	/* transparent ioctl response struct */
	int error;

	switch (mp->b_datap->db_type) {
	case M_DELAY:
	case M_READ:
		freemsg(mp);
		break;

	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;

		switch (iocp->ioc_cmd) {
		case TCSBRK:
			/*
			 * Send a break message upstream.
			 *
			 * XXX:	Shouldn't the argument come into play in
			 *	determining whether or not to send an M_BREAK?
			 *	It certainly does in the write-side direction.
			 */
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (!(*(int *)mp->b_cont->b_rptr)) {
				if (!putnextctl(q, M_BREAK)) {
					/*
					 * Send an NAK reply back
					 */
					miocnak(q, mp, 0, EAGAIN);
					break;
				}
			}
			/*
			 * ACK it.
			 */
			mioc2ack(mp, NULL, 0, 0);
			qreply(q, mp);
			break;

		case JWINSIZE:
		case TIOCGWINSZ:
		case TIOCSWINSZ:
			ptioc(q, mp, RDSIDE);
			break;

		case TIOCSIGNAL:
			/*
			 * The following subtle logic is due to the fact that
			 * `mp' may be in any one of three distinct formats:
			 *
			 *	1. A transparent M_IOCTL with an intptr_t-sized
			 *	   payload containing the signal number.
			 *
			 *	2. An I_STR M_IOCTL with an int-sized payload
			 *	   containing the signal number.
			 *
			 *	3. An M_IOCDATA with an int-sized payload
			 *	   containing the signal number.
			 */
			if (iocp->ioc_count == TRANSPARENT) {
				intptr_t sig = *(intptr_t *)mp->b_cont->b_rptr;

				if (sig < 1 || sig >= NSIG) {
					/*
					 * it's transparent with pointer
					 * to the arg
					 */
					mcopyin(mp, NULL, sizeof (int), NULL);
					qreply(q, mp);
					break;
				}
			}
			ptioc(q, mp, RDSIDE);
			break;

		case TIOCREMOTE:
			if (iocp->ioc_count != TRANSPARENT)
				ptioc(q, mp, RDSIDE);
			else {
				mcopyin(mp, NULL, sizeof (int), NULL);
				qreply(q, mp);
			}
			break;

		default:
			putnext(q, mp);
			break;
		}
		break;

	case M_IOCDATA:
		resp = (struct copyresp *)mp->b_rptr;
		if (resp->cp_rval) {
			/*
			 * Just free message on failure.
			 */
			freemsg(mp);
			break;
		}

		/*
		 * Only need to copy data for the SET case.
		 */
		switch (resp->cp_cmd) {

		case TIOCSWINSZ:
		case TIOCSIGNAL:
		case TIOCREMOTE:
			ptioc(q, mp, RDSIDE);
			break;

		case JWINSIZE:
		case TIOCGWINSZ:
			mp->b_datap->db_type = M_IOCACK;
			mioc2ack(mp, NULL, 0, 0);
			qreply(q, mp);
			break;

		default:
			freemsg(mp);
			break;
		}
		break;

	case M_IOCACK:
	case M_IOCNAK:
		/*
		 * We only pass write-side ioctls through to the master that
		 * we've already ACKed or NAKed to the stream head.  Thus, we
		 * discard ones arriving from below, since they're redundant
		 * from the point of view of modules above us.
		 */
		freemsg(mp);
		break;

	case M_HANGUP:
		/*
		 * clear blocked state.
		 */
		{
			struct ptem *ntp = (struct ptem *)q->q_ptr;
			if (ntp->state & OFLOW_CTL) {
				ntp->state &= ~OFLOW_CTL;
				qenable(WR(q));
			}
		}
	default:
		putnext(q, mp);
		break;
	}
}
Example #22
File: dm2s.c Project: andreiw/polaris
/*
 * dm2s_wput - Streams write side put routine.
 *
 * All M_DATA messages are queued so that they are transmitted in
 * the service procedure. This is done to simplify the streams
 * synchronization. Other messages are handled appropriately.
 */
int
dm2s_wput(queue_t *wq, mblk_t *mp)
{
	dm2s_t	*dm2sp = (dm2s_t *)wq->q_ptr;

	DPRINTF(DBG_DRV, ("dm2s_wput: called\n"));
	if (dm2sp == NULL) {
		return (ENODEV);   /* Can't happen. */
	}

	switch (mp->b_datap->db_type) {
	case (M_DATA):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_DATA message\n"));
		while (mp->b_wptr == mp->b_rptr) {
			mblk_t *mp1;

			mp1 = unlinkb(mp);
			freemsg(mp);
			mp = mp1;
			if (mp == NULL) {
				return (0);
			}
		}

		/*
		 * Simply queue the message and handle it in the service
		 * procedure.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		return (0);

	case (M_PROTO):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_PROTO message\n"));
		/* We don't expect this */
		mp->b_datap->db_type = M_ERROR;
		mp->b_rptr = mp->b_wptr = mp->b_datap->db_base;
		*mp->b_wptr++ = EPROTO;
		qreply(wq, mp);
		return (EINVAL);

	case (M_IOCTL):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_IOCTL message\n"));
		if (MBLKL(mp) < sizeof (struct iocblk)) {
			freemsg(mp);
			return (0);
		}
		/*
		 * No ioctls required to be supported by this driver, so
		 * return EINVAL for all ioctls.
		 */
		miocnak(wq, mp, 0, EINVAL);
		break;

	case (M_CTL):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_CTL message\n"));
		/*
		 * No M_CTL messages need to be supported by this driver,
		 * so simply ignore them.
		 */
		freemsg(mp);
		break;

	case (M_FLUSH):
		DPRINTF(DBG_DRV, (
		    "dm2s_wput: M_FLUSH message 0x%X\n", *mp->b_rptr));
		if (*mp->b_rptr & FLUSHW) {	/* Flush write-side */
			(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
			    MB_FLUSH_SEND);
			flushq(wq, FLUSHDATA);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
			    MB_FLUSH_RECEIVE);
			flushq(RD(wq), FLUSHDATA);
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		DPRINTF(DBG_DRV, ("dm2s_wput: UNKNOWN message\n"));
		freemsg(mp);

	}
	return (0);
}
Example #23
File: dm2s.c Project: andreiw/polaris
/*
 * dm2s_event_handler - Mailbox event handler.
 */
void
dm2s_event_handler(scf_event_t event, void *arg)
{
	dm2s_t *dm2sp = (dm2s_t *)arg;
	queue_t	*rq;

	ASSERT(dm2sp != NULL);
	mutex_enter(&dm2sp->ms_lock);
	if (!(dm2sp->ms_state & DM2S_MB_INITED)) {
		/*
		 * Ignore all events if the state flag indicates that the
		 * mailbox is not initialized; this may happen during the close.
		 */
		mutex_exit(&dm2sp->ms_lock);
		DPRINTF(DBG_MBOX,
		    ("Event(0x%X) received - Mailbox not inited\n", event));
		return;
	}
	switch (event) {
	case SCF_MB_CONN_OK:
		/*
		 * Now the mailbox is ready to use; let's wake up
		 * anyone waiting for this event.
		 */
		dm2sp->ms_state |= DM2S_MB_CONN;
		cv_broadcast(&dm2sp->ms_wait);
		DPRINTF(DBG_MBOX, ("Event received = CONN_OK\n"));
		break;

	case SCF_MB_MSG_DATA:
		if (!DM2S_MBOX_READY(dm2sp)) {
			DPRINTF(DBG_MBOX,
			    ("Event(MSG_DATA) received - Mailbox not READY\n"));
			break;
		}
		/*
		 * A message is available in the mailbox.
		 * Lets enable the read service procedure
		 * to receive this message.
		 */
		if (dm2sp->ms_rq != NULL) {
			qenable(dm2sp->ms_rq);
		}
		DPRINTF(DBG_MBOX, ("Event received = MSG_DATA\n"));
		break;

	case SCF_MB_SPACE:
		if (!DM2S_MBOX_READY(dm2sp)) {
			DPRINTF(DBG_MBOX,
			    ("Event(MB_SPACE) received - Mailbox not READY\n"));
			break;
		}

		/*
		 * Now the mailbox is ready to transmit; let's
		 * schedule the write service procedure.
		 */
		if (dm2sp->ms_wq != NULL) {
			qenable(dm2sp->ms_wq);
		}
		DPRINTF(DBG_MBOX, ("Event received = MB_SPACE\n"));
		break;
	case SCF_MB_DISC_ERROR:
		dm2sp->ms_state |= DM2S_MB_DISC;
		if (dm2sp->ms_state & DM2S_MB_CONN) {
			/*
			 * If it was previously connected,
			 * then send a hangup message.
			 */
			rq = dm2sp->ms_rq;
			if (rq != NULL) {
				mutex_exit(&dm2sp->ms_lock);
				/*
				 * Send a hangup message to indicate
				 * disconnect event.
				 */
				(void) putctl(rq, M_HANGUP);
				DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
				mutex_enter(&dm2sp->ms_lock);
			}
		} else {
			/*
			 * Signal if the open is waiting for a
			 * connection.
			 */
			cv_broadcast(&dm2sp->ms_wait);
		}
		DPRINTF(DBG_MBOX, ("Event received = DISC_ERROR\n"));
		break;
	default:
		cmn_err(CE_WARN, "Unexpected event received\n");
		break;
	}
	mutex_exit(&dm2sp->ms_lock);
}
Example #24
static streamscall __hot_put int
ptem_wput(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		/* free zero-length messages */
		if (msgdsize(mp) != 0) {
			if ((p->flags & PTEM_OUTPUT_STOPPED)
			    || (q->q_first != NULL)
			    || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band))) {
				/* Note, the only reason for failing putq() is the lack of a queue
				   band, in which case the band is empty and no loss of order will
				   result from putting it to the next queue. */
				if (putq(q, mp))
					return (0);
			}
			putnext(q, mp);
			return (0);
		}
		freemsg(mp);
		return (0);
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;

		/* The Stream head is set to recognize all transparent terminal input-output
		   controls and pass them downstream as though they were I_STR input-output
		   controls.  There is also the opportunity to register input-output controls with
		   the Stream head using the TIOC_REPLY message. */
		if (unlikely(ioc->ioc_count == TRANSPARENT))
			goto do_it;

		switch (ioc->ioc_cmd) {
		case TCSETAW:
		case TCSETAF:
		case TCSETSW:
		case TCSETSF:
		case TCSBRK:
			/* These need to wait for the output to drain before being processed, queue 
			   them. */
			putq(q, mp);
			break;
		default:
			/* Process others immediately, regardless of whether there is any data or
			   other messages in queue. */
			goto do_it;
		}
		break;
	}
	case M_DELAY:
	case M_READ:
		freemsg(mp);
		break;
	case M_STOP:
		if (canenable(q)) {
			noenable(q);
			p->flags |= PTEM_OUTPUT_STOPPED;
		}
		putnext(q, mp);
		break;
	case M_START:
		if (!canenable(q)) {
			p->flags &= ~PTEM_OUTPUT_STOPPED;
			enableok(q);
			qenable(q);
		}
		putnext(q, mp);
		break;
	case M_STOPI:
	case M_STARTI:
		/* We have no read side queue so we cannot queue in this direction.  Tell master so 
		   that pckt(4) can tell master not to send anything more. */
		putnext(q, mp);
		break;
	default:
	      do_it:
		if (ptem_w_msg(q, mp) && !putq(q, mp))
			freemsg(mp);
		break;
	}
	return (0);
}
Example #25
/*
 * Restart queuing of high-priority messages on the read stream
 * when flow control has failed
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : P
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_rcmn_high_qenable(queue_t *q)
{
	mblk_t		*mp;
	struct iocblk	*iocp = NULL;
	lpath_t		*lpath;
	int		rval;

	rw_enter(&oplmsu_uinst->lock, RW_READER);

	for (;;) {	/* Handle high priority message */
		mutex_enter(&oplmsu_uinst->l_lock);
		lpath = (lpath_t *)q->q_ptr;
		if ((mp = lpath->first_lpri_hi) == NULL) {
			mutex_exit(&oplmsu_uinst->l_lock);
			break;
		}

		if (mp->b_next == NULL) {
			lpath->first_lpri_hi = NULL;
			lpath->last_lpri_hi = NULL;
		} else {
			lpath->first_lpri_hi = mp->b_next;
			mp->b_next->b_prev = NULL;
			mp->b_next = NULL;
		}
		mp->b_prev = NULL;
		mutex_exit(&oplmsu_uinst->l_lock);

		rval = SUCCESS;
		switch (mp->b_datap->db_type) {
		case M_IOCACK :		/* FALLTHRU */
		case M_IOCNAK :
			iocp = (struct iocblk *)mp->b_rptr;
			switch (iocp->ioc_cmd) {
			case TCSETS :		/* FALLTHRU */
			case TCSETSW :		/* FALLTHRU */
			case TCSETSF :		/* FALLTHRU */
			case TIOCMSET :		/* FALLTHRU */
			case TIOCSPPS :		/* FALLTHRU */
			case TIOCSWINSZ :	/* FALLTHRU */
			case TIOCSSOFTCAR :
				rw_exit(&oplmsu_uinst->lock);
				rval = oplmsu_lrioctl_termios(q, mp);
				rw_enter(&oplmsu_uinst->lock, RW_WRITER);
				break;

			default :
				rval = oplmsu_rcmn_through_hndl(
				    q, mp, MSU_HIGH);
				if (rval == FAILURE) {
					rw_exit(&oplmsu_uinst->lock);
					return;
				}
			}
			break;

		case M_ERROR :
			rw_exit(&oplmsu_uinst->lock);
			rval = oplmsu_lrmsg_error(q, mp);
			rw_enter(&oplmsu_uinst->lock, RW_WRITER);
			break;

		case M_FLUSH :
			oplmsu_rcmn_flush_hndl(q, mp);
			break;

		default :
			rval = oplmsu_rcmn_through_hndl(q, mp, MSU_HIGH);
			if (rval == FAILURE) {
				rw_exit(&oplmsu_uinst->lock);
				return;
			}
		}

		if (rval == FAILURE) {
			break;
		}
	}

	rw_exit(&oplmsu_uinst->lock);
	qenable(q);	/* Enable lower read queue */
}
Example #26
File: telmod.c Project: andreiw/polaris
/*
 * telmodwput:
 * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon
 * to process something.  M_CTL's are data from the daemon bound for the
 * network.  We forward them immediately.  There are two classes of ioctl's
 * we must handle here also.  One is ioctl's forwarded by ptem which we
 * ignore.  The other is ioctl's issued by the daemon to control us.
 * Process them appropriately.  M_PROTO's we pass along, figuring they
 * are TPI operations for TCP.  M_FLUSH requires careful processing, since
 * telnet cannot tolerate flushing its protocol requests.  Also the flushes
 * can be running either daemon<->TCP or application<->telmod.  We must
 * carefully deal with this.
 */
static void
telmodwput(
	queue_t *q,	/* Pointer to the write queue */
	mblk_t *mp)	/* Pointer to current message block */
{
	struct telmod_info	*tmip;
	struct iocblk *ioc;
	mblk_t *savemp;
	int rw;
	int error;

	tmip = (struct telmod_info *)q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_DATA:
		if (!canputnext(q) || (tmip->flags & TEL_STOPPED) ||
			(q->q_first)) {
			noenable(q);
			(void) putq(q, mp);
			break;
		}
		/*
		 * This routine parses data generated from the ptm side.
		 * Insert a null character if carriage return
		 * is not followed by line feed unless we are in binary mode.
		 * Also, duplicate IAC if found in the data.
		 */
		(void) snd_parse(q, mp);
		break;

	case M_CTL:
		if (((mp->b_wptr - mp->b_rptr) == 1) &&
			(*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(q, mp);
		break;

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/*
		 * This ioctl is issued by user level daemon to
		 * request one more message block to process protocol
		 */
		case TEL_IOC_GETBLK:
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags |= TEL_GETBLK;
			qenable(RD(q));
			enableok(RD(q));

			miocack(q, mp, 0, 0);
			break;

		/*
		 * This ioctl is issued by user level daemon to reenable the
		 * read and write queues. This is issued during startup time
		 * after setting up the mux links and also after processing
		 * the protocol.  It is also issued after each time an
		 * unrecognized telnet option is forwarded to the daemon.
		 */
		case TEL_IOC_ENABLE:

			/*
			 * Send negative ack if TEL_STOPPED flag is not set
			 */
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags &= ~TEL_STOPPED;
			if (mp->b_cont) {
				(void) putbq(RD(q), mp->b_cont);
				mp->b_cont = 0;
			}

			qenable(RD(q));
			enableok(RD(q));
			qenable(q);
			enableok(q);

			miocack(q, mp, 0, 0);
			break;

		/*
		 * Set binary/normal mode for input and output
		 * according to the instructions from the daemon.
		 */
		case TEL_IOC_MODE:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			tmip->flags |= *(mp->b_cont->b_rptr) &
			    (TEL_BINARY_IN|TEL_BINARY_OUT);
			miocack(q, mp, 0, 0);
			break;

#ifdef DEBUG
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
		case TIOCSWINSZ:
			miocnak(q, mp, 0, EINVAL);
			break;
#endif
		case CRYPTPASSTHRU:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (*(mp->b_cont->b_rptr) == 0x01)
				tmip->flags |= TEL_IOCPASSTHRU;
			else
				tmip->flags &= ~TEL_IOCPASSTHRU;

			miocack(q, mp, 0, 0);
			break;

		default:
			if (tmip->flags & TEL_IOCPASSTHRU) {
				putnext(q, mp);
			} else {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				"telmodwput: unexpected ioctl type 0x%x",
					ioc->ioc_cmd);
#endif
				miocnak(q, mp, 0, EINVAL);
			}
			break;
		}
		break;

	case M_FLUSH:
		/*
		 * Flushing is tricky:  We try to flush all we can, but certain
		 * data cannot be flushed.  Telnet protocol sequences cannot
		 * be flushed.  So, TCP's queues cannot be flushed since we
		 * cannot tell what might be telnet protocol data.  Then we
		 * must take care to create and forward out-of-band data
		 * indicating the flush to the far side.
		 */
		rw = *mp->b_rptr;
		if (rw & FLUSHR) {
			/*
			 * We cannot flush our read queue, since there may
			 * be telnet protocol bits in the queue, awaiting
			 * processing.  However, once it leaves this module
			 * it's guaranteed that all protocol data is in
			 * M_CTL, so we do flush read data beyond us, expecting
			 * them (actually logindmux) to do FLUSHDATAs also.
			 */
			*mp->b_rptr = rw & ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		if (rw & FLUSHW) {
			/*
			 * Since all telnet protocol data comes from the
			 * daemon, stored as M_CTL messages, flushq will
			 * do exactly what's needed:  Flush bytes which do
			 * not have telnet protocol data.
			 */
			flushq(q, FLUSHDATA);
		}
		break;

	case M_PCPROTO:
		putnext(q, mp);
		break;

	case M_PROTO:
		/* We may receive T_DISCON_REQ from the mux */
		if (!canputnext(q) || q->q_first != NULL)
			(void) putq(q, mp);
		else
			putnext(q, mp);
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodwput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
		break;
	}
}
Example #27
/*
 * cvc_wput()
 *	cn driver does a strwrite of console output data to rconsvp which has
 *	been set by consconfig. The data enters the cvc stream at the streamhead
 *	and flows thru ttycompat and ldterm which have been pushed on the
 *	stream.  Console output data gets sent out either to cvcredir, if the
 *	network path is available and selected, or to IOSRAM otherwise.  Data is
 *	sent to cvcredir via its read queue (cvcoutput_q, which gets set in
 *	cvc_register()).  If the IOSRAM path is selected, or if previous mblks
 *	are currently queued up for processing, the new mblk will be queued
 *	and handled later on by cvc_wsrv.
 */
static int
cvc_wput(queue_t *q, mblk_t *mp)
{
	int		error = 0;

	rw_enter(&cvclock, RW_READER);

	CVC_DBG2(CVC_DBG_WPUT, "mp 0x%x db_type 0x%x",
	    mp, mp->b_datap->db_type);

	switch (mp->b_datap->db_type) {

		case M_IOCTL:
		case M_CTL: {
			struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

			switch (iocp->ioc_cmd) {
				/*
				 * These ioctls are only supposed to be
				 * processed after everything else that is
				 * already queued awaiting processing, so throw
				 * them on the queue and let cvc_wsrv handle
				 * them.
				 */
				case TCSETSW:
				case TCSETSF:
				case TCSETAW:
				case TCSETAF:
				case TCSBRK:
					putq(q, mp);
					break;

				default:
					cvc_ioctl(q, mp);
			}
			break;
		}

		case M_FLUSH:
			if (*mp->b_rptr & FLUSHW) {
				/*
				 * Flush our write queue.
				 */
				flushq(q, FLUSHDATA);
				*mp->b_rptr &= ~FLUSHW;
			}
			if (*mp->b_rptr & FLUSHR) {
				flushq(RD(q), FLUSHDATA);
				qreply(q, mp);
			} else
				freemsg(mp);
			break;

		case M_STOP:
			cvc_stopped = 1;
			freemsg(mp);
			break;

		case M_START:
			cvc_stopped = 0;
			freemsg(mp);
			qenable(q);  /* Start up delayed messages */
			break;

		case M_READ:
			/*
			 * ldterm handles this (VMIN/VTIME processing).
			 */
			freemsg(mp);
			break;

		default:
			cmn_err(CE_WARN, "cvc_wput: unexpected mblk type - mp ="
			    " 0x%p, type = 0x%x", mp, mp->b_datap->db_type);
			freemsg(mp);
			break;

		case M_DATA:
			/*
			 * If there are other mblks queued up for transmission,
			 * or we're using IOSRAM either because cvcredir hasn't
			 * registered yet or because we were configured that
			 * way, or cvc has been stopped or suspended, place this
			 * mblk on the input queue for future processing.
			 * Otherwise, hand it off to cvcredir for transmission
			 * via the network.
			 */
			if (q->q_first != NULL || cvcoutput_q == NULL ||
			    via_iosram || cvc_stopped == 1 ||
			    cvc_suspended == 1) {
				(void) putq(q, mp);
			} else {
				/*
				 * XXX - should canputnext be called here?
				 * Starfire's cvc doesn't do that, and it
				 * appears to work anyway.
				 */
				(void) putnext(cvcoutput_q, mp);
			}
			break;

	}
	rw_exit(&cvclock);
	return (error);
}
Example #28
/*
 * sppp_dlunitdatareq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Handle DL_UNITDATA_REQ request, called by sppp_mproto. This procedure
 *    gets called for M_PROTO (DLPI) style of transmission. The fact that we
 *    have acknowledged IP's fastpath probing (DL_IOC_HDR_INFO) does not
 *    guarantee that IP will always transmit via M_DATA, and it merely implies
 *    that such situation _may_ happen. In other words, IP may decide to use
 *    M_PROTO (DLPI) for data transmission should it decide to do so.
 *    Therefore, we should never place any restrictions or checks against
 *    streams marked with SPS_FASTPATH, since it is legal for this procedure
 *    to be entered with or without the bit set.
 */
static int
sppp_dlunitdatareq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	sppa_t		*ppa;
	mblk_t		*hdrmp;
	mblk_t		*pktmp;
	dl_unitdata_req_t *dludp;
	int		dladdroff;
	int		dladdrlen;
	int		msize;
	int		error = 0;
	boolean_t	is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT((MTYPE(mp) == M_PCPROTO) || (MTYPE(mp) == M_PROTO));
	dludp = (dl_unitdata_req_t *)mp->b_rptr;
	dladdroff = dludp->dl_dest_addr_offset;
	dladdrlen = dludp->dl_dest_addr_length;
	ASSERT(sps != NULL);
	ASSERT(!IS_SPS_PIOATTACH(sps));
	ASSERT(sps->sps_dlstate == DL_IDLE);
	ASSERT(q->q_ptr == sps);
	/*
	 * If this stream is not attached to any ppas, then discard data
	 * coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: no attached ppa\n"));
		error = ENOLINK;
	} else if (mp->b_cont == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: missing data\n"));
		error = EPROTO;
	}
	if (error != 0) {
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_BADDATA, error);
		return (0);
	}
	ASSERT(mp->b_cont->b_rptr != NULL);
	/*
	 * Check if outgoing packet size is larger than allowed. We use
	 * msgdsize to count all of M_DATA blocks in the message.
	 */
	msize = msgdsize(mp);
	if (msize > ppa->ppa_mtu) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: DL_UNITDATA_REQ (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/* Allocate a message (M_DATA) to contain PPP header bytes. */
	if ((hdrmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_allocbfail++;
		mutex_exit(&ppa->ppa_sta_lock);
		DBGERROR((CE_CONT,
		    "DLPI unitdata: can't allocate header buffer\n"));
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_SYSERR, ENOSR);
		return (0);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp->b_cont, sps->sps_sap,
		    B_FALSE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/* Discard DLPI header and keep only IP payload (mp->b_cont). */
	pktmp = mp->b_cont;
	mp->b_cont = NULL;
	freemsg(mp);
	mp = hdrmp;

	*(uchar_t *)mp->b_wptr++ = PPP_ALLSTATIONS;
	*(uchar_t *)mp->b_wptr++ = PPP_UI;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap >> 8) & 0xff;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap) & 0xff;
	ASSERT(MBLKL(mp) == PPP_HDRLEN);

	linkb(mp, pktmp);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.
	 */
	if (IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * Just put this back on the queue and allow the write service
	 * routine to handle it.  We're nested too deeply here to
	 * rewind the stack sufficiently to prevent overflow.  This is
	 * the slow path anyway.
	 */
	if (putq(q, mp) == 0) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_oqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
	} else {
		qenable(q);
	}
	return (0);
}