Example #1
static void winsnd_write_postprocess(MSFilter *f){
	WinSnd *d=(WinSnd*)f->data;
	MMRESULT mr;
	int i;
	if (d->outdev==NULL) return;
	mr=waveOutReset(d->outdev);
	if (mr != MMSYSERR_NOERROR){
		ms_error("waveOutReset() error");
		return ;
	}
	for(i=0;i<WINSND_OUT_NBUFS;++i){
		WAVEHDR *hdr=&d->hdrs_write[i];
		mblk_t *old;
		if (hdr->dwFlags & WHDR_DONE){
			mr=waveOutUnprepareHeader(d->outdev,hdr,sizeof(*hdr));
			if (mr != MMSYSERR_NOERROR){
				ms_error("waveOutUnprepareHeader error");
			}
			old=(mblk_t*)hdr->dwUser;
			if (old) freemsg(old);
			hdr->dwUser=0;
		}
	}
	mr=waveOutClose(d->outdev);
	if (mr != MMSYSERR_NOERROR){
		ms_error("waveOutClose() error");
		return ;
	}
	ms_message("Shutting down sound device (playing: %i) (d->write_rq.q_mcount=%i) (input-output: %i) (notplayed: %i)", d->nbufs_playing, d->write_rq.q_mcount, d->stat_input - d->stat_output, d->stat_notplayed);
	flushq(&d->write_rq,0);
	d->ready=0;
	d->workaround=0;

#ifndef DISABLE_SPEEX
	if (d->pst!=NULL)
	    speex_preprocess_state_destroy(d->pst);
	d->pst=NULL;
	d->pst_frame_size=0;
#endif
}
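
Note that flushq() in the mediastreamer2/ortp examples in this listing is the user-space re-implementation from ortp's str_utils.h, not the kernel STREAMS routine used in the driver examples below. The following is only a minimal sketch of that queue life-cycle; the include path and the buffer size are assumptions, not taken from the code above:

#include <ortp/str_utils.h>	/* queue_t, mblk_t, qinit, putq, getq, flushq (assumed path) */

static void queue_lifecycle_sketch(void){
	queue_t q;
	mblk_t *m;

	qinit(&q);		/* start with an empty queue */
	m=allocb(160,0);	/* one message block with 160 bytes of room */
	m->b_wptr+=160;		/* pretend the payload was written */
	putq(&q,m);		/* enqueue it */
	while((m=getq(&q))!=NULL)
		freemsg(m);	/* normally consumed by the filter here */
	flushq(&q,0);		/* drop anything still queued, as in the postprocess above */
}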
Example #2
STATIC streamscall int
srvmod_rput(queue_t *q, mblk_t *mp)
{
	if (unlikely(mp->b_datap->db_type == M_FLUSH)) {
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
		}
	}
	if (likely(mp->b_datap->db_type >= QPCTL || (q->q_first == NULL && !(q->q_flag & QSVCBUSY)
						     && bcanputnext(q, mp->b_band)))) {
		putnext(q, mp);
		return (0);
	}
	if (unlikely(putq(q, mp) == 0)) {
		mp->b_band = 0;
		putq(q, mp);	/* this must succeed */
	}
	return (0);
}
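
A put routine like the one above relies on a matching service procedure to drain whatever it queued once back-pressure clears. The real srvmod_rsrv is not shown here; this is only a sketch of the canonical STREAMS pattern it would follow:

STATIC streamscall int
srvmod_rsrv_sketch(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (!bcanputnext(q, mp->b_band)) {
			putbq(q, mp);	/* requeue; STREAMS back-enables us later */
			break;
		}
		putnext(q, mp);
	}
	return (0);
}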
Example #3
static void v4m_process(MSFilter * obj){
	V4lState *s=(V4lState*)obj->data;
	uint32_t timestamp;
	int cur_frame;
	if (s->frame_count==-1){
		s->start_time=obj->ticker->time;
		s->frame_count=0;
	}
	if (s->seqgrab!=NULL)
	{
	  SGIdle(s->seqgrab);
	}

	cur_frame=((obj->ticker->time-s->start_time)*s->fps/1000.0);
	if (cur_frame>=s->frame_count){
		mblk_t *om=NULL;
		ms_mutex_lock(&s->mutex);
		/*keep the most recent frame if several frames have been captured */
		if (s->seqgrab!=NULL){
			om=getq(&s->rq);
		}else{
			if (s->usemire){
				om=dupmsg(v4m_make_mire(s));
			}else {
				mblk_t *tmpm=v4m_make_nowebcam(s);
				if (tmpm) om=dupmsg(tmpm);
			}
		}
		ms_mutex_unlock(&s->mutex);
		if (om!=NULL){
			timestamp=obj->ticker->time*90;/* rtp uses a 90000 Hz clockrate for video*/
			mblk_set_timestamp_info(om,timestamp);
			mblk_set_marker_info(om,TRUE);
			ms_queue_put(obj->outputs[0],om);
			/*ms_message("picture sent");*/
			s->frame_count++;
		}
	}else flushq(&s->rq,0);
}
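
The timestamp computation above works because RTP video uses a 90000 Hz clock while the ticker time is in milliseconds: 90000 / 1000 = 90 ticks per millisecond. For example, a frame grabbed at t = 40 ms (25 fps) is stamped 40 * 90 = 3600.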
Example #4
static void
handle_mflush(queue_t *qp, mblk_t *mp)
{
	mblk_t *nmp;
	DBG1("M_FLUSH on %s side", zc_side(qp));

	if (*mp->b_rptr & FLUSHW) {
		DBG1("M_FLUSH, FLUSHW, %s side", zc_side(qp));
		flushq(qp, FLUSHDATA);
		*mp->b_rptr &= ~FLUSHW;
		if ((*mp->b_rptr & FLUSHR) == 0) {
			/*
			 * FLUSHW only. Change to FLUSHR and putnext other side,
			 * then we are done.
			 */
			*mp->b_rptr |= FLUSHR;
			if (zc_switch(RD(qp)) != NULL) {
				putnext(zc_switch(RD(qp)), mp);
				return;
			}
		} else if ((zc_switch(RD(qp)) != NULL) &&
		    (nmp = copyb(mp)) != NULL) {
			/*
			 * It is a FLUSHRW; we copy the mblk and send
			 * it to the other side, since we still need to use
			 * the mblk in FLUSHR processing, below.
			 */
			putnext(zc_switch(RD(qp)), nmp);
		}
	}

	if (*mp->b_rptr & FLUSHR) {
		DBG("qreply(qp) turning FLUSHR around\n");
		qreply(qp, mp);
		return;
	}
	freemsg(mp);
}
Example #5
STATIC streamscall int
srvmod_wput(queue_t *q, mblk_t *mp)
{
	if (unlikely(mp->b_datap->db_type == M_FLUSH)) {
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
		}
	}
	if (likely(mp->b_datap->db_type >= QPCTL || (q->q_first == NULL && !(q->q_flag & QSVCBUSY)
						     && bcanputnext(q, mp->b_band)))) {
		putnext(q, mp);
		return (0);
	}
	/* always buffer, always schedule out of service procedure for testing */
	if (unlikely(putq(q, mp) == 0)) {
		mp->b_band = 0;
		putq(q, mp);	/* this must succeed */
	}
	return (0);
}
Example #6
static void winsnd_read_postprocess(MSFilter *f){
	WinSnd *d=(WinSnd*)f->data;
	MMRESULT mr;
	int i;
#ifndef _TRUE_TIME
	ms_ticker_set_time_func(f->ticker,NULL,NULL);
#endif
	d->running=FALSE;
	mr=waveInStop(d->indev);
	if (mr != MMSYSERR_NOERROR){
		ms_error("waveInStop() error");
		return ;
	}
	mr=waveInReset(d->indev);
	if (mr != MMSYSERR_NOERROR){
		ms_error("waveInReset() error");
		return ;
	}
	for(i=0;i<WINSND_NBUFS;++i){
		WAVEHDR *hdr=&d->hdrs_read[i];
		if (hdr->dwFlags & WHDR_PREPARED)
		{
			mr = waveInUnprepareHeader(d->indev,hdr,sizeof (*hdr));
			if (mr != MMSYSERR_NOERROR){
				ms_error("waveInUnPrepareHeader() error");
			}
		}
	}
	mr = waveInClose(d->indev);
	if (mr != MMSYSERR_NOERROR){
		ms_error("waveInClose() error");
		return ;
	}

	ms_message("Shutting down sound device (playing: %i) (input-output: %i) (notplayed: %i)", d->nbufs_playing, d->stat_input - d->stat_output, d->stat_notplayed);
	flushq(&d->rq,0);
}
Example #7
static void au_read_uninit(MSFilter *f) {
    AURead *d = (AURead *) f->data;
    flushq(&d->rq,0);
    au_common_uninit(&d->common);
    ms_free(d);
}
Example #8
static void vfw_postprocess(MSFilter * obj){
	VfwState *s=(VfwState*)obj->data;
	vfw_engine_stop_capture(s->eng);
	flushq(&s->rq,0);
}
Example #9
/*
 * cvc_wput()
 *	cn driver does a strwrite of console output data to rconsvp which has
 *	been set by consconfig. The data enters the cvc stream at the streamhead
 *	and flows thru ttycompat and ldterm which have been pushed on the
 *	stream.  Console output data gets sent out either to cvcredir, if the
 *	network path is available and selected, or to IOSRAM otherwise.  Data is
 *	sent to cvcredir via its read queue (cvcoutput_q, which gets set in
 *	cvc_register()).  If the IOSRAM path is selected, or if previous mblks
 *	are currently queued up for processing, the new mblk will be queued
 *	and handled later on by cvc_wsrv.
 */
static int
cvc_wput(queue_t *q, mblk_t *mp)
{
	int		error = 0;

	rw_enter(&cvclock, RW_READER);

	CVC_DBG2(CVC_DBG_WPUT, "mp 0x%x db_type 0x%x",
	    mp, mp->b_datap->db_type);

	switch (mp->b_datap->db_type) {

		case M_IOCTL:
		case M_CTL: {
			struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

			switch (iocp->ioc_cmd) {
				/*
				 * These ioctls are only supposed to be
				 * processed after everything else that is
				 * already queued awaiting processing, so throw
				 * them on the queue and let cvc_wsrv handle
				 * them.
				 */
				case TCSETSW:
				case TCSETSF:
				case TCSETAW:
				case TCSETAF:
				case TCSBRK:
					putq(q, mp);
					break;

				default:
					cvc_ioctl(q, mp);
			}
			break;
		}

		case M_FLUSH:
			if (*mp->b_rptr & FLUSHW) {
				/*
				 * Flush our write queue.
				 */
				flushq(q, FLUSHDATA);
				*mp->b_rptr &= ~FLUSHW;
			}
			if (*mp->b_rptr & FLUSHR) {
				flushq(RD(q), FLUSHDATA);
				qreply(q, mp);
			} else
				freemsg(mp);
			break;

		case M_STOP:
			cvc_stopped = 1;
			freemsg(mp);
			break;

		case M_START:
			cvc_stopped = 0;
			freemsg(mp);
			qenable(q);  /* Start up delayed messages */
			break;

		case M_READ:
			/*
			 * ldterm handles this (VMIN/VTIME processing).
			 */
			freemsg(mp);
			break;

		default:
			cmn_err(CE_WARN, "cvc_wput: unexpected mblk type - mp ="
			    " 0x%p, type = 0x%x", mp, mp->b_datap->db_type);
			freemsg(mp);
			break;

		case M_DATA:
			/*
			 * If there are other mblks queued up for transmission,
			 * or we're using IOSRAM either because cvcredir hasn't
			 * registered yet or because we were configured that
			 * way, or cvc has been stopped or suspended, place this
			 * mblk on the input queue for future processing.
			 * Otherwise, hand it off to cvcredir for transmission
			 * via the network.
			 */
			if (q->q_first != NULL || cvcoutput_q == NULL ||
			    via_iosram || cvc_stopped == 1 ||
			    cvc_suspended == 1) {
				(void) putq(q, mp);
			} else {
				/*
				 * XXX - should canputnext be called here?
				 * Starfire's cvc doesn't do that, and it
				 * appears to work anyway.
				 */
				(void) putnext(cvcoutput_q, mp);
			}
			break;

	}
	rw_exit(&cvclock);
	return (error);
}
Example #10
STATIC streamscall int
ip2xinet_lrput(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	union DL_primitives *dp;
	struct ip2xinet_priv *privptr;
	struct net_device *dev;
	int i;

	spin_lock(&ip2xinet_lock);

	/* use the first open ip device */
	for (i = 0; i < NUMIP2XINET; i++) {
		privptr = &ip2xinet_devs[i].priv;

		if (privptr->state == 1)
			break;
	}
	if (i == NUMIP2XINET)
		i = 0;		/* All devices closed, pick the 1st one */
	/* send data up to ip through the 1st open device */
	dev = &ip2xinet_devs[i].dev;

	switch (mp->b_datap->db_type) {
	case M_CTL:
		freemsg(mp);
		break;

	case M_DATA:
		/* NOTE: We don't expect any M_DATA messages from xinet */
		freemsg(mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		dp = (union DL_primitives *) mp->b_rptr;

#if 0
#ifdef DEBUG
		printk("ip2xinet_lrput: %s size=%d\n", x25dbdlpmsg(dp->dl_primitive),
		       x25dbmsgsize(mp));
#endif
#endif

		switch (dp->dl_primitive) {
		case DL_BIND_ACK:

			/* if we're in BNDPND and receive a BIND_ACK we go to IDLE */
			ip2xinet_status.ip2x_dlstate = DL_IDLE;

			/* If we're DL_IDLE, then dev is open and the kernel can transmit */
			for (i = 0; i < NUMIP2XINET; i++) {
				privptr = &ip2xinet_devs[i].priv;

				if (privptr->state == 1)
					netif_start_queue(&(ip2xinet_devs[i].dev));
			}
			freemsg(mp);	/* Frees bind_ack no longer needed */
			break;

		case DL_INFO_ACK:

			/* NOTE: currently we don't send info_req to xinet */

			freemsg(mp);
			break;

		case DL_ERROR_ACK:
			switch (ip2xinet_status.ip2x_dlstate) {
			case DL_ATTACH_PENDING:
				/* if we receive ERROR_ACK and we're in ATTACH_PEND go into
				   UNATTACHED */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;

			case DL_BIND_PENDING:
				/* if we're in BNDPND and receive an ERR ack we go to UNBND, */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;

			case DL_UNBIND_PENDING:
				/* If we're in UNBIND_PEND and we receive ERROR_ACK we go into IDLE 
				 */
				ip2xinet_status.ip2x_dlstate = DL_IDLE;
				freemsg(mp);
				break;

			case DL_DETACH_PENDING:
				/* If we're in DETACH_PEND and receive an ERROR_ACK we go into
				   UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;
			default:
				freemsg(mp);
				break;

			}
			break;

		case DL_UNITDATA_IND:
			/* if we're in IDLE we can get DL_UNITDATA_IND with data and call the guy
			   who would normally receive data from interrupt handler. */

			/* Check state: can't transmit if dev is closed :-) Note: we have to check
			   both the dlpi state and dev->start because during a close the DLPI state 
			   could remain DL_IDLE if we couldn't allocate mblk for UNBIND_REQ. There
			   are many ways in which the dev->start could be 1 but dlpi state - not
			   DL_IDLE. */
			if (ip2xinet_status.ip2x_dlstate == DL_IDLE && privptr->state == 1)
			{
				mblk_t *newmp;
				unsigned char *buf;
				int len, tmplen;
				struct ethhdr *eth;
				struct sk_buff *skb;

				newmp = unlinkb(mp);

				freemsg(mp);
				mp = newmp;

				/* 1st pass through.  figure out the len */
				for (len = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont)
					len += (newmp->b_wptr - newmp->b_rptr);

				/* ALLOCATE skb of length len+2, COPY from mp chain to skb */

				skb = dev_alloc_skb(len + 2);
				if (!skb) {
					printk("ip2xinet rx: failed to allocate an skb\n");
					freemsg(mp);
					break;
				}
				skb_reserve(skb, 2);	/* align IP on 16B boundary */
				/* The packet has been retrieved from the transmission medium.
				   Build an skb around it, so upper layers can handle it */
				buf = skb_put(skb, len);
				for (newmp = mp, tmplen = sizeof(struct ethhdr); newmp != NULL;
				     newmp = newmp->b_cont) {
					bcopy(newmp->b_rptr, buf + tmplen,
					      newmp->b_wptr - newmp->b_rptr);
					tmplen += (newmp->b_wptr - newmp->b_rptr);
				}
				eth = (struct ethhdr *) buf;

				/* I am not sure it's necessary, but just in case... */

				memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
				memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
				eth->h_proto = 0x8;	/* ETH_P_IP in network order */
				eth->h_source[ETH_ALEN - 1] ^= 0x01;	/* say src is us xor 1 */

				/* send it to ip2xinet_rx for handling */
				ip2xinet_rx(dev, skb);
			}
			freemsg(mp);
			break;
		case DL_UDERROR_IND:
			freemsg(mp);
			break;

		case DL_OK_ACK:
			switch (dp->ok_ack.dl_correct_primitive) {

			case DL_ATTACH_REQ:
				/* if we're in ATTACH_PEND and we received OK_ACK1 change state to
				   UNBND */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				/* We just completed building up the X.25 stack below us. If IP is
				   already above us, we need to send down the bind that we would
				   normally do when IP opens us.  This allows us to restart the
				   X.25 stack without restarting TCP/IP. */
				if (ip2xinet_num_ip_opened != 0)
					ip2xinet_send_down_bind(WR(q));
				break;

			case DL_UNBIND_REQ:
				/* If we're in UNBIND_PEND and receive OK_ACK1 we go to UNBND. */
				ip2xinet_status.ip2x_dlstate = DL_UNBOUND;
				freemsg(mp);
				break;

			case DL_DETACH_REQ:
				/* If we're in DETACH_PEND and receive OK_ACK1 we go to UNATT */
				ip2xinet_status.ip2x_dlstate = DL_UNATTACHED;
				freemsg(mp);
				break;

			default:
				freemsg(mp);
				break;
			}
			break;

		default:
			printk("ip2xinet_lrput: bad prim=0x%lx", (ulong) dp->dl_primitive);
			freemsg(mp);
			break;
		}
		break;

	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
		}
		if (mp->b_rptr[0] & FLUSHW) {
			mp->b_rptr[0] &= ~FLUSHR;
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(WR(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(WR(q), FLUSHDATA);
			qenable(WR(q));
			if (!putq(WR(q), mp)) {
				mp->b_band = 0;
				putq(WR(q), mp);
			}
		} else
			freemsg(mp);
		break;

	case M_HANGUP:
		/* send it to the guy that linked us up, what he does is his problem. */
		if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	case M_IOCACK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a success Rejoice :-) */
			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	case M_IOCNAK:
		iocp = (struct iocblk *) mp->b_rptr;
		if (iocp->ioc_cmd == SIOCSIFMTU) {
			/* The set MTU ioctl was a failure From looking at xinet code this is *
			   impossible, so ignore it */

			freemsg(mp);
		} else if (!putq(ip2xinet_status.readq, mp)) {
			mp->b_band = 0;
			putq(ip2xinet_status.readq, mp);
		}
		break;

	default:
		printk("ip2xinet_lrput: bad type=%d", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}

	spin_unlock(&ip2xinet_lock);
	return (0);
}
Example #11
static void
m3ua_wput(queue_t *q, mblk_t *mp)
{
	int err = EOPNOTSUPP;

	trace();
	if (q->q_count && mp->b_datap->db_type < QPCTL) {
		putq(q, mp);
		return;
	}
	switch (mp->b_datap->db_type) {
	case M_DATA:
		if ((err = m3ua_m_proto(q, mp)))
			break;
		return;
	case M_CTL:
	case M_PROTO:
	case M_PCPROTO:
		if ((err = m3ua_m_proto(q, mp)))
			break;
		return;
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			if (q->q_next) {
				putnext(q, mp);
				return;
			}
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			qreply(q, mp);
		} else
			break;
		return;
	case M_IOCTL:
		if ((err = ls_m_ioctl(q, mp)))
			break;
		return;
	}
	switch (err) {
	case EAGAIN:
		if (mp->b_datap->db_type < QPCTL) {
			putq(q, mp);
			return;
		}
		break;
	case EOPNOTSUPP:
		if (q->q_next) {
			putnext(q, mp);
			return;
		}
	}
	trace();
	freemsg(mp);
	return;
}
Example #12
static streamscall int
sad_put(queue_t *q, mblk_t *mp)
{
	struct sad *sad = q->q_ptr;
	union ioctypes *ioc;
	int err = 0, rval = 0, count = 0;
	mblk_t *dp = mp->b_cont;
	caddr_t sa_addr, sl_addr;
	size_t sa_size, sl_size;

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			queue_t *rq = RD(q);

			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(rq, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(rq, FLUSHDATA);
			qreply(q, mp);
			return (0);
		}
		break;
	case M_IOCTL:
		ioc = (typeof(ioc)) mp->b_rptr;
#ifdef WITH_32BIT_CONVERSION
		if (ioc->iocblk.ioc_flag == IOC_ILP32) {
			/* XXX: following pointer conversion does not work on all architectures. */
			sa_addr =
			    (caddr_t) (unsigned long) (uint32_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush32);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_addr = (caddr_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->iocblk.ioc_cmd) {
		case SAD_SAP:
			err = -EPERM;
#ifdef HAVE_KMEMB_STRUCT_CRED_UID_VAL
			if (ioc->iocblk.ioc_uid.val != 0)
				goto nak;
#else
			if (ioc->iocblk.ioc_uid != 0)
				goto nak;
#endif
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_sap_state1;
		case SAD_GAP:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_gap_state1;
		case SAD_LAP:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_lap_state1;
		case SAD_VML:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sl_addr;
				ioc->copyreq.cq_size = sl_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_vml_state1;
		}
		err = -EINVAL;
		goto nak;
	case M_IOCDATA:
		ioc = (typeof(ioc)) mp->b_rptr;
		if (ioc->copyresp.cp_rval != (caddr_t) 0) {
			sad->transparent = 0;
			sad->iocstate = 0;
			goto abort;
		}
#ifdef WITH_32BIT_CONVERSION
		if (ioc->copyresp.cp_flag == IOC_ILP32) {
			sa_size = sizeof(struct strapush32);
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_size = sizeof(struct strapush);
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->copyresp.cp_cmd) {
		case SAD_SAP:
			switch (sad->iocstate) {
			case 1:
			      sad_sap_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_set(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap = (typeof(sap)) dp->b_rptr;

					if ((err = apush_set(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_GAP:
			switch (sad->iocstate) {
			case 1:
			      sad_gap_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_get(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_get(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_LAP:
			switch (sad->iocstate) {
			case 1:
			      sad_lap_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_lst(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_lst(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:
				/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_VML:
			switch (sad->iocstate) {
			case 1:
			      sad_vml_state1:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sl_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct str_list32 *slp32 = (typeof(slp32)) dp->b_rptr;

					sad->sl.sl_nmods = slp32->sl_nmods;
					sad->sl.sl_modlist =
					    (struct str_mlist *) (unsigned long) slp32->sl_modlist;
				} else
#endif
				{
					struct str_list *slp = (typeof(slp)) dp->b_rptr;

					sad->sl.sl_nmods = slp->sl_nmods;
					sad->sl.sl_modlist = slp->sl_modlist;
				}
				err = -EINVAL;
				if (1 > sad->sl.sl_nmods || sad->sl.sl_nmods > MAXAPUSH)
					goto nak;
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = (caddr_t) sad->sl.sl_modlist;
				ioc->copyreq.cq_size = sad->sl.sl_nmods * sizeof(struct str_mlist);
				ioc->copyreq.cq_flag = 0;
				sad->iocstate = 2;
				qreply(q, mp);
				return (0);
			case 2:
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr
				    + sad->sl.sl_nmods * sizeof(struct str_mlist))
					goto nak;
				sad->sl.sl_modlist = (struct str_mlist *) dp->b_rptr;
				if ((err = apush_vml(&sad->sl)) < 0)
					goto nak;
				rval = err;
				goto ack;
			}
			err = -EIO;
			goto nak;
		}
	}
      abort:
	freemsg(mp);
	return (0);
      nak:
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCNAK;
	ioc->iocblk.ioc_count = 0;
	ioc->iocblk.ioc_rval = -1;
	ioc->iocblk.ioc_error = -err;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
      ack:
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCACK;
	ioc->iocblk.ioc_count = count;
	ioc->iocblk.ioc_rval = rval;
	ioc->iocblk.ioc_error = 0;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
}
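
For context, the SAD_SAP/SAD_GAP ioctls dispatched above are normally issued from user space against the STREAMS administrative driver. The following sketch shows one plausible caller; the device path, header names and the pushed module are assumptions, not taken from the driver above:

#include <fcntl.h>
#include <string.h>
#include <stropts.h>
#include <sys/sad.h>
#include <unistd.h>

int autopush_sketch(int maj, int min)
{
	struct strapush sap;
	int fd = open("/dev/sad/admin", O_RDWR);	/* assumed admin device path */

	if (fd < 0)
		return (-1);
	(void) memset(&sap, 0, sizeof(sap));
	sap.sap_cmd = SAP_ONE;		/* configure a single minor device */
	sap.sap_major = maj;
	sap.sap_minor = min;
	sap.sap_npush = 1;
	(void) strcpy(sap.sap_list[0], "ldterm");	/* example module to autopush */
	if (ioctl(fd, SAD_SAP, &sap) < 0) {	/* transparent ioctl, as handled above */
		(void) close(fd);
		return (-1);
	}
	(void) close(fd);
	return (0);
}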
Example #13
/*
 * dm2s_receive - Read all messages from the mailbox.
 *
 * This function is called from the read service procedure, to
 * receive the messages awaiting in the mailbox.
 */
void
dm2s_receive(dm2s_t *dm2sp)
{
	queue_t	*rq = dm2sp->ms_rq;
	mblk_t	*mp;
	int	ret;
	uint32_t len;

	DPRINTF(DBG_DRV, ("dm2s_receive: called\n"));
	ASSERT(dm2sp != NULL);
	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
	if (rq == NULL) {
		return;
	}
	/*
	 * As the number of messages in the mailbox is pretty limited,
	 * it is safe to process all messages in one loop.
	 */
	while (DM2S_MBOX_READY(dm2sp) && ((ret = scf_mb_canget(dm2sp->ms_target,
	    dm2sp->ms_key, &len)) == 0)) {
		DPRINTF(DBG_MBOX, ("dm2s_receive: mb_canget len=%d\n", len));
		if (len == 0) {
			break;
		}
		mp = allocb(len, BPRI_MED);
		if (mp == NULL) {
			DPRINTF(DBG_WARN, ("dm2s_receive: allocb failed\n"));
			/*
			 * Start a bufcall so that we can retry again
			 * when memory becomes available.
			 */
			dm2sp->ms_rbufcid = qbufcall(rq, len, BPRI_MED,
			    dm2s_bufcall_rcv, dm2sp);
			if (dm2sp->ms_rbufcid == 0) {
				DPRINTF(DBG_WARN,
				    ("dm2s_receive: qbufcall failed\n"));
				/*
				 * if bufcall fails, start a timeout to
				 * initiate a re-try after some time.
				 */
				DTRACE_PROBE1(dm2s_rqtimeout__start,
				    dm2s_t, dm2sp);
				dm2sp->ms_rq_timeoutid = qtimeout(rq,
				    dm2s_rq_timeout, (void *)dm2sp,
				    drv_usectohz(DM2S_SM_TOUT));
			}
			break;
		}

		/*
		 * Only a single scatter/gather element is enough here.
		 */
		dm2sp->ms_sg_rcv.msc_dptr = (caddr_t)mp->b_wptr;
		dm2sp->ms_sg_rcv.msc_len = len;
		DPRINTF(DBG_MBOX, ("dm2s_receive: calling getmsg\n"));
		ret = scf_mb_getmsg(dm2sp->ms_target, dm2sp->ms_key, len, 1,
		    &dm2sp->ms_sg_rcv, 0);
		DPRINTF(DBG_MBOX, ("dm2s_receive: getmsg ret=%d\n", ret));
		if (ret != 0) {
			freemsg(mp);
			break;
		}
		DMPBYTES("dm2s: Getmsg: ", len, 1, &dm2sp->ms_sg_rcv);
		mp->b_wptr += len;
		/*
		 * Queue the messages in the rq, so that the service
		 * procedure handles sending the messages up the stream.
		 */
		putq(rq, mp);
	}

	if ((!DM2S_MBOX_READY(dm2sp)) || (ret != ENOMSG && ret != EMSGSIZE)) {
		/*
		 * Something went wrong, flush pending messages
		 * and initiate a hangup.
		 * Note: flushing the wq initiates a faster close.
		 */
		mutex_exit(&dm2sp->ms_lock);
		flushq(WR(rq), FLUSHDATA);
		(void) putnextctl(rq, M_HANGUP);
		DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
		mutex_enter(&dm2sp->ms_lock);
		DPRINTF(DBG_WARN, ("dm2s_receive: encountered unknown "
		    "condition - hangup ret=%d\n", ret));
	}
}
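
dm2s_receive() is documented as being called from the read service procedure, which also pushes the messages it queued up the stream. The actual dm2s_rsrv is not reproduced here; this is only a sketch of what such a service routine would look like, assuming rq->q_ptr holds the dm2s_t just as wq->q_ptr does in dm2s_wput below:

static int
dm2s_rsrv_sketch(queue_t *rq)
{
	dm2s_t	*dm2sp = (dm2s_t *)rq->q_ptr;
	mblk_t	*mp;

	mutex_enter(&dm2sp->ms_lock);
	dm2s_receive(dm2sp);		/* pull waiting mailbox data onto rq */
	mutex_exit(&dm2sp->ms_lock);

	while ((mp = getq(rq)) != NULL) {
		if (!canputnext(rq)) {
			(void) putbq(rq, mp);	/* wait to be back-enabled */
			break;
		}
		putnext(rq, mp);
	}
	return (0);
}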
Example #14
/*
 * dm2s_wput - Streams write side put routine.
 *
 * All M_DATA messages are queued so that they are transmitted in
 * the service procedure. This is done to simplify the streams
 * synchronization. Other messages are handled appropriately.
 */
int
dm2s_wput(queue_t *wq, mblk_t *mp)
{
	dm2s_t	*dm2sp = (dm2s_t *)wq->q_ptr;

	DPRINTF(DBG_DRV, ("dm2s_wput: called\n"));
	if (dm2sp == NULL) {
		return (ENODEV);   /* Can't happen. */
	}

	switch (mp->b_datap->db_type) {
	case (M_DATA):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_DATA message\n"));
		while (mp->b_wptr == mp->b_rptr) {
			mblk_t *mp1;

			mp1 = unlinkb(mp);
			freemsg(mp);
			mp = mp1;
			if (mp == NULL) {
				return (0);
			}
		}

		/*
		 * Simply queue the message and handle it in the service
		 * procedure.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		return (0);

	case (M_PROTO):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_PROTO message\n"));
		/* We don't expect this */
		mp->b_datap->db_type = M_ERROR;
		mp->b_rptr = mp->b_wptr = mp->b_datap->db_base;
		*mp->b_wptr++ = EPROTO;
		qreply(wq, mp);
		return (EINVAL);

	case (M_IOCTL):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_IOCTL message\n"));
		if (MBLKL(mp) < sizeof (struct iocblk)) {
			freemsg(mp);
			return (0);
		}
		/*
		 * No ioctls required to be supported by this driver, so
		 * return EINVAL for all ioctls.
		 */
		miocnak(wq, mp, 0, EINVAL);
		break;

	case (M_CTL):
		DPRINTF(DBG_DRV, ("dm2s_wput: M_CTL message\n"));
		/*
		 * No M_CTL messages need to supported by this driver,
		 * so simply ignore them.
		 */
		freemsg(mp);
		break;

	case (M_FLUSH):
		DPRINTF(DBG_DRV, (
		    "dm2s_wput: M_FLUSH message 0x%X\n", *mp->b_rptr));
		if (*mp->b_rptr & FLUSHW) {	/* Flush write-side */
			(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
			    MB_FLUSH_SEND);
			flushq(wq, FLUSHDATA);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
			    MB_FLUSH_RECEIVE);
			flushq(RD(wq), FLUSHDATA);
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		DPRINTF(DBG_DRV, ("dm2s_wput: UNKNOWN message\n"));
		freemsg(mp);

	}
	return (0);
}
Example #15
/*
 * telmodwput:
 * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon
 * to process something.  M_CTL's are data from the daemon bound for the
 * network.  We forward them immediately.  There are two classes of ioctl's
 * we must handle here also.  One is ioctl's forwarded by ptem which we
 * ignore.  The other is ioctl's issued by the daemon to control us.
 * Process them appropriately.  M_PROTO's we pass along, figuring they
 * are TPI operations for TCP.  M_FLUSH requires careful processing, since
 * telnet cannot tolerate flushing its protocol requests.  Also the flushes
 * can be running either daemon<->TCP or application<->telmod.  We must
 * carefully deal with this.
 */
static void
telmodwput(
	queue_t *q,	/* Pointer to the write queue */
	mblk_t *mp)	/* Pointer to current message block */
{
	struct telmod_info	*tmip;
	struct iocblk *ioc;
	mblk_t *savemp;
	int rw;
	int error;

	tmip = (struct telmod_info *)q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_DATA:
		if (!canputnext(q) || (tmip->flags & TEL_STOPPED) ||
			(q->q_first)) {
			noenable(q);
			(void) putq(q, mp);
			break;
		}
		/*
		 * This routine parses data generating from ptm side.
		 * Insert a null character if carriage return
		 * is not followed by line feed unless we are in binary mode.
		 * Also, duplicate IAC if found in the data.
		 */
		(void) snd_parse(q, mp);
		break;

	case M_CTL:
		if (((mp->b_wptr - mp->b_rptr) == 1) &&
			(*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(q, mp);
		break;

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/*
		 * This ioctl is issued by user level daemon to
		 * request one more message block to process protocol
		 */
		case TEL_IOC_GETBLK:
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags |= TEL_GETBLK;
			qenable(RD(q));
			enableok(RD(q));

			miocack(q, mp, 0, 0);
			break;

		/*
		 * This ioctl is issued by user level daemon to reenable the
		 * read and write queues. This is issued during startup time
		 * after setting up the mux links and also after processing
		 * the protocol.  It is also issued after each time
		 * an unrecognized telnet option is forwarded to the daemon.
		 */
		case TEL_IOC_ENABLE:

			/*
			 * Send negative ack if TEL_STOPPED flag is not set
			 */
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags &= ~TEL_STOPPED;
			if (mp->b_cont) {
				(void) putbq(RD(q), mp->b_cont);
				mp->b_cont = 0;
			}

			qenable(RD(q));
			enableok(RD(q));
			qenable(q);
			enableok(q);

			miocack(q, mp, 0, 0);
			break;

		/*
		 * Set binary/normal mode for input and output
		 * according to the instructions from the daemon.
		 */
		case TEL_IOC_MODE:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			tmip->flags |= *(mp->b_cont->b_rptr) &
			    (TEL_BINARY_IN|TEL_BINARY_OUT);
			miocack(q, mp, 0, 0);
			break;

#ifdef DEBUG
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
		case TIOCSWINSZ:
			miocnak(q, mp, 0, EINVAL);
			break;
#endif
		case CRYPTPASSTHRU:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (*(mp->b_cont->b_rptr) == 0x01)
				tmip->flags |= TEL_IOCPASSTHRU;
			else
				tmip->flags &= ~TEL_IOCPASSTHRU;

			miocack(q, mp, 0, 0);
			break;

		default:
			if (tmip->flags & TEL_IOCPASSTHRU) {
				putnext(q, mp);
			} else {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				"telmodwput: unexpected ioctl type 0x%x",
					ioc->ioc_cmd);
#endif
				miocnak(q, mp, 0, EINVAL);
			}
			break;
		}
		break;

	case M_FLUSH:
		/*
		 * Flushing is tricky:  We try to flush all we can, but certain
		 * data cannot be flushed.  Telnet protocol sequences cannot
		 * be flushed.  So, TCP's queues cannot be flushed since we
		 * cannot tell what might be telnet protocol data.  Then we
		 * must take care to create and forward out-of-band data
		 * indicating the flush to the far side.
		 */
		rw = *mp->b_rptr;
		if (rw & FLUSHR) {
			/*
			 * We cannot flush our read queue, since there may
			 * be telnet protocol bits in the queue, awaiting
			 * processing.  However, once it leaves this module
			 * it's guaranteed that all protocol data is in
			 * M_CTL, so we do flush read data beyond us, expecting
			 * them (actually logindmux) to do FLUSHDATAs also.
			 */
			*mp->b_rptr = rw & ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		if (rw & FLUSHW) {
			/*
			 * Since all telnet protocol data comes from the
			 * daemon, stored as M_CTL messages, flushq will
			 * do exactly what's needed:  Flush bytes which do
			 * not have telnet protocol data.
			 */
			flushq(q, FLUSHDATA);
		}
		break;

	case M_PCPROTO:
		putnext(q, mp);
		break;

	case M_PROTO:
		/* We may receive T_DISCON_REQ from the mux */
		if (!canputnext(q) || q->q_first != NULL)
			(void) putq(q, mp);
		else
			putnext(q, mp);
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodwput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
		break;
	}
}
Example #16
/*
 * telmodrput:
 * Be sure to preserve data order.  If the daemon is waiting for additional
 * data (TEL_GETBLK state) forward new data.  Otherwise, apply normal
 * telnet protocol processing to M_DATA.  Take notice of TLI messages
 * indicating connection tear-down, and change them into M_HANGUP's.
 */
static void
telmodrput(queue_t *q, mblk_t *mp)
{
	mblk_t	*newmp;
	struct telmod_info    *tmip = (struct telmod_info *)q->q_ptr;
	union T_primitives *tip;

	if ((mp->b_datap->db_type < QPCTL) &&
	    ((q->q_first) || ((tmip->flags & TEL_STOPPED) &&
	    !(tmip->flags & TEL_GETBLK)) || !canputnext(q))) {
		(void) putq(q, mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:

		/*
		 * If the user level daemon requests for 1 more
		 * block of data (needs more data for protocol processing)
		 * create a M_CTL message block with the mp.
		 */
is_mdata:
		if (tmip->flags & TEL_GETBLK) {
			if ((newmp = allocb(sizeof (char), BPRI_MED)) == NULL) {
				recover(q, mp, msgdsize(mp));
				return;
			}
			newmp->b_datap->db_type = M_CTL;
			newmp->b_wptr = newmp->b_rptr + 1;
			*(newmp->b_rptr) = M_CTL_MAGIC_NUMBER;
			newmp->b_cont = mp;
			tmip->flags &= ~TEL_GETBLK;
			noenable(q);
			tmip->flags |= TEL_STOPPED;

			putnext(q, newmp);

			break;
		}
		/*
		 * call the protocol parsing routine which processes
		 * the data part of the message block first. Then it
		 * handles protocol and CR/LF processing.
		 * If an error is found inside allocb/dupb, recover
		 * routines inside rcv_parse will queue up the
		 * original message block in its service queue.
		 */
		(void) rcv_parse(q, mp);
		break;

	case M_FLUSH:
		/*
		 * Since M_FLUSH came from TCP, we mark it bound for
		 * daemon, not tty.  This only happens when TCP expects
		 * to do a connection reset.
		 */
		mp->b_flag |= MSGMARK;
		if (*mp->b_rptr & FLUSHR)
			flushq(q, FLUSHALL);
		putnext(q, mp);
		break;

	case M_PCSIG:
	case M_ERROR:
		if (tmip->flags & TEL_GETBLK)
			tmip->flags &= ~TEL_GETBLK;
		/* FALLTHRU */
	case M_IOCACK:
	case M_IOCNAK:
	case M_SETOPTS:
		putnext(q, mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		if (tmip->flags & TEL_GETBLK)
			tmip->flags &= ~TEL_GETBLK;

		tip = (union T_primitives *)mp->b_rptr;
		switch (tip->type) {

		case T_ORDREL_IND:
		case T_DISCON_IND:
			/* Make into M_HANGUP and putnext */
			ASSERT(mp->b_cont == NULL);
			mp->b_datap->db_type = M_HANGUP;
			mp->b_wptr = mp->b_rptr;
			if (mp->b_cont) {
				freemsg(mp->b_cont);
				mp->b_cont = NULL;
			}
			/*
			 * If we haven't already, send T_UNBIND_REQ to prevent
			 * TCP from going into "BOUND" state and locking up the
			 * port.
			 */
			if (tip->type == T_DISCON_IND && tmip->unbind_mp !=
			    NULL) {
				putnext(q, mp);
				qreply(q, tmip->unbind_mp);
				tmip->unbind_mp = NULL;
			} else {
				putnext(q, mp);
			}
			break;

		case T_EXDATA_IND:
		case T_DATA_IND:	/* conform to TPI, but never happens */
			newmp = mp->b_cont;
			freeb(mp);
			mp = newmp;
			if (mp) {
				ASSERT(mp->b_datap->db_type == M_DATA);
				if (msgdsize(mp) != 0) {
					goto is_mdata;
				}
				freemsg(mp);
			}
			break;

		/*
		 * We only get T_OK_ACK when we issue the unbind, and it can
		 * be ignored safely.
		 */
		case T_OK_ACK:
			ASSERT(tmip->unbind_mp == NULL);
			freemsg(mp);
			break;

		default:
#ifdef DEBUG
			cmn_err(CE_NOTE,
			    "telmodrput: unexpected TLI primitive msg "
			    "type 0x%x", tip->type);
#endif
			freemsg(mp);
		}
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodrput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
	}
}
Example #17
void msgb_allocator_uninit(msgb_allocator_t *a){
	flushq(&a->q,-1);
}
Example #18
File: spx.c Project: iHaD/openss7
static streamscall int
spx_wput(queue_t *q, mblk_t *mp)
{
	struct spx *p = q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			if (q->q_next) {
				putnext(q, mp);
				break;
			}
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (q->q_next) {
				putnext(q, mp);
				break;
			}
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			qreply(q, mp);
			break;
		}
		freemsg(mp);
		break;
	case M_PROTO:
		/* We go to some trouble here to make sure that we do not intercept M_PROTO
		   messages that are not for us.  This is because we want the stream to support
		   passing of M_PROTO and M_PCPROTO messages as well, regardless of whether it is
		   just a loop-back device or whether it is an unnamed pipe. */
		if (p->init == 0 && mp->b_wptr >= mp->b_rptr + sizeof(long)) {
			queue_t *oq = NULL;
			struct spx *x;

			/* not necessarily aligned */
			bcopy(mp->b_rptr, &oq, sizeof(oq));
			/* validate against list */
			spin_lock(&spx_lock);
			for (x = spx_list; x && x->q != oq; x = x->next) ;
			if (x && x->q == oq) {
				weldq(WR(q), oq, WR(oq), q, NULL, NULL, NULL);
				spin_unlock(&spx_lock);
				/* FIXME: welding is probably not enough.  We probably have to link 
				   the two stream heads together, pipe-style as well as setting
				   some stream head characteristics.  People would be better to use 
				   the pipe(4) device anyway. */
				break;
			}
			spin_unlock(&spx_lock);
		}
	default:
		if (q->q_next)
			putnext(q, mp);
		else
			qreply(q, mp);
		break;
	}
	if (p->init == 0)
		p->init = 1;
	return (0);
#if 0
      nak:
	{
		union ioctypes *ioc;

		mp->b_datap->db_type = M_IOCNAK;
		ioc = (typeof(ioc)) mp->b_rptr;
		ioc->iocblk.ioc_count = 0;
		ioc->iocblk.ioc_rval = -1;
		ioc->iocblk.ioc_error = -err;
		qreply(q, mp);
		return (0);
	}
#endif
}
Example #19
static void _flush_buffer(MSBufferizer *obj){
	flushq(&obj->q,0);
	obj->size=0;
}
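
_flush_buffer() resets an MSBufferizer, which filters use to turn arbitrarily sized mblk_t input into fixed-size chunks. A hedged usage sketch follows; it assumes mediastreamer2's usual ms_queue_get/ms_bufferizer_put/ms_bufferizer_get_avail/ms_bufferizer_read calls, whose exact signatures should be checked against the target tree:

static void bufferizer_usage_sketch(MSFilter *f, MSBufferizer *buf){
	mblk_t *m;
	uint8_t chunk[320];	/* e.g. 20 ms of 16-bit mono audio at 8 kHz */

	/* accumulate whatever arrived on the input queue */
	while((m=ms_queue_get(f->inputs[0]))!=NULL)
		ms_bufferizer_put(buf,m);

	/* drain fixed-size chunks once enough data is buffered */
	while(ms_bufferizer_get_avail(buf)>=(int)sizeof(chunk)){
		ms_bufferizer_read(buf,chunk,sizeof(chunk));
		/* ...process or forward chunk here... */
	}
}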
Example #20
static int _v4w_start(V4wState *s, void *arg)
{
	MSVideoSize try_vsize;
	int tryformat;
	int i;
	s->frame_count=-1;

	if (s->pix_fmt==MS_YUV420P)
		tryformat = MS_RGB24;
	else if (s->pix_fmt==MS_RGB24)
		tryformat = MS_YUV420P;

	try_vsize.height = s->vsize.height;
	try_vsize.width = s->vsize.width;
	i = try_format(s, s->pix_fmt, &try_vsize);
	if (i==-14)
	{
		/* try second format with same size */
		i = try_format(s, tryformat, &try_vsize);
	}

	/* try both format with CIF size */
	if (i==-14 && s->vsize.height!=MS_VIDEO_SIZE_CIF_H)
	{
		try_vsize.height = MS_VIDEO_SIZE_CIF_H;
		try_vsize.width = MS_VIDEO_SIZE_CIF_W;
		i = try_format(s, s->pix_fmt, &try_vsize);
		if (i==-14)
		{
			i = try_format(s, tryformat, &try_vsize);
		}
	}
	if (i==-14 && s->vsize.height!=MS_VIDEO_SIZE_QCIF_H)
	{
		try_vsize.height = MS_VIDEO_SIZE_QCIF_H;
		try_vsize.width = MS_VIDEO_SIZE_QCIF_W;
		i = try_format(s, s->pix_fmt, &try_vsize);
		if (i==-14)
		{
			i = try_format(s, tryformat, &try_vsize);
		}
	}
	if (i==-14 && s->vsize.height!=MS_VIDEO_SIZE_VGA_H)
	{
		try_vsize.height = MS_VIDEO_SIZE_VGA_H;
		try_vsize.width = MS_VIDEO_SIZE_VGA_W;
		i = try_format(s, s->pix_fmt, &try_vsize);
		if (i==-14)
		{
			i = try_format(s, tryformat, &try_vsize);
		}
	}

	if (i==-14 && s->vsize.height!=MS_VIDEO_SIZE_QVGA_H)
	{
		try_vsize.height = MS_VIDEO_SIZE_QVGA_H;
		try_vsize.width = MS_VIDEO_SIZE_QVGA_W;
		i = try_format(s, s->pix_fmt, &try_vsize);
		if (i==-14)
		{
			i = try_format(s, tryformat, &try_vsize);
		}
	}

	if (i==0)
	{
		if (s->pix_fmt==MS_YUV420P)
			ms_message("Using YUV420P");
		else if (s->pix_fmt==MS_RGB24)
			ms_message("Using RGB24");
	}

	if (s->rotregvalue==0){
		//RemoveGraphFromRot(s->rotregvalue);		
		if (s->m_pNullRenderer!=NULL)
			s->m_pGraph->RemoveFilter(s->m_pNullRenderer);
		if (s->m_pIDXFilter!=NULL)
			s->m_pGraph->RemoveFilter(s->m_pIDXFilter);
		if (s->m_pDeviceFilter!=NULL)
			s->m_pGraph->RemoveFilter(s->m_pDeviceFilter);
		s->m_pBuilder=NULL;
		s->m_pControl=NULL;
		s->m_pIDXFilter=NULL;
		if (s->m_pDXFilter!=NULL)
			s->m_pDXFilter->Release();
		s->m_pDXFilter=NULL;
		s->m_pGraph=NULL;
		s->m_pNullRenderer=NULL;
		s->m_pDeviceFilter=NULL;
		CoUninitialize();
		s_callback = NULL;
		flushq(&s->rq,0);
		ms_message("v4w: graph not started (err=%i)", i);
		s->rotregvalue=0;
	}
	return i;
}
Example #21
static void v4w_uninit(MSFilter *f){
	V4wState *s=(V4wState*)f->data;
	int idx;
	flushq(&s->rq,0);
	ms_mutex_destroy(&s->mutex);
	for (idx=0;idx<10;idx++)
	{
		if (s->mire[idx]==NULL)
			break;
		freemsg(s->mire[idx]);
	}
	if (s->rotregvalue>0){
		HRESULT hr = s->m_pControl->Stop();
		if(FAILED(hr))
		{
			ms_message("v4w: could not stop graph");
		}

		if (s->m_pGraph!=NULL)
		{
			if (s->m_pNullRenderer!=NULL)
				s->m_pGraph->RemoveFilter(s->m_pNullRenderer);
			if (s->m_pIDXFilter!=NULL)
				s->m_pGraph->RemoveFilter(s->m_pIDXFilter);
			if (s->m_pDeviceFilter!=NULL)
				s->m_pGraph->RemoveFilter(s->m_pDeviceFilter);
		}

		if (s->m_pNullRenderer)
			s->m_pNullRenderer->Release();
		if (s->m_pIDXFilter)
			s->m_pIDXFilter->Release();
		if (s->m_pDeviceFilter)
			s->m_pDeviceFilter->Release();

		if (s->m_pBuilder)
			s->m_pBuilder->Release();
		if (s->m_pControl)
			s->m_pControl->Release();
		if (s->m_pGraph)
			s->m_pGraph->Release();

		if (s->m_pDXFilter!=NULL)
			s->m_pDXFilter->Release();

		s->m_pNullRenderer=NULL;
		s->m_pIDXFilter=NULL;
		s->m_pDeviceFilter=NULL;
		s->m_pBuilder=NULL;
		s->m_pControl=NULL;
		s->m_pGraph=NULL;
		s->m_pDXFilter=NULL;

		CoUninitialize();
		s_callback = NULL;
		flushq(&s->rq,0);
		ms_message("v4w: graph destroyed");
		s->rotregvalue=0;
	}
	ms_free(s);
}
Example #22
static void v4w_postprocess(MSFilter * obj){
	V4wState *s=(V4wState*)obj->data;
	s->start_time=0;
	s->frame_count=-1;
	flushq(&s->rq,0);
}
Example #23
// sent data notifier
err_t client_sent(void *arg, struct tcp_pcb *pcb, u16_t len)
{
    return flushq(arg);
}
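
Here flushq() is the application's own transmit routine rather than a queue primitive; client_sent() is an lwIP "sent" callback invoked when previously written data has been acknowledged. A sketch of how such a callback is typically registered follows; connect_cb and the state pointer are hypothetical names:

#include "lwip/tcp.h"

static err_t connect_cb(void *arg, struct tcp_pcb *pcb, err_t err)
{
    tcp_arg(pcb, arg);            /* handed back as 'arg' in client_sent() */
    tcp_sent(pcb, client_sent);   /* run client_sent() as data gets ACKed */
    return ERR_OK;
}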
Example #24
static void
rds_wput_other(queue_t *q, mblk_t *mp)
{
	uchar_t *rptr = mp->b_rptr;
	struct datab *db;
	cred_t *cr;

	db = mp->b_datap;
	switch (db->db_type) {
	case M_DATA:
		/* Not connected */
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((uintptr_t)mp->b_wptr - (uintptr_t)rptr <
		    sizeof (t_scalar_t)) {
			freemsg(mp);
			return;
		}
		switch (((union T_primitives *)(uintptr_t)rptr)->type) {
		case T_CAPABILITY_REQ:
			rds_capability_req(q, mp);
			return;

		case T_INFO_REQ:
			rds_info_req(q, mp);
			return;
		case O_T_BIND_REQ:
		case T_BIND_REQ:
			rds_bind(q, mp);
			return;
		case T_SVR4_OPTMGMT_REQ:
		case T_OPTMGMT_REQ:
			/*
			 * All Solaris components should pass a db_credp
			 * for this TPI message, hence we ASSERT.
			 * But in case there is some other M_PROTO that looks
			 * like a TPI message sent by some other kernel
			 * component, we check and return an error.
			 */
			cr = msg_getcred(mp, NULL);
			ASSERT(cr != NULL);
			if (cr == NULL) {
				rds_err_ack(q, mp, TSYSERR, EINVAL);
				return;
			}
			if (((union T_primitives *)(uintptr_t)rptr)->type ==
			    T_SVR4_OPTMGMT_REQ) {
				svr4_optcom_req(q, mp, cr, &rds_opt_obj);
			} else {
				tpi_optcom_req(q, mp, cr, &rds_opt_obj);
			}
			return;
		case T_CONN_REQ:
			/*
			 * We should not receive T_CONN_REQ as sockfs only
			 * sends down T_CONN_REQ if family == AF_INET/AF_INET6
			 * and type == SOCK_DGRAM/SOCK_RAW. For all others
			 * it simply calls soisconnected. see sotpi_connect()
			 * for details.
			 */
		/* FALLTHRU */
		default:
			cmn_err(CE_PANIC, "type %d \n",
			    ((union T_primitives *)(uintptr_t)rptr)->type);
		}
		break;
	case M_FLUSH:
		if (*rptr & FLUSHW)
			flushq(q, FLUSHDATA);
		break;
	case M_IOCTL:
		rds_ioctl(q, mp);
		break;
	case M_IOCDATA:
		/* IOCTL continuation following copyin or copyout. */
		if (mi_copy_state(q, mp, NULL) == -1) {
			/*
			 * The copy operation failed.  mi_copy_state already
			 * cleaned up, so we're out of here.
			 */
			return;
		}
		/*
		 * If we just completed a copy in, continue processing
		 * in rds_ioctl_copyin_done. If it was a copy out, we call
		 * mi_copyout again.  If there is nothing more to copy out,
		 * it will complete the IOCTL.
		 */

		if (MI_COPY_DIRECTION(mp) == MI_COPY_IN)
			rds_ioctl_copyin_done(q, mp);
		else
			mi_copyout(q, mp);
		return;

	default:
		cmn_err(CE_PANIC, "types %d \n", db->db_type);
	}
}
Example #25
STATIC streamscall int
ip2xinet_uwput(queue_t *q, mblk_t *mp)
{

	int i;

	spin_lock(&ip2xinet_lock);

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
		} else
			freemsg(mp);
		break;

	case M_IOCTL:
		/* Process at least the I_LINK, I_UNLINK */

		/* THINKME: Failure to correctly process I_LINK/I_UNLINK while returning correctly
		   a nack to stream head will leave us in a possibly totally screwed up DLPI state
		   from which we have to somehow recover.  The possible problematic states are
		   DL_UNBOUND, any DL_PENDING states Note: if we stay in UNATTACHED on I_LINK
		   failure or in IDLE on I_UNLINK failure we're ok as long as the private data
		   structure stuff is consistent with the state */

	{
		struct iocblk *iocp;
		mblk_t *nmp;
		dl_attach_req_t *attach;
		struct linkblk *lp;

		iocp = (struct iocblk *) mp->b_rptr;

#if 0
#ifdef DEBUG
		pkt_debug(X25DBIOCTL) KPRINTF("%s size %d\n", x25dbiocmsg(iocp->ioc_cmd),
					      x25dbmsgsize(mp));
#endif
#endif

		switch ((unsigned) iocp->ioc_cmd) {
		case I_LINK:
			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			iocp->ioc_count = 0;

			lp = (struct linkblk *) mp->b_cont->b_rptr;
			/* Use only one xinet queue for all devices */
			ip2xinet_status.lowerq = lp->l_qbot;
			ip2xinet_status.index = lp->l_index;

			/* Only one read q to get data from xinet */
			ip2xinet_status.readq = RD(q);

			/* These are dummy ones to indicate the queues are being used */
			ip2xinet_status.lowerq->q_ptr = (char *) &ip2xinet_numopen;
			RD(ip2xinet_status.lowerq)->q_ptr = (char *) &ip2xinet_numopen;

			if ((nmp = allocb(sizeof(union DL_primitives), BPRI_LO)) == NULL) {
				iocp->ioc_error = ENOSR;
				mp->b_datap->db_type = M_IOCNAK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				spin_unlock(&ip2xinet_lock);
				printk("pktioctl: I_LINK failed: allocb failed");
				return (0);
			}

			/* Setup and send an ATTACH */
			nmp->b_datap->db_type = M_PROTO;
			nmp->b_wptr += DL_ATTACH_REQ_SIZE;

			attach = (dl_attach_req_t *) nmp->b_rptr;
			attach->dl_primitive = DL_ATTACH_REQ;
			attach->dl_ppa = ip2xinet_status.myminor;
			ip2xinet_status.ip2x_dlstate = DL_ATTACH_PENDING;

			/* experience shows that an I_LINKed queue needs to be enabled so that the
			   service routine will be run. */
			qenable(ip2xinet_status.lowerq);
			if (!putq(ip2xinet_status.lowerq, nmp)) {
				nmp->b_band = 0;
				putq(ip2xinet_status.lowerq, nmp);
			}

			/* all went well */
			mp->b_datap->db_type = M_IOCACK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
			break;

		case I_UNLINK:
		{
			struct linkblk *lp;

			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			iocp->ioc_count = 0;
			lp = (struct linkblk *) mp->b_cont->b_rptr;

			/* Ignore the DLPI state, the stack is being torn down regardless.  */
			ip2xinet_status.ip2x_dlstate = UNLINKED;
			/* can't transmit any more */
			for (i = 0; i < NUMIP2XINET; i++) {
				struct ip2xinet_priv *privptr = &ip2xinet_devs[i].priv;

				if (privptr->state == 1)
					netif_stop_queue(&(ip2xinet_devs[i].dev));
			}

			flushq(q, FLUSHALL);
			flushq(RD(lp->l_qbot), FLUSHALL);

			ip2xinet_status.readq = NULL;
			ip2xinet_status.lowerq = NULL;
			mp->b_datap->db_type = M_IOCACK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}

			break;
		}

		default:
			iocp->ioc_error = EINVAL;
			mp->b_datap->db_type = M_IOCNAK;
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
			break;
		}

	}
		break;

	case M_DATA:
	case M_PCPROTO:
	case M_PROTO:
	default:
		printk("ip2xinet_uwput: unexpected type=0x%x", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}
	spin_unlock(&ip2xinet_lock);
	return (0);
}
Example #26
/*
 * ptemwput - Module write queue put procedure.
 *
 * This is called from the module or stream head upstream.
 *
 * XXX:	This routine is quite lazy about handling allocation failures,
 *	basically just giving up and reporting failure.  It really ought to
 *	set up bufcalls and only fail when it's absolutely necessary.
 */
static void
ptemwput(queue_t *q, mblk_t *mp)
{
	struct ptem *ntp = (struct ptem *)q->q_ptr;
	struct iocblk *iocp;	/* outgoing ioctl structure */
	struct copyresp *resp;
	unsigned char type = mp->b_datap->db_type;

	if (type >= QPCTL) {
		switch (type) {

		case M_IOCDATA:
			resp = (struct copyresp *)mp->b_rptr;
			if (resp->cp_rval) {
				/*
				 * Just free message on failure.
				 */
				freemsg(mp);
				break;
			}

			/*
			 * Only need to copy data for the SET case.
			 */
			switch (resp->cp_cmd) {

				case TIOCSWINSZ:
					ptioc(q, mp, WRSIDE);
					break;

				case JWINSIZE:
				case TIOCGWINSZ:
					mioc2ack(mp, NULL, 0, 0);
					qreply(q, mp);
					break;

				default:
					freemsg(mp);
			}
			break;

		case M_FLUSH:
			if (*mp->b_rptr & FLUSHW) {
			    if ((ntp->state & IS_PTSTTY) &&
					(*mp->b_rptr & FLUSHBAND))
				flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
			    else
				flushq(q, FLUSHDATA);
			}
			putnext(q, mp);
			break;

		case M_READ:
			freemsg(mp);
			break;

		case M_STOP:
			/*
			 * Set the output flow control state.
			 */
			ntp->state |= OFLOW_CTL;
			putnext(q, mp);
			break;

		case M_START:
			/*
			 * Relieve the output flow control state.
			 */
			ntp->state &= ~OFLOW_CTL;
			putnext(q, mp);
			qenable(q);
			break;
		default:
			putnext(q, mp);
			break;
		}
		return;
	}
	/*
	 * If our queue is nonempty or flow control persists
	 * downstream or module in stopped state, queue this message.
	 */
	if (q->q_first != NULL || !bcanputnext(q, mp->b_band)) {
		/*
		 * Exception: ioctls, except for those defined to
		 * take effect after output has drained, should be
		 * processed immediately.
		 */
		switch (type) {

		case M_IOCTL:
			iocp = (struct iocblk *)mp->b_rptr;
			switch (iocp->ioc_cmd) {
			/*
			 * Queue these.
			 */
			case TCSETSW:
			case TCSETSF:
			case TCSETAW:
			case TCSETAF:
			case TCSBRK:
				break;

			/*
			 * Handle all others immediately.
			 */
			default:
				(void) ptemwmsg(q, mp);
				return;
			}
			break;

		case M_DELAY: /* tty delays not supported */
			freemsg(mp);
			return;

		case M_DATA:
			if ((mp->b_wptr - mp->b_rptr) < 0) {
				/*
				 * Free all bad length messages.
				 */
				freemsg(mp);
				return;
			} else if ((mp->b_wptr - mp->b_rptr) == 0) {
				if (!(ntp->state & IS_PTSTTY)) {
					freemsg(mp);
					return;
				}
			}
		}
		(void) putq(q, mp);
		return;
	}
	/*
	 * fast path into ptemwmsg to dispose of mp.
	 */
	if (!ptemwmsg(q, mp))
		(void) putq(q, mp);
}
Example #27
void ortp_network_simulator_destroy(OrtpNetworkSimulatorCtx *sim){
	flushq(&sim->q,0);
	ortp_free(sim);
}
Example #28
static void vfw_uninit(MSFilter *f){
	VfwState *s=(VfwState*)f->data;
	flushq(&s->rq,0);
	ms_mutex_destroy(&s->mutex);
	ms_free(s);
}
Example #29
static int
log_wput(queue_t *q, mblk_t *mp)
{
	log_t *lp = (log_t *)q->q_ptr;
	struct iocblk *iocp;
	mblk_t *mp2;
	cred_t *cr = DB_CRED(mp);
	zoneid_t zoneid;

	/*
	 * Default to global zone if dblk doesn't have a valid cred.
	 * Calls to syslog() go through putmsg(), which does set up
	 * the cred.
	 */
	zoneid = (cr != NULL) ? crgetzoneid(cr) : GLOBAL_ZONEID;

	switch (DB_TYPE(mp)) {
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(q), FLUSHALL);
			qreply(q, mp);
			return (0);
		}
		break;

	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;

		if (lp->log_major != LOG_LOGMIN) {
			/* write-only device */
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (iocp->ioc_count == TRANSPARENT) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}

		if (lp->log_flags) {
			miocnak(q, mp, 0, EBUSY);
			return (0);
		}

		freemsg(lp->log_data);
		lp->log_data = mp->b_cont;
		mp->b_cont = NULL;

		switch (iocp->ioc_cmd) {

		case I_CONSLOG:
			log_update(lp, RD(q), SL_CONSOLE, log_console);
			break;

		case I_TRCLOG:
			if (lp->log_data == NULL) {
				miocnak(q, mp, 0, EINVAL);
				return (0);
			}
			log_update(lp, RD(q), SL_TRACE, log_trace);
			break;

		case I_ERRLOG:
			log_update(lp, RD(q), SL_ERROR, log_error);
			break;

		default:
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		miocack(q, mp, 0, 0);
		return (0);

	case M_PROTO:
		if (MBLKL(mp) == sizeof (log_ctl_t) && mp->b_cont != NULL) {
			log_ctl_t *lc = (log_ctl_t *)mp->b_rptr;
			/* This code is used by savecore to log dump msgs */
			if (mp->b_band != 0 &&
			    secpolicy_sys_config(CRED(), B_FALSE) == 0) {
				(void) putq(log_consq, mp);
				return (0);
			}
			if ((lc->pri & LOG_FACMASK) == LOG_KERN)
				lc->pri |= LOG_USER;
			mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, lc->level,
			    lc->flags, lc->pri, mp->b_cont->b_rptr,
			    MBLKL(mp->b_cont) + 1, 0);
			if (mp2 != NULL)
				log_sendmsg(mp2, zoneid);
		}
		break;

	case M_DATA:
		mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, 0, SL_CONSOLE,
		    LOG_USER | LOG_INFO, mp->b_rptr, MBLKL(mp) + 1, 0);
		if (mp2 != NULL)
			log_sendmsg(mp2, zoneid);
		break;
	}

	freemsg(mp);
	return (0);
}
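
The M_PROTO branch above consumes messages whose control part is a log_ctl_t followed by the text, which is roughly how Solaris syslog() hands records to the log driver over /dev/conslog. A hedged user-space sketch of that producer side follows; the exact field usage is illustrative only:

#include <fcntl.h>
#include <string.h>
#include <stropts.h>
#include <sys/log.h>
#include <sys/strlog.h>
#include <sys/syslog.h>
#include <unistd.h>

int conslog_sketch(const char *text)
{
	log_ctl_t lc;
	struct strbuf ctl, dat;
	int fd = open("/dev/conslog", O_WRONLY);

	if (fd < 0)
		return (-1);
	(void) memset(&lc, 0, sizeof (lc));
	lc.pri = LOG_USER | LOG_INFO;	/* facility and severity */
	lc.flags = SL_CONSOLE;		/* matches the SL_CONSOLE handling above */
	ctl.buf = (char *)&lc;
	ctl.len = sizeof (lc);
	dat.buf = (char *)text;
	dat.len = strlen(text) + 1;
	if (putmsg(fd, &ctl, &dat, 0) < 0) {
		(void) close(fd);
		return (-1);
	}
	(void) close(fd);
	return (0);
}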
Example #30
/**
 * ptem_w_msg - process a message on the write side
 * @q: write queue
 * @mp: message to process
 *
 * Returns 1 when the caller (putp or srvp) needs to queue or requeue the
 * message.  Returns 0 when the message has been disposed of; the caller must
 * not reference mp again.
 *
 * Marked noinline to keep this slow path out of the way of the fast path.
 */
static streams_noinline int
ptem_w_msg(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		if ((p->flags & PTEM_OUTPUT_STOPPED)
		    || (q->q_first != NULL)
		    || (q->q_flag & QSVCBUSY)
		    || (!bcanputnext(q, mp->b_band)))
			return (1);
		putnext(q, mp);
		return (0);
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;
		int error = EINVAL;
		int rval = 0;
		int count = 0;
		mblk_t *bp, *cp;

		/* The Stream head is set up to recognize all transparent terminal input-output
		   controls and pass them downstream as though they were I_STR input-output
		   controls.  There is also the opportunity to register input-output controls with
		   the Stream head using the TIOC_REPLY message. */
		if (ioc->ioc_count == TRANSPARENT) {
			__swerr();
			goto nak;
		}

		if ((bp = mp->b_cont) == NULL)
			goto nak;

		switch (ioc->ioc_cmd) {
		case TCSETAF:
			/* Note, if properly handled the M_FLUSH message will never be queued and
			   upon successful return from this function, we have already processed the
			   read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETAW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETA:
		{
			struct termio *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termio)))
				goto nak;

			c = (typeof(c)) bp->b_rptr;

			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}

				p->c.c_iflag = (p->c.c_iflag & 0xffff0000) | c->c_iflag;
				p->c.c_oflag = (p->c.c_oflag & 0xffff0000) | c->c_oflag;
				p->c.c_cflag = (p->c.c_cflag & 0xffff0000) | c->c_cflag;
				p->c.c_lflag = (p->c.c_lflag & 0xffff0000) | c->c_lflag;
				p->c.c_line = c->c_line;
				bcopy(c->c_cc, p->c.c_cc, NCC);

				putnext(q, cp);
			}
			goto ack;
		}

		case TCSETSF:
			/* Note, if properly handled the M_FLUSH message will never be queued and
			   upon successful return from this function, we have already processed the
			   read-side flush along the entire Stream. */
			if (!putnextctl1(q, M_FLUSH, FLUSHR)) {
				error = EAGAIN;
				goto nak;
			}
			/* fall through */
		case TCSETSW:
			/* Note, output should have already drained. */
			/* fall through */
		case TCSETS:
		{
			struct termios *c;
			mblk_t *zp;

			if (!pullupmsg(bp, sizeof(struct termios)))
				goto nak;

			c = (typeof(c)) bp->b_rptr;

			if ((c->c_cflag & CBAUD) == B0) {
				/* slave hangup */
				if ((zp = xchg(&p->zero, NULL)))
					putnext(q, zp);
			} else {
				if (!(cp = copymsg(mp))) {
					error = EAGAIN;
					goto nak;
				}

				p->c = *c;

				putnext(q, cp);
			}
			goto ack;
		}

		case TCGETA:
		{
			struct termio *c;
			extern void __struct_termio_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termio))
				__struct_termio_is_too_large_for_fastbuf();
			count = sizeof(*c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			c = (typeof(c)) bp->b_rptr;

			c->c_iflag = p->c.c_iflag;
			c->c_oflag = p->c.c_oflag;
			c->c_cflag = p->c.c_cflag;
			c->c_lflag = p->c.c_lflag;
			c->c_line = p->c.c_line;
			bcopy(p->c.c_cc, c->c_cc, NCC);

			goto ack;
		}
		case TCGETS:
		{
			extern void __struct_termios_is_too_large_for_fastbuf(void);

			if (FASTBUF < sizeof(struct termios))
				__struct_termios_is_too_large_for_fastbuf();
			count = sizeof(p->c);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct termios *) bp->b_rptr) = p->c;
			goto ack;
		}
		case TIOCGWINSZ:
		{
			extern void __struct_winsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct winsize))
				__struct_winsize_is_too_large_for_fastbuf();
			count = sizeof(p->ws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			*((struct winsize *) bp->b_rptr) = p->ws;
			goto ack;
		}
#ifdef JWINSIZE
		case JWINSIZE:
		{
			struct jwinsize *jws;
			extern void __struct_jwinsize_is_too_large_for_fastbuf(void);

			if (!(p->flags & PTEM_HAVE_WINSIZE))
				goto nak;
			if (FASTBUF < sizeof(struct jwinsize))
				__struct_jwinsize_is_too_large_for_fastbuf();
			/* always have room in a fastbuf */
			count = sizeof(*jws);
			bp->b_rptr = bp->b_datap->db_base;
			bp->b_wptr = bp->b_rptr + count;
			jws = (typeof(jws)) bp->b_rptr;

			jws->bytesx = p->ws.ws_col;
			jws->bytesy = p->ws.ws_row;
			jws->bitsx = p->ws.ws_xpixel;
			jws->bitsy = p->ws.ws_ypixel;

			goto ack;
		}
#endif				/* JWINSIZE */
		case TIOCSWINSZ:
		{
			struct winsize *ws;
			int changed = 0;
			int zeroed = !(p->flags & PTEM_HAVE_WINSIZE);
			mblk_t *mb;

			if (!pullupmsg(bp, sizeof(*ws)))
				goto nak;
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			if (!(mb = allocb(1, BPRI_MED))) {
				freemsg(cp);
				error = EAGAIN;
				goto nak;
			}
			ws = (typeof(ws)) bp->b_rptr;
			if (ws->ws_col != p->ws.ws_col) {
				if ((p->ws.ws_col = ws->ws_col))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_row != p->ws.ws_row) {
				if ((p->ws.ws_row = ws->ws_row))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_xpixel != p->ws.ws_xpixel) {
				if ((p->ws.ws_xpixel = ws->ws_xpixel))
					zeroed = 0;
				changed = 1;
			}
			if (ws->ws_ypixel != p->ws.ws_ypixel) {
				if ((p->ws.ws_ypixel = ws->ws_ypixel))
					zeroed = 0;
				changed = 1;
			}
			if (zeroed)
				p->flags &= ~PTEM_HAVE_WINSIZE;
			else
				p->flags |= PTEM_HAVE_WINSIZE;
			if (changed) {
				mb->b_datap->db_type = M_SIG;
				*mb->b_wptr++ = SIGWINCH;
				qreply(q, mb);
			} else
				freeb(mb);
			putnext(q, cp);	/* copy for pckt(4) */
			count = 0;
			goto ack;
		}
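		/*
		 * Hypothetical user-space view of the TIOCSWINSZ case above,
		 * for illustration only (not part of the module source):
		 *
		 *	struct winsize ws = { .ws_row = 40, .ws_col = 100 };
		 *	ioctl(master_fd, TIOCSWINSZ, &ws);
		 *
		 * The new size is recorded in p->ws and, when any dimension
		 * actually changed, the M_SIG message built above delivers
		 * SIGWINCH up the slave side of the pseudo-terminal.
		 */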
		case TCSBRK:
			if (!(cp = copymsg(mp))) {
				error = EAGAIN;
				goto nak;
			}
			putnext(q, cp);
			count = 0;
			goto ack;
		default:
			goto nak;
		}
		break;
	      ack:
		mp->b_datap->db_type = M_IOCACK;
		ioc->ioc_error = 0;
		ioc->ioc_rval = rval;
		ioc->ioc_count = count;
		goto reply;
	      nak:
		mp->b_datap->db_type = M_IOCNAK;
		ioc->ioc_error = error;
		ioc->ioc_rval = -1;
		ioc->ioc_count = 0;
	      reply:
		qreply(q, mp);
		break;
	}
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
		}
		putnext(q, mp);
		break;
	default:
		if (mp->b_datap->db_type < QPCTL) {
			if ((q->q_first != NULL)
			    || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band)))
				return (1);	/* (re)queue */
		}
		putnext(q, mp);
		break;
	}
	return (0);
}
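As the comment block above ptem_w_msg() states, a return of 1 means the caller still owns mp and has to queue or requeue it, while 0 means the message has already been consumed or replied to. A minimal sketch of a write put procedure using that convention follows; the wrapper name is made up for illustration.

/* Hypothetical caller sketch, not part of the module source: queue the
 * message when ptem_w_msg() reports it could not be passed along now;
 * otherwise it has already been disposed of. */
static streamscall int
ptem_wput_sketch(queue_t *q, mblk_t *mp)
{
	if (ptem_w_msg(q, mp))
		(void) putq(q, mp);	/* the service procedure will retry later */
	return (0);
}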