Example 1
int
_tx_sndrel(int fd, int api_semantics)
{
	struct T_ordrel_req orreq;
	struct strbuf ctlbuf;
	struct _ti_user *tiptr;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype != T_COTS_ORD) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing so for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_DATAXFER ||
		    tiptr->ti_state == T_INREL)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (_t_look_locked(fd, tiptr, 0,
		    api_semantics) == T_DISCONNECT) {
			t_errno = TLOOK;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

	}

	orreq.PRIM_type = T_ORDREL_REQ;
	ctlbuf.maxlen = (int)sizeof (struct T_ordrel_req);
	ctlbuf.len = (int)sizeof (struct T_ordrel_req);
	ctlbuf.buf = (caddr_t)&orreq;

	/*
	 * Calls to send data (write or putmsg) can potentially
	 * block. In the MT case, we drop the lock and enable signals
	 * here, then reacquire the lock afterwards.
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if (putmsg(fd, &ctlbuf, NULL, 0) < 0) {
		if (errno == EAGAIN)
			t_errno = TFLOW;
		else
			t_errno = TSYSERR;
		return (-1);
	}
	sig_mutex_lock(&tiptr->ti_lock);
	_T_TX_NEXTSTATE(T_SNDREL, tiptr,
	    "t_sndrel: invalid state on event T_SNDREL");
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);
}
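
For context, a minimal caller-side sketch of the orderly-release sequence this routine implements, assuming the standard XTI entry points declared in <xti.h> (the helper name orderly_release is illustrative):

#include <xti.h>

/*
 * Initiate an orderly release on a T_COTS_ORD endpoint and wait for
 * the peer's corresponding release; fd is assumed to be in T_DATAXFER.
 */
int
orderly_release(int fd)
{
	if (t_sndrel(fd) < 0) {		/* may fail with TFLOW or TLOOK */
		t_error("t_sndrel failed");
		return (-1);
	}
	if (t_rcvrel(fd) < 0) {		/* consume the peer's T_ORDREL_IND */
		t_error("t_rcvrel failed");
		return (-1);
	}
	return (0);
}
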
Example 2
int
_tx_open(const char *path, int flags, struct t_info *info, int api_semantics)
{
	int retval, fd, sv_errno;
	int sv_terrno;
	int sv_errno_global;
	struct _ti_user *tiptr;
	sigset_t mask;
	int t_create_first_attempt = 1;
	int ticap_ioctl_failed = 0;

	if (!(flags & O_RDWR)) {
		t_errno = TBADFLAG;
		return (-1);
	}

	sv_errno_global = errno;
	sv_terrno = t_errno;

retry:
	if ((fd = open(path, flags)) < 0) {
		t_errno = TSYSERR;
		if (_T_IS_XTI(api_semantics) && errno == ENOENT)
			/* XTI only */
			t_errno = TBADNAME;
		return (-1);
	}
	/*
	 * Is the "timod" module already pushed?
	 */
	do {
		retval = ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0) {
		sv_errno = errno;

		t_errno = TSYSERR;
		(void) close(fd);
		errno = sv_errno;
		return (-1);
	}

	if (retval == 0) {
		/*
		 * "timod" not already on stream, then push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH is
			 * atomic w.r.t. signals (EINTR error).
			 */
			retval = ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			int sv_errno = errno;

			t_errno = TSYSERR;
			(void) close(fd);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	/*
	 * The call to _t_create may fail either because the transport
	 * doesn't understand T_CAPABILITY_REQ or for some other reason.
	 * It is nearly impossible to distinguish between these cases, so
	 * it is implicitly assumed that it is always safe to close and
	 * reopen the same stream and that open/close has no side effects.
	 * _t_create may fail only once if its failure is caused by an
	 * unimplemented T_CAPABILITY_REQ.
	 */
	tiptr = _t_create(fd, info, api_semantics, &ticap_ioctl_failed);
	if (tiptr == NULL) {
		/*
		 * If _t_create failed because the TI_CAPABILITY ioctl
		 * failed, we may try to reopen the stream in the hope that
		 * timod will emulate TI_CAPABILITY and the call will
		 * succeed when made again.
		 */
		if (t_create_first_attempt == 1 && ticap_ioctl_failed == 1) {
			t_create_first_attempt = 0;
			(void) close(fd);
			errno = sv_errno_global;
			t_errno = sv_terrno;
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			goto retry;
		} else {
			int sv_errno = errno;
			(void) close(fd);
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create synchronizes state with the kernel timod and
	 * already sets it to T_UNBND - what it needs to be on the
	 * T_OPEN event. No _T_TX_NEXTSTATE is needed here.
	 */
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);

	do {
		retval = ioctl(fd, I_FLUSH, FLUSHRW);
	} while (retval < 0 && errno == EINTR);

	/*
	 * We ignore other error cases (retval < 0) - the assumption is
	 * that an I_FLUSH failure is temporary (e.g. ENOSR) or an
	 * otherwise benign failure on this newly opened file
	 * descriptor, not a critical one.
	 */

	return (fd);
}
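
A caller-side sketch of the public t_open() interface implemented here, assuming <xti.h>; the device path "/dev/tcp" and helper name are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <xti.h>

int
open_endpoint(void)
{
	struct t_info info;
	int fd;

	/* O_RDWR is mandatory; without it t_open fails with TBADFLAG. */
	if ((fd = t_open("/dev/tcp", O_RDWR, &info)) < 0) {
		t_error("t_open failed");
		return (-1);
	}
	(void) printf("servtype %ld, max addr size %ld\n",
	    (long)info.servtype, (long)info.addr);
	return (fd);
}
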
Example 3
int
_tx_bind(
	int fd,
	const struct t_bind *req,
	struct t_bind *ret,
	int api_semantics
)
{
	struct T_bind_req *bind_reqp;
	struct T_bind_ack *bind_ackp;
	int size, sv_errno, retlen;
	struct _ti_user *tiptr;
	sigset_t mask;

	int didalloc;
	int use_xpg41tpi;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * We block all signals since TI_BIND, which sends the TPI message
	 * O_T_BIND_REQ down, is not an idempotent operation.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing so for TLI may break existing applications.
		 */
		if (tiptr->ti_state != T_UNBND) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			return (-1);
		}
	}
	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_bind_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	bind_reqp = (struct T_bind_req *)ctlbuf.buf;
	size = (int)sizeof (struct T_bind_req);

	use_xpg41tpi = (_T_IS_XTI(api_semantics)) &&
	    ((tiptr->ti_prov_flag & XPG4_1) != 0);
	if (use_xpg41tpi)
		/* XTI call and provider knows the XTI inspired TPI */
		bind_reqp->PRIM_type = T_BIND_REQ;
	else
		/* TLI call or an old TPI provider */
		bind_reqp->PRIM_type = O_T_BIND_REQ;

	bind_reqp->ADDR_length = (req == NULL ? 0 : req->addr.len);
	bind_reqp->ADDR_offset = 0;
	bind_reqp->CONIND_number = (req == NULL ? 0 : req->qlen);

	if (bind_reqp->ADDR_length) {
		if (_t_aligned_copy(&ctlbuf, (int)bind_reqp->ADDR_length, size,
		    req->addr.buf, &bind_reqp->ADDR_offset) < 0) {
			/*
			 * An aligned copy would overflow the buffer
			 * allocated based on the transport's maximum
			 * address length. Return an error.
			 */
			t_errno = TBADADDR;
			goto err_out;
		}
		size = bind_reqp->ADDR_offset + bind_reqp->ADDR_length;
	}

	if (_t_do_ioctl(fd, ctlbuf.buf, size, TI_BIND, &retlen) < 0) {
		goto err_out;
	}

	if (retlen < (int)sizeof (struct T_bind_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	bind_ackp = (struct T_bind_ack *)ctlbuf.buf;

	if ((req != NULL) && req->addr.len != 0 &&
	    (use_xpg41tpi == 0) && (_T_IS_XTI(api_semantics))) {
		/*
		 * Best effort to do XTI on old TPI.
		 *
		 * Match the requested address, or unbind and fail with
		 * TADDRBUSY.
		 *
		 * XXX - Hack alert! Should we do this at all?
		 * Not "supported", as it may not work if the encoding of
		 * the returned address differs from that of the requested
		 * one. This will also have trouble with TCP/UDP wildcard
		 * port requests.
		 */
		if ((req->addr.len != bind_ackp->ADDR_length) ||
		    (memcmp(req->addr.buf, ctlbuf.buf +
		    bind_ackp->ADDR_offset, req->addr.len) != 0)) {
			(void) _tx_unbind_locked(fd, tiptr, &ctlbuf);
			t_errno = TADDRBUSY;
			goto err_out;
		}
	}

	tiptr->ti_ocnt = 0;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	_T_TX_NEXTSTATE(T_BIND, tiptr, "t_bind: invalid state event T_BIND");

	if (ret != NULL) {
		if (_T_IS_TLI(api_semantics) || ret->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(bind_ackp->ADDR_length,
			    ret->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(ret->addr.buf,
			    ctlbuf.buf + bind_ackp->ADDR_offset,
			    (size_t)bind_ackp->ADDR_length);
			ret->addr.len = bind_ackp->ADDR_length;
		}
		ret->qlen = bind_ackp->CONIND_number;
	}

	tiptr->ti_qlen = (uint_t)bind_ackp->CONIND_number;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (0);
	/* NOTREACHED */
err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	errno = sv_errno;
	return (-1);
}
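
A caller-side sketch of the public t_bind(), assuming <xti.h>; it uses t_alloc() to size the returned address buffer from the transport's reported limits (the helper name bind_any is illustrative):

#include <xti.h>

/*
 * Bind an endpoint, letting the transport choose the address.
 * As seen above, a NULL req yields ADDR_length 0 and CONIND_number 0.
 */
int
bind_any(int fd)
{
	struct t_bind *ret;

	if ((ret = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR)) == NULL) {
		t_error("t_alloc failed");
		return (-1);
	}
	if (t_bind(fd, NULL, ret) < 0) {
		t_error("t_bind failed");
		(void) t_free((char *)ret, T_BIND);
		return (-1);
	}
	(void) t_free((char *)ret, T_BIND);
	return (0);
}
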
Example 4
int
_tx_rcvdis(int fd, struct t_discon *discon, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int flg = 0;
	int didalloc, didralloc;
	int use_lookbufs = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * Acquire the per-endpoint lock.
	 * Note: The lock is held across most of this routine,
	 * including the blocking getmsg() call. This is fine
	 * because it is first verified that an event is pending.
	 */
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing so for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_DATAXFER ||
		    tiptr->ti_state == T_OUTCON ||
		    tiptr->ti_state == T_OUTREL ||
		    tiptr->ti_state == T_INREL ||
		    (tiptr->ti_state == T_INCON && tiptr->ti_ocnt > 0))) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
	}
	/*
	 * Handle the likely scenario as a special case:
	 * if a disconnect indication is the first event in the
	 * look buffer, just take it from there.
	 */
	if ((tiptr->ti_lookcnt > 0) &&
	    /* LINTED pointer cast */
	    (*((t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf) == T_DISCON_IND)) {
		/*
		 * The T_DISCON_IND is already in the look buffer
		 */
		ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen;
		ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf;
		/* Note: ctlbuf.maxlen not used in this case */

		databuf.len = tiptr->ti_lookbufs.tl_lookdlen;
		databuf.buf = tiptr->ti_lookbufs.tl_lookdbuf;
		/* Note: databuf.maxlen not used in this case */

		use_lookbufs = 1;

	} else {

		if ((retval = _t_look_locked(fd, tiptr, 0,
		    api_semantics)) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		if (retval != T_DISCONNECT) {
			t_errno = TNODIS;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		/*
		 * Get the disconnect off the read queue,
		 * using the ctl and rcv buffers.
		 *
		 * Acquire ctlbuf for use in sending/receiving control part
		 * of the message.
		 */
		if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		/*
		 * Acquire databuf for use in sending/receiving data part
		 */
		if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
			sv_errno = errno;
			if (didalloc)
				free(ctlbuf.buf);
			else
				tiptr->ti_ctlbuf = ctlbuf.buf;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		/*
		 * Since we already verified that a disconnect event
		 * is present, we assume that this getmsg() cannot
		 * block indefinitely
		 */
		do {
			retval = getmsg(fd, &ctlbuf, &databuf, &flg);
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			t_errno = TSYSERR;
			goto err_out;
		}
		if (databuf.len == -1)
			databuf.len = 0;

		/*
		 * Did we get the entire message?
		 */
		if (retval > 0) {
			t_errno = TSYSERR;
			errno = EIO;
			goto err_out;
		}
	}
	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	if ((ctlbuf.len < (int)sizeof (struct T_discon_ind)) ||
	    (pptr->type != T_DISCON_IND)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/*
	 * clear more and expedited flags
	 */
	tiptr->ti_flags &= ~(MORE | EXPEDITED);

	if (tiptr->ti_ocnt <= 0) {
		_T_TX_NEXTSTATE(T_RCVDIS1, tiptr,
		    "t_rcvdis: invalid state event T_RCVDIS1");
	} else {
		if (tiptr->ti_ocnt == 1) {
			_T_TX_NEXTSTATE(T_RCVDIS2, tiptr,
			    "t_rcvdis: invalid state event T_RCVDIS2");
		} else {
			_T_TX_NEXTSTATE(T_RCVDIS3, tiptr,
			    "t_rcvdis: invalid state event T_RCVDIS3");
		}
		tiptr->ti_ocnt--;
		tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;
	}

	if (discon != NULL) {
		if (_T_IS_TLI(api_semantics) || discon->udata.maxlen > 0) {
			if (databuf.len > (int)discon->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(discon->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			discon->udata.len = databuf.len;
		}
		discon->reason = pptr->discon_ind.DISCON_reason;
		discon->sequence = pptr->discon_ind.SEQ_number;
	}
	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;

	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
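
A sketch of how a caller typically reaches t_rcvdis(): another call fails with TLOOK, t_look() reports T_DISCONNECT, and the pending disconnect is consumed (assuming <xti.h>; the helper name is illustrative):

#include <stdio.h>
#include <xti.h>

/* Consume a pending disconnect, if any, and report the reason code. */
int
consume_disconnect(int fd)
{
	struct t_discon discon;

	if (t_look(fd) != T_DISCONNECT)
		return (0);		/* nothing pending */
	discon.udata.maxlen = 0;	/* XTI: discard any user data */
	discon.udata.buf = NULL;
	if (t_rcvdis(fd, &discon) < 0) {
		t_error("t_rcvdis failed");
		return (-1);
	}
	(void) printf("disconnect reason %d\n", discon.reason);
	return (0);
}
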
Example 5
int
_tx_getinfo(int fd, struct t_info *info, int api_semantics)
{
	struct T_info_req *inforeqp;
	struct T_info_ack *infoackp;
	int retlen;
	struct _ti_user *tiptr;
	int retval, sv_errno, didalloc;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_info_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	inforeqp = (struct T_info_req *)ctlbuf.buf;
	inforeqp->PRIM_type = T_INFO_REQ;

	do {
		retval = _t_do_ioctl(fd, ctlbuf.buf,
		    (int)sizeof (struct T_info_req), TI_GETINFO, &retlen);
	} while (retval < 0 && errno == EINTR);

	if (retval < 0)
		goto err_out;

	if (retlen != (int)sizeof (struct T_info_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	infoackp = (struct T_info_ack *)ctlbuf.buf;

	info->addr = infoackp->ADDR_size;
	info->options = infoackp->OPT_size;
	info->tsdu = infoackp->TSDU_size;
	info->etsdu = infoackp->ETSDU_size;
	info->connect = infoackp->CDATA_size;
	info->discon = infoackp->DDATA_size;
	info->servtype = infoackp->SERV_type;

	if (_T_IS_XTI(api_semantics)) {
		/* XTI ONLY - TLI t_info struct does not have "flags" */
		info->flags = 0;
		if (infoackp->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
			info->flags |= T_SENDZERO;
	}
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
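
A minimal sketch of the public t_getinfo(), assuming <xti.h>, e.g. to recheck the transport limits on an endpoint already in use:

#include <stdio.h>
#include <xti.h>

int
print_limits(int fd)
{
	struct t_info info;

	if (t_getinfo(fd, &info) < 0) {
		t_error("t_getinfo failed");
		return (-1);
	}
	(void) printf("tsdu %ld etsdu %ld servtype %ld\n",
	    (long)info.tsdu, (long)info.etsdu, (long)info.servtype);
	return (0);
}
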
Example 6
int
_tx_listen(int fd, struct t_call *call, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc, didralloc;
	int flg = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		sv_errno = errno;
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing so for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_IDLE ||
		    tiptr->ti_state == T_INCON)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_qlen == 0) {
			t_errno = TBADQLEN;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_ocnt == tiptr->ti_qlen) {
			if (!(tiptr->ti_flags & TX_TQFULL_NOTIFIED)) {
				tiptr->ti_flags |= TX_TQFULL_NOTIFIED;
				t_errno = TQFULL;
				sig_mutex_unlock(&tiptr->ti_lock);
				return (-1);
			}
		}

	}

	/*
	 * Check whether anything is already in the look buffer.
	 */
	if (tiptr->ti_lookcnt > 0) {
		t_errno = TLOOK;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		int sv_errno = errno;

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * This call may block indefinitely, so in the MT case we drop
	 * the lock and allow signals here, then reacquire the lock.
	 * The error path should roll back any state changes done above
	 * (there happen to be none here).
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	if (databuf.len == -1)
		databuf.len = 0;

	/*
	 * Did we get the entire message?
	 */
	if (retval > 0) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * Is the control part large enough to determine the primitive type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {

	case T_CONN_IND:
		if ((ctlbuf.len < (int)sizeof (struct T_conn_ind)) ||
		    (ctlbuf.len < (int)(pptr->conn_ind.OPT_length
		    + pptr->conn_ind.OPT_offset))) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * Change state, increment the outstanding connection
		 * indication count, and instantiate the "sequence" return
		 * parameter.
		 * Note: It is correct semantics according to the spec to
		 * do this despite the possibility of a TBUFOVFLW error
		 * later. The spec treats the TBUFOVFLW error in general as
		 * a special case which can be ignored by applications that
		 * do not really need the data returned in the 'netbuf'
		 * structures.
		 */
		_T_TX_NEXTSTATE(T_LISTN, tiptr,
		    "t_listen: invalid state event T_LISTN");
		tiptr->ti_ocnt++;
		call->sequence = pptr->conn_ind.SEQ_number;

		if (_T_IS_TLI(api_semantics) || call->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.SRC_length,
			    call->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->addr.buf, ctlbuf.buf +
			    (size_t)pptr->conn_ind.SRC_offset,
			    (unsigned int)pptr->conn_ind.SRC_length);
			call->addr.len = pptr->conn_ind.SRC_length;
		}
		if (_T_IS_TLI(api_semantics) || call->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.OPT_length,
			    call->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->opt.buf, ctlbuf.buf +
			    pptr->conn_ind.OPT_offset,
			    (size_t)pptr->conn_ind.OPT_length);
			call->opt.len = pptr->conn_ind.OPT_length;
		}
		if (_T_IS_TLI(api_semantics) || call->udata.maxlen > 0) {
			if (databuf.len > (int)call->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			call->udata.len = databuf.len;
		}

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (0);

	case T_DISCON_IND:
		/*
		 * Append the event to the "look buffer" list of
		 * events. This routine may defer signals.
		 */
		if (_t_register_lookevent(tiptr, databuf.buf,
		    databuf.len, ctlbuf.buf, ctlbuf.len) < 0) {
			t_errno = TSYSERR;
			errno = ENOMEM;
			goto err_out;
		}
		t_errno = TLOOK;
		goto err_out;

	default:
		break;
	}

	t_errno = TSYSERR;
	errno = EPROTO;
err_out:
	sv_errno = errno;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
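
A caller-side sketch of a listen/accept step built on the public t_listen() and t_accept(), assuming <xti.h> and a bound endpoint with qlen > 0; a real server would also handle TLOOK by draining the disconnect queued in the look buffer above (the helper name is illustrative):

#include <xti.h>

/*
 * Wait for one connect indication and accept it on the listening fd
 * (valid only while it is the sole outstanding indication).
 */
int
accept_one(int fd)
{
	struct t_call *call;

	if ((call = (struct t_call *)t_alloc(fd, T_CALL, T_ALL)) == NULL) {
		t_error("t_alloc failed");
		return (-1);
	}
	if (t_listen(fd, call) < 0) {		/* may fail with TLOOK */
		t_error("t_listen failed");
		(void) t_free((char *)call, T_CALL);
		return (-1);
	}
	if (t_accept(fd, fd, call) < 0) {
		t_error("t_accept failed");
		(void) t_free((char *)call, T_CALL);
		return (-1);
	}
	(void) t_free((char *)call, T_CALL);
	return (0);
}
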