int
_tx_bind(int fd, const struct t_bind *req, struct t_bind *ret,
    int api_semantics)
{
	struct T_bind_req *bind_reqp;
	struct T_bind_ack *bind_ackp;
	int size, sv_errno, retlen;
	struct _ti_user *tiptr;
	sigset_t mask;
	int didalloc;
	int use_xpg41tpi;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * We block all signals since TI_BIND, which sends a TPI message
	 * O_T_BIND_REQ down, is not an idempotent operation.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing it for TLI may break existing applications.
		 */
		if (tiptr->ti_state != T_UNBND) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			return (-1);
		}
	}
	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_bind_req/ack).
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	bind_reqp = (struct T_bind_req *)ctlbuf.buf;
	size = (int)sizeof (struct T_bind_req);
	use_xpg41tpi = (_T_IS_XTI(api_semantics)) &&
	    ((tiptr->ti_prov_flag & XPG4_1) != 0);
	if (use_xpg41tpi)
		/* XTI call and provider knows the XTI inspired TPI */
		bind_reqp->PRIM_type = T_BIND_REQ;
	else
		/* TLI caller or old TPI provider */
		bind_reqp->PRIM_type = O_T_BIND_REQ;
	bind_reqp->ADDR_length = (req == NULL ? 0 : req->addr.len);
	bind_reqp->ADDR_offset = 0;
	bind_reqp->CONIND_number = (req == NULL ? 0 : req->qlen);

	if (bind_reqp->ADDR_length) {
		if (_t_aligned_copy(&ctlbuf, (int)bind_reqp->ADDR_length,
		    size, req->addr.buf, &bind_reqp->ADDR_offset) < 0) {
			/*
			 * Aligned copy will overflow buffer allocated based
			 * on transport maximum address length.
			 * Return error.
			 */
			t_errno = TBADADDR;
			goto err_out;
		}
		size = bind_reqp->ADDR_offset + bind_reqp->ADDR_length;
	}

	if (_t_do_ioctl(fd, ctlbuf.buf, size, TI_BIND, &retlen) < 0)
		goto err_out;

	if (retlen < (int)sizeof (struct T_bind_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	bind_ackp = (struct T_bind_ack *)ctlbuf.buf;

	if ((req != NULL) && req->addr.len != 0 &&
	    (use_xpg41tpi == 0) && (_T_IS_XTI(api_semantics))) {
		/*
		 * Best effort to do XTI on old TPI.
		 *
		 * Match address requested or unbind and fail with
		 * TADDRBUSY.
		 *
		 * XXX - Hack alert! Should we do this at all?
		 * Not "supported" as it may not work if the encoding of
		 * the address is different in the returned address.  This
		 * will also have trouble with TCP/UDP wildcard port
		 * requests.
		 */
		if ((req->addr.len != bind_ackp->ADDR_length) ||
		    (memcmp(req->addr.buf, ctlbuf.buf +
		    bind_ackp->ADDR_offset, req->addr.len) != 0)) {
			(void) _tx_unbind_locked(fd, tiptr, &ctlbuf);
			t_errno = TADDRBUSY;
			goto err_out;
		}
	}

	tiptr->ti_ocnt = 0;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	_T_TX_NEXTSTATE(T_BIND, tiptr, "t_bind: invalid state event T_BIND");

	if (ret != NULL) {
		if (_T_IS_TLI(api_semantics) || ret->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(bind_reqp->ADDR_length,
			    ret->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(ret->addr.buf, ctlbuf.buf +
			    bind_ackp->ADDR_offset,
			    (size_t)bind_ackp->ADDR_length);
			ret->addr.len = bind_ackp->ADDR_length;
		}
		ret->qlen = bind_ackp->CONIND_number;
	}

	tiptr->ti_qlen = (uint_t)bind_ackp->CONIND_number;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (0);
	/* NOTREACHED */

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	errno = sv_errno;
	return (-1);
}
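
/*
 * Usage sketch (compiled out, not part of the library build): how an
 * application typically reaches _tx_bind() through the public t_bind()
 * entry point.  The "/dev/tcp" device name and the qlen value are
 * illustrative assumptions, not requirements of this file.
 */
#ifdef EXAMPLE_USAGE
#include <fcntl.h>
#include <tiuser.h>

static int
example_bind(void)
{
	struct t_bind *breq, *bret;
	int fd;

	if ((fd = t_open("/dev/tcp", O_RDWR, NULL)) < 0) {
		t_error("t_open");
		return (-1);
	}
	/* t_alloc() sizes the netbufs from the transport's info */
	breq = (struct t_bind *)t_alloc(fd, T_BIND, T_ALL);
	bret = (struct t_bind *)t_alloc(fd, T_BIND, T_ALL);
	if (breq == NULL || bret == NULL) {
		t_error("t_alloc");
		return (-1);
	}
	breq->addr.len = 0;	/* let the transport choose an address */
	breq->qlen = 5;		/* queue up to 5 connect indications */
	if (t_bind(fd, breq, bret) < 0) {
		t_error("t_bind");
		return (-1);
	}
	/* bret->addr now holds the address actually bound */
	return (fd);
}
#endif	/* EXAMPLE_USAGE */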
int
_tx_rcvuderr(int fd, struct t_uderr *uderr, int api_semantics)
{
	struct strbuf ctlbuf, databuf;
	int flg;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc;
	int use_lookbufs = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype != T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Is there a unitdata error indication in the look buffer?
	 */
	if (tiptr->ti_lookcnt > 0) {
		ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen;
		ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf;
		/* Note: ctlbuf.maxlen not used in this case */

		/* LINTED pointer cast */
		assert(((union T_primitives *)ctlbuf.buf)->type ==
		    T_UDERROR_IND);

		databuf.maxlen = 0;
		databuf.len = 0;
		databuf.buf = NULL;

		use_lookbufs = 1;

	} else {
		if ((retval = _t_look_locked(fd, tiptr, 0,
		    api_semantics)) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}
		if (retval != T_UDERR) {
			t_errno = TNOUDERR;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		/*
		 * Acquire ctlbuf for use in sending/receiving control part
		 * of the message.
		 */
		if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		databuf.maxlen = 0;
		databuf.len = 0;
		databuf.buf = NULL;

		flg = 0;

		/*
		 * Since we already verified that a unitdata error
		 * indication is pending, we assume that this getmsg()
		 * will not block indefinitely.
		 */
		if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
			t_errno = TSYSERR;
			goto err_out;
		}
		/*
		 * Did we get the entire message?
		 */
		if (retval > 0) {
			t_errno = TSYSERR;
			errno = EIO;
			goto err_out;
		}
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	if ((ctlbuf.len < (int)sizeof (struct T_uderror_ind)) ||
	    (pptr->type != T_UDERROR_IND)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	if (uderr) {
		if (_T_IS_TLI(api_semantics) || uderr->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->uderror_ind.DEST_length,
			    uderr->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(uderr->addr.buf, ctlbuf.buf +
			    pptr->uderror_ind.DEST_offset,
			    (size_t)pptr->uderror_ind.DEST_length);
			uderr->addr.len =
			    (unsigned int)pptr->uderror_ind.DEST_length;
		}
		if (_T_IS_TLI(api_semantics) || uderr->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->uderror_ind.OPT_length,
			    uderr->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(uderr->opt.buf, ctlbuf.buf +
			    pptr->uderror_ind.OPT_offset,
			    (size_t)pptr->uderror_ind.OPT_length);
			uderr->opt.len =
			    (unsigned int)pptr->uderror_ind.OPT_length;
		}
		uderr->error = pptr->uderror_ind.ERROR_type;
	}

	_T_TX_NEXTSTATE(T_RCVUDERR, tiptr,
	    "t_rcvuderr: invalid state event T_RCVUDERR");

	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;
	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
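
/*
 * Usage sketch (compiled out, not part of the library build): a datagram
 * client consuming a unitdata error indication with the public
 * t_rcvuderr() entry point after t_look() reports T_UDERR.  The helper
 * name and calling context are illustrative assumptions.
 */
#ifdef EXAMPLE_USAGE
#include <tiuser.h>

static void
example_drain_uderr(int fd)
{
	struct t_uderr *ud;

	if (t_look(fd) != T_UDERR)
		return;
	ud = (struct t_uderr *)t_alloc(fd, T_UDERROR, T_ALL);
	if (ud == NULL) {
		t_error("t_alloc");
		return;
	}
	if (t_rcvuderr(fd, ud) == 0) {
		/* ud->error holds the protocol-specific error code */
	}
	(void) t_free((char *)ud, T_UDERROR);
}
#endif	/* EXAMPLE_USAGE */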
int
_tx_listen(int fd, struct t_call *call, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc, didralloc;
	int flg = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		sv_errno = errno;
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing it for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_IDLE ||
		    tiptr->ti_state == T_INCON)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_qlen == 0) {
			t_errno = TBADQLEN;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_ocnt == tiptr->ti_qlen) {
			if (!(tiptr->ti_flags & TX_TQFULL_NOTIFIED)) {
				tiptr->ti_flags |= TX_TQFULL_NOTIFIED;
				t_errno = TQFULL;
				sig_mutex_unlock(&tiptr->ti_lock);
				return (-1);
			}
		}
	}

	/*
	 * Check if something is in the look buffer.
	 */
	if (tiptr->ti_lookcnt > 0) {
		t_errno = TLOOK;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	/*
	 * Acquire databuf for use in sending/receiving data part.
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		int sv_errno = errno;

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * This is a call that may block indefinitely, so we drop the
	 * lock and allow signals in the MT case here and reacquire it.
	 * The error case should roll back state changes done above
	 * (there happens to be no state change here).
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	if (databuf.len == -1)
		databuf.len = 0;

	/*
	 * Did we get the entire message?
	 */
	if (retval > 0) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * Is the control part large enough to determine the type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {

	case T_CONN_IND:
		if ((ctlbuf.len < (int)sizeof (struct T_conn_ind)) ||
		    (ctlbuf.len < (int)(pptr->conn_ind.OPT_length +
		    pptr->conn_ind.OPT_offset))) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}

		/*
		 * Change state, increment the outstanding connection
		 * indication count, and instantiate the "sequence" return
		 * parameter.
		 * Note: It is correct semantics according to the spec to
		 * do this despite the possibility of a TBUFOVFLW error
		 * later.  The spec treats the TBUFOVFLW error in general
		 * as a special case which can be ignored by applications
		 * that do not really need the data returned in the
		 * 'netbuf' structures.
		 */
		_T_TX_NEXTSTATE(T_LISTN, tiptr,
		    "t_listen: invalid state event T_LISTN");
		tiptr->ti_ocnt++;
		call->sequence = pptr->conn_ind.SEQ_number;

		if (_T_IS_TLI(api_semantics) || call->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.SRC_length,
			    call->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->addr.buf, ctlbuf.buf +
			    (size_t)pptr->conn_ind.SRC_offset,
			    (unsigned int)pptr->conn_ind.SRC_length);
			call->addr.len = pptr->conn_ind.SRC_length;
		}
		if (_T_IS_TLI(api_semantics) || call->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.OPT_length,
			    call->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->opt.buf,
			    ctlbuf.buf + pptr->conn_ind.OPT_offset,
			    (size_t)pptr->conn_ind.OPT_length);
			call->opt.len = pptr->conn_ind.OPT_length;
		}
		if (_T_IS_TLI(api_semantics) || call->udata.maxlen > 0) {
			if (databuf.len > (int)call->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			call->udata.len = databuf.len;
		}

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (0);

	case T_DISCON_IND:
		/*
		 * Append to the events in the "look buffer"
		 * list of events. This routine may defer signals.
		 */
		if (_t_register_lookevent(tiptr, databuf.buf, databuf.len,
		    ctlbuf.buf, ctlbuf.len) < 0) {
			t_errno = TSYSERR;
			errno = ENOMEM;
			goto err_out;
		}
		t_errno = TLOOK;
		goto err_out;

	default:
		break;
	}
	t_errno = TSYSERR;
	errno = EPROTO;

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
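
/*
 * Usage sketch (compiled out, not part of the library build): a
 * connection-oriented server collecting one connect indication with the
 * public t_listen() entry point and accepting it on the listening
 * endpoint.  Accepting on the same endpoint and the helper name are
 * illustrative assumptions.
 */
#ifdef EXAMPLE_USAGE
#include <tiuser.h>

static int
example_accept_one(int fd)
{
	struct t_call *call;

	call = (struct t_call *)t_alloc(fd, T_CALL, T_ALL);
	if (call == NULL) {
		t_error("t_alloc");
		return (-1);
	}
	if (t_listen(fd, call) < 0) {
		/* TLOOK here usually means a disconnect arrived instead */
		t_error("t_listen");
		return (-1);
	}
	/* Accept on the listening endpoint itself */
	if (t_accept(fd, fd, call) < 0) {
		t_error("t_accept");
		return (-1);
	}
	(void) t_free((char *)call, T_CALL);
	return (0);
}
#endif	/* EXAMPLE_USAGE */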