int _tx_rcvuderr(int fd, struct t_uderr *uderr, int api_semantics) { struct strbuf ctlbuf, databuf; int flg; int retval; union T_primitives *pptr; struct _ti_user *tiptr; int sv_errno; int didalloc; int use_lookbufs = 0; if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL) return (-1); sig_mutex_lock(&tiptr->ti_lock); if (tiptr->ti_servtype != T_CLTS) { t_errno = TNOTSUPPORT; sig_mutex_unlock(&tiptr->ti_lock); return (-1); } /* * is there a unitdata error indication in look buffer */ if (tiptr->ti_lookcnt > 0) { ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen; ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf; /* Note: cltbuf.maxlen not used in this case */ /* LINTED pointer cast */ assert(((union T_primitives *)ctlbuf.buf)->type == T_UDERROR_IND); databuf.maxlen = 0; databuf.len = 0; databuf.buf = NULL; use_lookbufs = 1; } else { if ((retval = _t_look_locked(fd, tiptr, 0, api_semantics)) < 0) { sv_errno = errno; sig_mutex_unlock(&tiptr->ti_lock); errno = sv_errno; return (-1); } if (retval != T_UDERR) { t_errno = TNOUDERR; sig_mutex_unlock(&tiptr->ti_lock); return (-1); } /* * Acquire ctlbuf for use in sending/receiving control part * of the message. */ if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) { sv_errno = errno; sig_mutex_unlock(&tiptr->ti_lock); errno = sv_errno; return (-1); } databuf.maxlen = 0; databuf.len = 0; databuf.buf = NULL; flg = 0; /* * Since we already verified that a unitdata error * indication is pending, we assume that this getmsg() * will not block indefinitely. */ if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) { t_errno = TSYSERR; goto err_out; } /* * did I get entire message? 
*/ if (retval > 0) { t_errno = TSYSERR; errno = EIO; goto err_out; } } /* LINTED pointer cast */ pptr = (union T_primitives *)ctlbuf.buf; if ((ctlbuf.len < (int)sizeof (struct T_uderror_ind)) || (pptr->type != T_UDERROR_IND)) { t_errno = TSYSERR; errno = EPROTO; goto err_out; } if (uderr) { if (_T_IS_TLI(api_semantics) || uderr->addr.maxlen > 0) { if (TLEN_GT_NLEN(pptr->uderror_ind.DEST_length, uderr->addr.maxlen)) { t_errno = TBUFOVFLW; goto err_out; } (void) memcpy(uderr->addr.buf, ctlbuf.buf + pptr->uderror_ind.DEST_offset, (size_t)pptr->uderror_ind.DEST_length); uderr->addr.len = (unsigned int)pptr->uderror_ind.DEST_length; } if (_T_IS_TLI(api_semantics) || uderr->addr.maxlen > 0) { if (TLEN_GT_NLEN(pptr->uderror_ind.OPT_length, uderr->opt.maxlen)) { t_errno = TBUFOVFLW; goto err_out; } (void) memcpy(uderr->opt.buf, ctlbuf.buf + pptr->uderror_ind.OPT_offset, (size_t)pptr->uderror_ind.OPT_length); uderr->opt.len = (unsigned int)pptr->uderror_ind.OPT_length; } uderr->error = pptr->uderror_ind.ERROR_type; } _T_TX_NEXTSTATE(T_RCVUDERR, tiptr, "t_rcvuderr: invalid state event T_RCVUDERR"); if (use_lookbufs) _t_free_looklist_head(tiptr); else { if (didalloc) free(ctlbuf.buf); else tiptr->ti_ctlbuf = ctlbuf.buf; } sig_mutex_unlock(&tiptr->ti_lock); return (0); err_out: sv_errno = errno; if (use_lookbufs) _t_free_looklist_head(tiptr); else { if (didalloc) free(ctlbuf.buf); else tiptr->ti_ctlbuf = ctlbuf.buf; } sig_mutex_unlock(&tiptr->ti_lock); errno = sv_errno; return (-1); }
/*
 * _tx_bind(): bind the transport endpoint 'fd' to the protocol address
 * in 'req' (or let the provider choose one when req is NULL) by sending
 * a T_BIND_REQ / O_T_BIND_REQ down via the TI_BIND ioctl.  The address
 * actually bound and the negotiated qlen are returned through 'ret'
 * when non-NULL.
 *
 * Returns 0 on success; -1 on failure with t_errno (and possibly errno)
 * set.  All signals are blocked for the duration because the bind
 * exchange is not idempotent.
 */
int
_tx_bind(
	int fd,
	const struct t_bind *req,
	struct t_bind *ret,
	int api_semantics
)
{
	struct T_bind_req *bind_reqp;
	struct T_bind_ack *bind_ackp;
	int size, sv_errno, retlen;
	struct _ti_user *tiptr;
	sigset_t mask;
	int didalloc;
	int use_xpg41tpi;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * We block all signals since TI_BIND, which sends a TPI message
	 * O_T_BIND_REQ down, is not an idempotetent operation
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing for TLI may break existing applications
		 */
		if (tiptr->ti_state != T_UNBND) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			return (-1);
		}
	}

	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_bind_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	bind_reqp = (struct T_bind_req *)ctlbuf.buf;
	size = (int)sizeof (struct T_bind_req);

	/* Does this XTI caller talk to a provider that knows XTI TPI? */
	use_xpg41tpi = (_T_IS_XTI(api_semantics)) &&
	    ((tiptr->ti_prov_flag & XPG4_1) != 0);
	if (use_xpg41tpi)
		/* XTI call and provider knows the XTI inspired TPI */
		bind_reqp->PRIM_type = T_BIND_REQ;
	else
		/* TLI caller old TPI provider */
		bind_reqp->PRIM_type = O_T_BIND_REQ;

	/* req == NULL means "bind to any address, qlen 0" */
	bind_reqp->ADDR_length = (req == NULL? 0: req->addr.len);
	bind_reqp->ADDR_offset = 0;
	bind_reqp->CONIND_number = (req == NULL? 0: req->qlen);

	if (bind_reqp->ADDR_length) {
		/* Append the caller's address, aligned, after the header */
		if (_t_aligned_copy(&ctlbuf, (int)bind_reqp->ADDR_length,
		    size, req->addr.buf, &bind_reqp->ADDR_offset) < 0) {
			/*
			 * Aligned copy will overflow buffer allocated based
			 * on transport maximum address length.
			 * return error.
			 */
			t_errno = TBADADDR;
			goto err_out;
		}
		size = bind_reqp->ADDR_offset + bind_reqp->ADDR_length;
	}

	if (_t_do_ioctl(fd, ctlbuf.buf, size, TI_BIND, &retlen) < 0) {
		goto err_out;
	}

	/* Ack must be at least a full T_bind_ack header */
	if (retlen < (int)sizeof (struct T_bind_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* ctlbuf.buf now holds the provider's T_BIND_ACK */
	/* LINTED pointer cast */
	bind_ackp = (struct T_bind_ack *)ctlbuf.buf;

	if ((req != NULL) && req->addr.len != 0 &&
	    (use_xpg41tpi == 0) && (_T_IS_XTI(api_semantics))) {
		/*
		 * Best effort to do XTI on old TPI.
		 *
		 * Match address requested or unbind and fail with
		 * TADDRBUSY.
		 *
		 * XXX - Hack alert ! Should we do this at all ?
		 * Not "supported" as may not work if encoding of
		 * address is different in the returned address. This
		 * will also have trouble with TCP/UDP wildcard port
		 * requests
		 */
		if ((req->addr.len != bind_ackp->ADDR_length) ||
		    (memcmp(req->addr.buf, ctlbuf.buf +
		    bind_ackp->ADDR_offset, req->addr.len) != 0)) {
			(void) _tx_unbind_locked(fd, tiptr, &ctlbuf);
			t_errno = TADDRBUSY;
			goto err_out;
		}
	}

	tiptr->ti_ocnt = 0;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	_T_TX_NEXTSTATE(T_BIND, tiptr,
	    "t_bind: invalid state event T_BIND");

	if (ret != NULL) {
		if (_T_IS_TLI(api_semantics) || ret->addr.maxlen > 0) {
			/*
			 * NOTE(review): this check reads ADDR_length via
			 * bind_reqp even though the buffer now holds the
			 * ack and the copy below uses bind_ackp.  Both
			 * pointers overlay ctlbuf.buf, so this presumably
			 * relies on T_bind_req/T_bind_ack sharing the same
			 * field layout — confirm, or use bind_ackp here.
			 */
			if (TLEN_GT_NLEN(bind_reqp->ADDR_length,
			    ret->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(ret->addr.buf, ctlbuf.buf +
			    bind_ackp->ADDR_offset,
			    (size_t)bind_ackp->ADDR_length);
			ret->addr.len = bind_ackp->ADDR_length;
		}
		ret->qlen = bind_ackp->CONIND_number;
	}

	/* Remember negotiated qlen for later t_listen() checks */
	tiptr->ti_qlen = (uint_t)bind_ackp->CONIND_number;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (0);
	/* NOTREACHED */
err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	errno = sv_errno;
	return (-1);
}
/*
 * _tx_rcvdis(): retrieve a disconnect indication (T_DISCON_IND) on a
 * connection-oriented endpoint.  The disconnect reason, sequence number
 * and any user data are returned through 'discon' when non-NULL.
 *
 * Returns 0 on success; -1 on failure with t_errno (and possibly errno)
 * set.  The indication is taken from the look-buffer list when it is
 * already queued there, otherwise it is pulled off the stream.
 */
int
_tx_rcvdis(int fd, struct t_discon *discon, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int flg = 0;
	int didalloc, didralloc;	/* meaningful only when use_lookbufs == 0 */
	int use_lookbufs = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * Acquire per thread lock.
	 * Note: Lock is held across most of this routine
	 * including the blocking getmsg() call. This is fine
	 * because it is first verfied that an event is pending
	 */
	sig_mutex_lock(&tiptr->ti_lock);

	/* Disconnect indications do not exist for connectionless mode */
	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing for TLI may break existing applications
		 */
		if (!(tiptr->ti_state == T_DATAXFER ||
		    tiptr->ti_state == T_OUTCON ||
		    tiptr->ti_state == T_OUTREL ||
		    tiptr->ti_state == T_INREL ||
		    (tiptr->ti_state == T_INCON && tiptr->ti_ocnt > 0))) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
	}

	/*
	 * Handle likely scenario as special case:
	 * Is there a discon in look buffer as the first
	 * event in the lookbuffer, is so just get it.
	 */
	if ((tiptr->ti_lookcnt > 0) &&
	    /* LINTED pointer cast */
	    (*((t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf)
	    == T_DISCON_IND)) {
		/*
		 * The T_DISCON_IND is already in the look buffer
		 */
		ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen;
		ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf;
		/* Note: ctlbuf.maxlen not used in this case */
		databuf.len = tiptr->ti_lookbufs.tl_lookdlen;
		databuf.buf = tiptr->ti_lookbufs.tl_lookdbuf;
		/* Note databuf.maxlen not used in this case */
		use_lookbufs = 1;
	} else {
		/* Verify that a disconnect event is actually pending */
		if ((retval = _t_look_locked(fd, tiptr, 0,
		    api_semantics)) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}
		if (retval != T_DISCONNECT) {
			t_errno = TNODIS;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		/*
		 * get disconnect off read queue.
		 * use ctl and rcv buffers
		 *
		 * Acquire ctlbuf for use in sending/receiving control part
		 * of the message.
		 */
		if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}
		/*
		 * Acquire databuf for use in sending/receiving data part
		 */
		if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
			sv_errno = errno;
			if (didalloc)
				free(ctlbuf.buf);
			else
				tiptr->ti_ctlbuf = ctlbuf.buf;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		/*
		 * Since we already verified that a disconnect event
		 * is present, we assume that this getmsg() cannot
		 * block indefinitely
		 */
		do {
			retval = getmsg(fd, &ctlbuf, &databuf, &flg);
		} while (retval < 0 && errno == EINTR);
		if (retval < 0) {
			t_errno = TSYSERR;
			goto err_out;
		}
		/* getmsg() reports "no data part" as len == -1 */
		if (databuf.len == -1)
			databuf.len = 0;

		/*
		 * did I get entire message?
		 */
		if (retval > 0) {
			t_errno = TSYSERR;
			errno = EIO;
			goto err_out;
		}
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	/* Sanity check: control part must really be a T_DISCON_IND */
	if ((ctlbuf.len < (int)sizeof (struct T_discon_ind)) ||
	    (pptr->type != T_DISCON_IND)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/*
	 * clear more and expedited flags
	 */
	tiptr->ti_flags &= ~(MORE | EXPEDITED);

	/* Drive the state machine; which event fires depends on ocnt */
	if (tiptr->ti_ocnt <= 0) {
		_T_TX_NEXTSTATE(T_RCVDIS1, tiptr,
		    "t_rcvdis: invalid state event T_RCVDIS1");
	} else {
		if (tiptr->ti_ocnt == 1) {
			_T_TX_NEXTSTATE(T_RCVDIS2, tiptr,
			    "t_rcvdis: invalid state event T_RCVDIS2");
		} else {
			_T_TX_NEXTSTATE(T_RCVDIS3, tiptr,
			    "t_rcvdis: invalid state event T_RCVDIS3");
		}
		/* One fewer outstanding connect indication now */
		tiptr->ti_ocnt--;
		tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;
	}

	if (discon != NULL) {
		/* Copy out user data, bounded by the caller's buffer */
		if (_T_IS_TLI(api_semantics) || discon->udata.maxlen > 0) {
			if (databuf.len > (int)discon->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(discon->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			discon->udata.len = databuf.len;
		}
		discon->reason = pptr->discon_ind.DISCON_reason;
		discon->sequence = pptr->discon_ind.SEQ_number;
	}

	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;
	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
/*
 * _tx_rcvv(): scatter-read normal or expedited data from a
 * connection-oriented endpoint into the iovec array 'tiov'
 * (XNS5 XTI only).  '*flags' is filled with T_MORE/T_EXPEDITED
 * as appropriate.
 *
 * Returns the number of bytes received on success; -1 on failure
 * with t_errno (and possibly errno) set, TLOOK when an asynchronous
 * event (T_DISCON_IND/T_ORDREL_IND) must be consumed first.
 */
int
_tx_rcvv(int fd, struct t_iovec *tiov, unsigned int tiovcount,
    int *flags, int api_semantics)
{
	struct strbuf ctlbuf, databuf;
	int retval, flg = 0;
	int msglen;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc;
	unsigned int nbytes;
	char *dataptr;		/* temporary contiguous receive buffer */

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	/* Receive of stream data is meaningless in connectionless mode */
	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/* t_rcvv() is only exported through the XNS5 XTI interface */
	assert(api_semantics == TX_XTI_XNS5_API);
	if (tiovcount == 0 || tiovcount > T_IOV_MAX) {
		t_errno = TBADDATA;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (!(tiptr->ti_state == T_DATAXFER ||
	    tiptr->ti_state == T_OUTREL)) {
		t_errno = TOUTSTATE;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Check in lookbuf for stuff
	 */
	if (tiptr->ti_lookcnt > 0) {
		/*
		 * Implied preference rules give priority to
		 * T_DISCON_IND over T_ORDREL_IND. Also certain errors like
		 * data received after T_ORDREL_IND or a duplicate T_ORDREL_IND
		 * after a T_ORDRELING have priority over TLOOK.
		 * This manifests in following code behavior.
		 *
		 * (1) If something in lookbuf then check
		 * the stream head also. This may result
		 * in retuning a TLOOK error but only if there are
		 * - message at stream head but look buffer
		 *   has a T_DISCON_IND event.
		 * - no messages are on the stream head
		 *
		 * (2) If there are messages on the stream head and
		 * all of them are T_ORDREL_IND(i.e. no message in
		 * look buffer is T_DISCON_IND), there
		 * could be data on stream head to be picked up and
		 * we work on the stream head and not return TLOOK.
		 * We remove the event on the stream head and queue it.
		 *
		 */
		do {
			retval = ioctl(fd, I_NREAD, &msglen);
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			sv_errno = errno;
			t_errno = TSYSERR;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		if (retval > 0) {
			/*
			 * If any T_DISCON_IND event in look buffer
			 * list then return TLOOK. Else continue
			 * processing as what could be on the stream
			 * head might be a possible T_DISCON_IND (which
			 * would have priority over the T_ORDREL_INDs
			 * on the look buffer.)
			 */
			struct _ti_lookbufs *tlbs;

			tlbs = &tiptr->ti_lookbufs;
			do {
				/* LINTED pointer cast */
				if (*((t_scalar_t *)tlbs->tl_lookcbuf)
				    == T_DISCON_IND) {
					t_errno = TLOOK;
					sig_mutex_unlock(&tiptr->ti_lock);
					return (-1);
				}
			} while ((tlbs = tlbs->tl_next) != NULL);
		} else {	/* retval == 0 */
			/*
			 * Nothing on stream head so whatever in
			 * look buffer has nothing that might override
			 * it.
			 */
			t_errno = TLOOK;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/* Allocate one contiguous buffer covering the whole iovec */
	nbytes = _t_bytecount_upto_intmax(tiov, tiovcount);
	dataptr = NULL;
	if (nbytes != 0 && ((dataptr = malloc(nbytes)) == NULL)) {
		sv_errno = errno;
		t_errno = TSYSERR;
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	databuf.maxlen = (int)nbytes;
	databuf.len = 0;
	databuf.buf = dataptr;
	*flags = 0;

	/*
	 * This is a call that may block indefinitely so we drop the
	 * lock and allow signals in MT case here and reacquire it.
	 * Error case should roll back state changes done above
	 * (happens to be no state change here)
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	assert((retval & MORECTL) == 0);	/* MORECTL should not be on */

	/* getmsg() reports "no data part" as len == -1 */
	if (databuf.len == -1)
		databuf.len = 0;

	if (ctlbuf.len > 0) {
		/* Control part present: must at least carry a primitive type */
		if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/* LINTED pointer cast */
		pptr = (union T_primitives *)ctlbuf.buf;
		switch (pptr->type) {

		case T_EXDATA_IND:
			*flags |= T_EXPEDITED;
			/* Partial read: rest of the ETSDU still pending */
			if (retval > 0)
				tiptr->ti_flags |= EXPEDITED;
			/* FALLTHROUGH */
		case T_DATA_IND:
			/*
			 * Uses the fact T_DATA_IND and T_EXDATA_IND
			 * are same in size
			 */
			if ((ctlbuf.len <
			    (int)sizeof (struct T_data_ind)) ||
			    (tiptr->ti_lookcnt > 0)) {
				/*
				 * ti_lookcnt > 0 implies data
				 * received after T_DISCON_IND or
				 * T_ORDREL_IND hence error
				 */
				t_errno = TSYSERR;
				errno = EPROTO;
				goto err_out;
			}
			if ((pptr->data_ind.MORE_flag) || retval)
				*flags |= T_MORE;
			if ((pptr->data_ind.MORE_flag) && retval)
				tiptr->ti_flags |= MORE;
			/*
			 * No real state change on T_RCV event (noop)
			 *
			 * We invoke the macro only for error logging
			 * part of its capabilities when in a bad state.
			 */
			_T_TX_NEXTSTATE(T_RCV, tiptr,
			    "t_rcvv: invalid state event T_RCV");
			if (didalloc)
				free(ctlbuf.buf);
			else
				tiptr->ti_ctlbuf = ctlbuf.buf;
			/* Scatter received bytes into the caller's iovec */
			_t_scatter(&databuf, tiov, tiovcount);
			if (dataptr != NULL)
				free(dataptr);
			sig_mutex_unlock(&tiptr->ti_lock);
			return (databuf.len);

		case T_ORDREL_IND:
			if (tiptr->ti_lookcnt > 0) {
				/*
				 * ti_lookcnt > 0 implies T_ORDREL_IND
				 * received after T_DISCON_IND or
				 * another T_ORDREL_IND hence error.
				 */
				t_errno = TSYSERR;
				errno = EPROTO;
				goto err_out;
			}
			/* FALLTHROUGH */
		case T_DISCON_IND:
			/*
			 * Post event (T_ORDREL_IND/T_DISCON_IND) to
			 * the lookbuffer list.
			 */
			if (_t_register_lookevent(tiptr, databuf.buf,
			    databuf.len, ctlbuf.buf, ctlbuf.len) < 0) {
				t_errno = TSYSERR;
				errno = ENOMEM;
				goto err_out;
			}
			/*
			 * We know that T_DISCON_IND is stored in
			 * last look buffer. If there is more data
			 * that follows, we try to append it to
			 * the same look buffer
			 */
			if (retval & MOREDATA) {
				ctlbuf.maxlen = 0; /* XXX why ? */
				ctlbuf.len = 0;
				/*
				 * XXX Will break (-ve maxlen) for
				 * transport provider with unbounded
				 * T_DISCON_IND data part (-1).
				 */
				databuf.maxlen =
				    tiptr->ti_rcvsize - databuf.len;
				databuf.len = 0;
				/* Append directly after the queued data */
				databuf.buf =
				    tiptr->ti_lookbufs.tl_lookdbuf +
				    tiptr->ti_lookbufs.tl_lookdlen;
				*flags = 0;
				/*
				 * Since MOREDATA was set, we assume
				 * that this getmsg will not block
				 * indefinitely
				 */
				do {
					retval = getmsg(fd, &ctlbuf,
					    &databuf, &flg);
				} while (retval < 0 && errno == EINTR);
				if (retval < 0) {
					t_errno = TSYSERR;
					goto err_out;
				}
				if (databuf.len == -1)
					databuf.len = 0;
				if (retval > 0) {
					/* MORECTL should not be on */
					assert((retval & MORECTL) == 0);
					/*
					 * XXX - Why ?
					 * No support for unbounded data
					 * on T_DISCON_IND ?
					 */
					t_errno = TSYSERR;
					errno = EPROTO;
					goto err_out;
				}
				tiptr->ti_lookbufs.tl_lookdlen +=
				    databuf.len;
			}
			t_errno = TLOOK;
			goto err_out;
		default:
			break;
		}
		/* Unknown/unexpected primitive in the control part */
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	} else {		/* else for "if (ctlbuf.len > 0)" */
		/* Pure data message (no control part) */
		if (!retval &&
		    (tiptr->ti_flags & MORE)) {
			*flags |= T_MORE;
			tiptr->ti_flags &= ~MORE;
		}
		if (retval & MOREDATA)
			*flags |= T_MORE;

		/*
		 * If inside an ETSDU, set expedited flag and turn
		 * off internal version when reach end of "ETIDU".
		 */
		if (tiptr->ti_flags & EXPEDITED) {
			*flags |= T_EXPEDITED;
			if (!retval)
				tiptr->ti_flags &= ~EXPEDITED;
		}

		/*
		 * No real state change on T_RCV events (It is a NOOP)
		 *
		 * We invoke the macro only for error logging
		 * part of its capabilities when in a bad state.
		 */
		_T_TX_NEXTSTATE(T_RCV, tiptr,
		    "t_rcvv: state invalid T_RCV event");
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		/* Scatter received bytes into the caller's iovec */
		_t_scatter(&databuf, tiov, tiovcount);
		if (dataptr != NULL)
			free(dataptr);
		sig_mutex_unlock(&tiptr->ti_lock);
		return (databuf.len);
	}
	/* NOTREACHED */

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (dataptr != NULL)
		free(dataptr);
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
int _tx_getinfo(int fd, struct t_info *info, int api_semantics) { struct T_info_req *inforeqp; struct T_info_ack *infoackp; int retlen; struct _ti_user *tiptr; int retval, sv_errno, didalloc; struct strbuf ctlbuf; if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == 0) return (-1); sig_mutex_lock(&tiptr->ti_lock); /* * Acquire buffer for use in sending/receiving the message. * Note: assumes (correctly) that ti_ctlsize is large enough * to hold sizeof (struct T_info_req/ack) */ if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) { sv_errno = errno; sig_mutex_unlock(&tiptr->ti_lock); errno = sv_errno; return (-1); } /* LINTED pointer cast */ inforeqp = (struct T_info_req *)ctlbuf.buf; inforeqp->PRIM_type = T_INFO_REQ; do { retval = _t_do_ioctl(fd, ctlbuf.buf, (int)sizeof (struct T_info_req), TI_GETINFO, &retlen); } while (retval < 0 && errno == EINTR); if (retval < 0) goto err_out; if (retlen != (int)sizeof (struct T_info_ack)) { t_errno = TSYSERR; errno = EIO; goto err_out; } /* LINTED pointer cast */ infoackp = (struct T_info_ack *)ctlbuf.buf; info->addr = infoackp->ADDR_size; info->options = infoackp->OPT_size; info->tsdu = infoackp->TSDU_size; info->etsdu = infoackp->ETSDU_size; info->connect = infoackp->CDATA_size; info->discon = infoackp->DDATA_size; info->servtype = infoackp->SERV_type; if (_T_IS_XTI(api_semantics)) { /* XTI ONLY - TLI t_info struct does not have "flags" */ info->flags = 0; if (infoackp->PROVIDER_flag & (SENDZERO|OLD_SENDZERO)) info->flags |= T_SENDZERO; } if (didalloc) free(ctlbuf.buf); else tiptr->ti_ctlbuf = ctlbuf.buf; sig_mutex_unlock(&tiptr->ti_lock); return (0); err_out: sv_errno = errno; if (didalloc) free(ctlbuf.buf); else tiptr->ti_ctlbuf = ctlbuf.buf; sig_mutex_unlock(&tiptr->ti_lock); errno = sv_errno; return (-1); }
/*
 * _tx_listen(): wait for a connect indication (T_CONN_IND) on a bound
 * connection-oriented endpoint and return the caller's address, options,
 * user data and sequence number in 'call'.
 *
 * Returns 0 on success; -1 on failure with t_errno (and possibly errno)
 * set.  A T_DISCON_IND arriving instead is queued on the look-buffer
 * list and reported as TLOOK.
 */
int
_tx_listen(int fd, struct t_call *call, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc, didralloc;
	int flg = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	/* Connect indications do not exist for connectionless mode */
	if (tiptr->ti_servtype == T_CLTS) {
		sv_errno = errno;
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing for TLI may break existing applications
		 */
		if (!(tiptr->ti_state == T_IDLE ||
		    tiptr->ti_state == T_INCON)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
		/* Endpoint must have been bound with qlen > 0 to listen */
		if (tiptr->ti_qlen == 0) {
			t_errno = TBADQLEN;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
		/*
		 * Queue of outstanding connect indications full:
		 * report TQFULL once, then allow blocking again.
		 */
		if (tiptr->ti_ocnt == tiptr->ti_qlen) {
			if (!(tiptr->ti_flags & TX_TQFULL_NOTIFIED)) {
				tiptr->ti_flags |= TX_TQFULL_NOTIFIED;
				t_errno = TQFULL;
				sig_mutex_unlock(&tiptr->ti_lock);
				return (-1);
			}
		}
	}

	/*
	 * check if something in look buffer
	 */
	if (tiptr->ti_lookcnt > 0) {
		t_errno = TLOOK;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		int sv_errno = errno;

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * This is a call that may block indefinitely so we drop the
	 * lock and allow signals in MT case here and reacquire it.
	 * Error case should roll back state changes done above
	 * (happens to be no state change here)
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	/* getmsg() reports "no data part" as len == -1 */
	if (databuf.len == -1)
		databuf.len = 0;

	/*
	 * did I get entire message?
	 */
	if (retval > 0) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is ctl part large enough to determine type
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {

	case T_CONN_IND:
		/* Validate both the header and the embedded options span */
		if ((ctlbuf.len < (int)sizeof (struct T_conn_ind)) ||
		    (ctlbuf.len < (int)(pptr->conn_ind.OPT_length +
		    pptr->conn_ind.OPT_offset))) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * Change state and increment outstanding connection
		 * indication count and instantiate "sequence" return
		 * parameter.
		 * Note: It is correct semantics accoring to spec to
		 * do this despite possibility of TBUFOVFLW error later.
		 * The spec treats TBUFOVFLW error in general as a special case
		 * which can be ignored by applications that do not
		 * really need the stuff returned in 'netbuf' structures.
		 */
		_T_TX_NEXTSTATE(T_LISTN, tiptr,
		    "t_listen:invalid state event T_LISTN");
		tiptr->ti_ocnt++;
		call->sequence = pptr->conn_ind.SEQ_number;

		if (_T_IS_TLI(api_semantics) || call->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.SRC_length,
			    call->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->addr.buf, ctlbuf.buf +
			    (size_t)pptr->conn_ind.SRC_offset,
			    (unsigned int)pptr->conn_ind.SRC_length);
			call->addr.len = pptr->conn_ind.SRC_length;
		}
		if (_T_IS_TLI(api_semantics) || call->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.OPT_length,
			    call->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->opt.buf, ctlbuf.buf +
			    pptr->conn_ind.OPT_offset,
			    (size_t)pptr->conn_ind.OPT_length);
			call->opt.len = pptr->conn_ind.OPT_length;
		}
		if (_T_IS_TLI(api_semantics) || call->udata.maxlen > 0) {
			if (databuf.len > (int)call->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			call->udata.len = databuf.len;
		}

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (0);

	case T_DISCON_IND:
		/*
		 * Append to the events in the "look buffer"
		 * list of events. This routine may defer signals.
		 */
		if (_t_register_lookevent(tiptr, databuf.buf, databuf.len,
		    ctlbuf.buf, ctlbuf.len) < 0) {
			t_errno = TSYSERR;
			errno = ENOMEM;
			goto err_out;
		}
		t_errno = TLOOK;
		goto err_out;

	default:
		break;
	}

	/* Unknown/unexpected primitive in the control part */
	t_errno = TSYSERR;
	errno = EPROTO;

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}