/*
 * Allocate and fill in an mblk containing the IP + SCTP header template
 * for transmission to peer address 'fp', reserving 'trailer' extra bytes
 * past the headers and sctps_wroff_xtra bytes of headroom in front.
 * Returns NULL if no usable source address exists or allocation fails.
 * The sender must later set the total length in the IP header.
 */
mblk_t *
sctp_make_mp(sctp_t *sctp, sctp_faddr_t *fp, int trailer)
{
	mblk_t *mp;
	size_t ipsctplen;
	int isv4;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	boolean_t src_changed = B_FALSE;

	ASSERT(fp != NULL);
	isv4 = fp->isv4;

	/*
	 * Re-select a source if none is set yet, or if the cached route
	 * is unusable (reject/blackhole).
	 */
	if (SCTP_IS_ADDR_UNSPEC(isv4, fp->saddr) ||
	    (fp->ixa->ixa_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
		/* Need to pick a source */
		sctp_get_dest(sctp, fp);
		/*
		 * Although we still may not get an IRE, the source address
		 * may be changed in sctp_get_dest().  Set src_changed to
		 * true so that the source address is copied again.
		 */
		src_changed = B_TRUE;
	}

	/* There is no suitable source address to use, return. */
	if (fp->state == SCTP_FADDRS_UNREACH)
		return (NULL);

	ASSERT(fp->ixa->ixa_ire != NULL);
	ASSERT(!SCTP_IS_ADDR_UNSPEC(isv4, fp->saddr));

	/* Template length depends on the peer's address family. */
	if (isv4) {
		ipsctplen = sctp->sctp_hdr_len;
	} else {
		ipsctplen = sctp->sctp_hdr6_len;
	}

	mp = allocb(ipsctplen + sctps->sctps_wroff_xtra + trailer, BPRI_MED);
	if (mp == NULL) {
		ip1dbg(("sctp_make_mp: error making mp..\n"));
		return (NULL);
	}
	/* Leave headroom for lower layers; headers start at b_rptr. */
	mp->b_rptr += sctps->sctps_wroff_xtra;
	mp->b_wptr = mp->b_rptr + ipsctplen;

	ASSERT(OK_32PTR(mp->b_wptr));

	if (isv4) {
		ipha_t *iph = (ipha_t *)mp->b_rptr;

		/* Copy the cached composite header, then patch addresses. */
		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst);
			IN6_V4MAPPED_TO_IPADDR(&fp->saddr, iph->ipha_src);
		}
		/* set or clear the don't fragment bit */
		if (fp->df) {
			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
		} else {
			iph->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr;
			((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr;
		}
	}
	ASSERT(sctp->sctp_connp != NULL);
	return (mp);
}
/*
 * Kernel-resident inet_ntop(): convert a binary AF_INET or AF_INET6
 * address to printable form in 'buf'.  Returns 'buf' on success, or a
 * static "<badaddr>"/"<badfamily>" string on invalid input.  The caller
 * must supply a buffer of at least INET_ADDRSTRLEN / INET6_ADDRSTRLEN
 * bytes respectively (only ASSERTed, not enforced, on non-DEBUG kernels).
 *
 * NOTE(review): the AF_INET case formats with "%03d", producing
 * zero-padded octets (e.g. "010.000.000.001") — this differs from the
 * userland inet_ntop(3C) output; presumably intentional for fixed-width
 * kernel logging, but confirm before relying on the exact format.
 */
/* ARGSUSED */
char *
inet_ntop(int af, const void *addr, char *buf, int addrlen)
{
	static char local_buf[INET6_ADDRSTRLEN];
	static char *err_buf1 = "<badaddr>";
	static char *err_buf2 = "<badfamily>";
	in6_addr_t	*v6addr;
	uchar_t		*v4addr;
	char		*caddr;

	/*
	 * We don't allow thread unsafe inet_ntop calls, they
	 * must pass a non-null buffer pointer. For DEBUG mode
	 * we use the ASSERT() and for non-debug kernel it will
	 * silently allow it for now. Someday we should remove
	 * the static buffer from this function.
	 */
	ASSERT(buf != NULL);
	if (buf == NULL)
		buf = local_buf;	/* thread-unsafe fallback */
	buf[0] = '\0';

	/* Let user know politely not to send NULL or unaligned addr */
	if (addr == NULL || !(OK_32PTR(addr))) {
#ifdef DEBUG
		cmn_err(CE_WARN, "inet_ntop: addr is <null> or unaligned");
#endif
		return (err_buf1);
	}

/* Extract one octet as a non-negative int, regardless of char signedness. */
#define	UC(b)	(((int)b) & 0xff)
	switch (af) {
	case AF_INET:
		ASSERT(addrlen >= INET_ADDRSTRLEN);
		v4addr = (uchar_t *)addr;
		(void) sprintf(buf, "%03d.%03d.%03d.%03d",
		    UC(v4addr[0]), UC(v4addr[1]), UC(v4addr[2]),
		    UC(v4addr[3]));
		return (buf);

	case AF_INET6:
		ASSERT(addrlen >= INET6_ADDRSTRLEN);
		v6addr = (in6_addr_t *)addr;
		if (IN6_IS_ADDR_V4MAPPED(v6addr)) {
			/* ::ffff:a.b.c.d — last 4 bytes hold the v4 addr */
			caddr = (char *)addr;
			(void) sprintf(buf, "::ffff:%d.%d.%d.%d",
			    UC(caddr[12]), UC(caddr[13]),
			    UC(caddr[14]), UC(caddr[15]));
		} else if (IN6_IS_ADDR_V4COMPAT(v6addr)) {
			/* ::a.b.c.d (deprecated v4-compatible form) */
			caddr = (char *)addr;
			(void) sprintf(buf, "::%d.%d.%d.%d",
			    UC(caddr[12]), UC(caddr[13]), UC(caddr[14]),
			    UC(caddr[15]));
		} else if (IN6_IS_ADDR_UNSPECIFIED(v6addr)) {
			(void) sprintf(buf, "::");
		} else {
			/* General case handled by the helper. */
			convert2ascii(buf, v6addr);
		}
		return (buf);

	default:
		return (err_buf2);
	}
#undef UC
}
/*
 * Fan out an inbound SCTP packet to the matching association.  'mp' is
 * the data mblk; when mctl_present is set, 'first_mp' is an M_CTL
 * (IPsec info) block chained in front of it.  If no association matches,
 * the packet is handed to ip_fanout_sctp_raw() (raw sockets / OOTB
 * handling).  Consumes the message on all paths.
 */
/* ARGSUSED */
void
ip_fanout_sctp(mblk_t *mp, ill_t *recv_ill, ipha_t *ipha, uint32_t ports,
    uint_t flags, boolean_t mctl_present, boolean_t ip_policy,
    uint_t ipif_seqid, zoneid_t zoneid)
{
	sctp_t *sctp;
	boolean_t isv4;
	conn_t *connp;
	mblk_t *first_mp;
	ip6_t *ip6h;
	in6_addr_t map_src, map_dst;
	in6_addr_t *src, *dst;

	first_mp = mp;
	if (mctl_present) {
		/* Step past the M_CTL to the actual data block. */
		mp = first_mp->b_cont;
		ASSERT(mp != NULL);
	}

	/* Assume IP provides aligned packets - otherwise toss */
	if (!OK_32PTR(mp->b_rptr)) {
		BUMP_MIB(&ip_mib, ipInDiscards);
		freemsg(first_mp);
		return;
	}

	/*
	 * Normalize the addresses to in6_addr_t form so the fanout lookup
	 * is family-independent; v4 addresses are v4-mapped.
	 */
	if (IPH_HDR_VERSION(ipha) == IPV6_VERSION) {
		ip6h = (ip6_t *)ipha;
		src = &ip6h->ip6_src;
		dst = &ip6h->ip6_dst;
		isv4 = B_FALSE;
	} else {
		ip6h = NULL;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &map_src);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &map_dst);
		src = &map_src;
		dst = &map_dst;
		isv4 = B_TRUE;
	}
	if ((connp = sctp_fanout(src, dst, ports, ipif_seqid, zoneid, mp)) ==
	    NULL) {
		/* No association: hand off for raw/OOTB processing. */
		ip_fanout_sctp_raw(first_mp, recv_ill, ipha, isv4,
		    ports, mctl_present, flags, ip_policy,
		    ipif_seqid, zoneid);
		return;
	}
	/* sctp_fanout returned a held conn_t; released via SCTP_REFRELE. */
	sctp = CONN2SCTP(connp);

	/* Found a client; up it goes */
	BUMP_MIB(&ip_mib, ipInDelivers);

	/*
	 * We check some fields in conn_t without holding a lock.
	 * This should be fine.
	 */
	if (CONN_INBOUND_POLICY_PRESENT(connp) || mctl_present) {
		first_mp = ipsec_check_inbound_policy(first_mp, connp,
		    ipha, NULL, mctl_present);
		if (first_mp == NULL) {
			/* Policy check consumed/dropped the packet. */
			SCTP_REFRELE(sctp);
			return;
		}
	}

	/* Initiate IPPF processing for fastpath */
	if (IPP_ENABLED(IPP_LOCAL_IN)) {
		ip_process(IPP_LOCAL_IN, &mp,
		    recv_ill->ill_phyint->phyint_ifindex);
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			/* Data gone; free the orphaned M_CTL if any. */
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_process might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}
	/* Append ancillary data (ifindex/SLLA) if the socket asked for it. */
	if (connp->conn_recvif || connp->conn_recvslla ||
	    connp->conn_ipv6_recvpktinfo) {
		int in_flags = 0;

		if (connp->conn_recvif || connp->conn_ipv6_recvpktinfo) {
			in_flags = IPF_RECVIF;
		}
		if (connp->conn_recvslla) {
			in_flags |= IPF_RECVSLLA;
		}
		if (isv4) {
			mp = ip_add_info(mp, recv_ill, in_flags);
		} else {
			mp = ip_add_info_v6(mp, recv_ill, &ip6h->ip6_dst);
		}
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_add_info might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}
	/*
	 * If the association is currently being processed, queue the packet
	 * on its recvq; otherwise take ownership (sctp_running) and process
	 * inline.  When queuing with an M_CTL present, b_prev carries the
	 * head of the chain.
	 */
	mutex_enter(&sctp->sctp_lock);
	if (sctp->sctp_running) {
		if (mctl_present)
			mp->b_prev = first_mp;
		if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
			BUMP_MIB(&ip_mib, ipInDiscards);
			freemsg(first_mp);
		}
		mutex_exit(&sctp->sctp_lock);
	} else {
		sctp->sctp_running = B_TRUE;
		mutex_exit(&sctp->sctp_lock);

		mutex_enter(&sctp->sctp_recvq_lock);
		if (sctp->sctp_recvq != NULL) {
			/* Drain order: queue behind already-pending data. */
			if (mctl_present)
				mp->b_prev = first_mp;
			if (!sctp_add_recvq(sctp, mp, B_TRUE)) {
				BUMP_MIB(&ip_mib, ipInDiscards);
				freemsg(first_mp);
			}
			mutex_exit(&sctp->sctp_recvq_lock);
			WAKE_SCTP(sctp);
		} else {
			mutex_exit(&sctp->sctp_recvq_lock);
			sctp_input_data(sctp, mp,
			    (mctl_present ? first_mp : NULL));
			WAKE_SCTP(sctp);
			sctp_process_sendq(sctp);
		}
	}
	SCTP_REFRELE(sctp);
}
/*
 * Common accept code. Called by sctp_conn_request.
 * cr_pkt is the INIT / INIT ACK packet.
 *
 * Initializes 'acceptor' from the listener and the peer's INIT chunk
 * (addresses, headers, TSNs, secrets), inserts it into the conn/bind
 * fanouts, and marks it ESTABLISHED.  Returns 0 on success or an errno.
 *
 * NOTE(review): RUN_SCTP(acceptor) is taken below and NOT released here —
 * the acceptor is returned locked; presumably the caller releases it.
 * Confirm against sctp_conn_request's handling.
 */
static int
sctp_accept_comm(sctp_t *listener, sctp_t *acceptor, mblk_t *cr_pkt,
    uint_t ip_hdr_len, sctp_init_chunk_t *iack)
{
	sctp_hdr_t		*sctph;
	sctp_chunk_hdr_t	*ich;
	sctp_init_chunk_t	*init;
	int			err;
	uint_t			sctp_options;
	conn_t			*aconnp;
	conn_t			*lconnp;
	sctp_stack_t	*sctps = listener->sctp_sctps;

	sctph = (sctp_hdr_t *)(cr_pkt->b_rptr + ip_hdr_len);
	ASSERT(OK_32PTR(sctph));

	/* Local port comes from the listener, remote port from the packet. */
	aconnp = acceptor->sctp_connp;
	lconnp = listener->sctp_connp;
	aconnp->conn_lport = lconnp->conn_lport;
	aconnp->conn_fport = sctph->sh_sport;

	ich = (sctp_chunk_hdr_t *)(iack + 1);
	init = (sctp_init_chunk_t *)(ich + 1);

	/* acceptor isn't in any fanouts yet, so don't need to hold locks */
	ASSERT(acceptor->sctp_faddrs == NULL);
	err = sctp_get_addrparams(acceptor, listener, cr_pkt, ich,
	    &sctp_options);
	if (err != 0)
		return (err);

	if ((err = sctp_set_hdraddrs(acceptor)) != 0)
		return (err);

	if ((err = sctp_build_hdrs(acceptor, KM_NOSLEEP)) != 0)
		return (err);

	/* PR-SCTP only if both the peer and this stack support/allow it. */
	if ((sctp_options & SCTP_PRSCTP_OPTION) &&
	    listener->sctp_prsctp_aware && sctps->sctps_prsctp_enabled) {
		acceptor->sctp_prsctp_aware = B_TRUE;
	} else {
		acceptor->sctp_prsctp_aware = B_FALSE;
	}

	/* Get initial TSNs */
	acceptor->sctp_ltsn = ntohl(iack->sic_inittsn);
	acceptor->sctp_recovery_tsn = acceptor->sctp_lastack_rxd =
	    acceptor->sctp_ltsn - 1;
	acceptor->sctp_adv_pap = acceptor->sctp_lastack_rxd;
	/* Serial numbers are initialized to the same value as the TSNs */
	acceptor->sctp_lcsn = acceptor->sctp_ltsn;

	if (!sctp_initialize_params(acceptor, init, iack))
		return (ENOMEM);

	/*
	 * Copy sctp_secret from the listener in case we need to validate
	 * a possibly delayed cookie.
	 */
	bcopy(listener->sctp_secret, acceptor->sctp_secret, SCTP_SECRET_LEN);
	bcopy(listener->sctp_old_secret, acceptor->sctp_old_secret,
	    SCTP_SECRET_LEN);
	acceptor->sctp_last_secret_update = ddi_get_lbolt64();

	/*
	 * After acceptor is inserted in the hash list, it can be found.
	 * So we need to lock it here.
	 */
	RUN_SCTP(acceptor);

	sctp_conn_hash_insert(&sctps->sctps_conn_fanout[
	    SCTP_CONN_HASH(sctps, aconnp->conn_ports)], acceptor, 0);
	sctp_bind_hash_insert(&sctps->sctps_bind_fanout[
	    SCTP_BIND_HASH(ntohs(aconnp->conn_lport))], acceptor, 0);

	/*
	 * No need to check for multicast destination since ip will only pass
	 * up multicasts to those that have expressed interest
	 * TODO: what about rejecting broadcasts?
	 * Also check that source is not a multicast or broadcast address.
	 */
	/* XXXSCTP */
	acceptor->sctp_state = SCTPS_ESTABLISHED;
	acceptor->sctp_assoc_start_time = (uint32_t)ddi_get_lbolt();
	/*
	 * listener->sctp_rwnd should be the default window size or a
	 * window size changed via SO_RCVBUF option.
	 */
	acceptor->sctp_rwnd = listener->sctp_rwnd;
	acceptor->sctp_irwnd = acceptor->sctp_rwnd;
	acceptor->sctp_pd_point = acceptor->sctp_rwnd;
	acceptor->sctp_upcalls = listener->sctp_upcalls;

	return (0);
}
/* Process the COOKIE packet, mp, directed at the listener 'sctp' */ sctp_t * sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len, sctp_init_chunk_t *iack, ip_recv_attr_t *ira) { sctp_t *eager; ip6_t *ip6h; int err; conn_t *connp, *econnp; sctp_stack_t *sctps; struct sock_proto_props sopp; cred_t *cr; pid_t cpid; in6_addr_t faddr, laddr; ip_xmit_attr_t *ixa; /* * No need to check for duplicate as this is the listener * and we are holding the lock. This means that no new * connection can be created out of it. And since the * fanout already done cannot find a match, it means that * there is no duplicate. */ ASSERT(OK_32PTR(mp->b_rptr)); if ((eager = sctp_create_eager(sctp)) == NULL) { return (NULL); } connp = sctp->sctp_connp; sctps = sctp->sctp_sctps; econnp = eager->sctp_connp; if (connp->conn_policy != NULL) { /* Inherit the policy from the listener; use actions from ira */ if (!ip_ipsec_policy_inherit(econnp, connp, ira)) { sctp_close_eager(eager); BUMP_MIB(&sctps->sctps_mib, sctpListenDrop); return (NULL); } } ip6h = (ip6_t *)mp->b_rptr; if (ira->ira_flags & IXAF_IS_IPV4) { ipha_t *ipha; ipha = (ipha_t *)ip6h; IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &laddr); IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &faddr); } else { laddr = ip6h->ip6_dst; faddr = ip6h->ip6_src; } if (ira->ira_flags & IRAF_IPSEC_SECURE) { /* * XXX need to fix the cached policy issue here. * We temporarily set the conn_laddr/conn_faddr here so * that IPsec can use it for the latched policy * selector. This is obvioursly wrong as SCTP can * use different addresses... 
*/ econnp->conn_laddr_v6 = laddr; econnp->conn_faddr_v6 = faddr; econnp->conn_saddr_v6 = laddr; } if (ipsec_conn_cache_policy(econnp, (ira->ira_flags & IRAF_IS_IPV4) != 0) != 0) { sctp_close_eager(eager); BUMP_MIB(&sctps->sctps_mib, sctpListenDrop); return (NULL); } /* Save for getpeerucred */ cr = ira->ira_cred; cpid = ira->ira_cpid; if (is_system_labeled()) { ip_xmit_attr_t *ixa = econnp->conn_ixa; ASSERT(ira->ira_tsl != NULL); /* Discard any old label */ if (ixa->ixa_free_flags & IXA_FREE_TSL) { ASSERT(ixa->ixa_tsl != NULL); label_rele(ixa->ixa_tsl); ixa->ixa_free_flags &= ~IXA_FREE_TSL; ixa->ixa_tsl = NULL; } if ((connp->conn_mlp_type != mlptSingle || connp->conn_mac_mode != CONN_MAC_DEFAULT) && ira->ira_tsl != NULL) { /* * If this is an MLP connection or a MAC-Exempt * connection with an unlabeled node, packets are to be * exchanged using the security label of the received * Cookie packet instead of the server application's * label. * tsol_check_dest called from ip_set_destination * might later update TSF_UNLABELED by replacing * ixa_tsl with a new label. */ label_hold(ira->ira_tsl); ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl); } else { ixa->ixa_tsl = crgetlabel(econnp->conn_cred); } } err = sctp_accept_comm(sctp, eager, mp, ip_hdr_len, iack); if (err != 0) { sctp_close_eager(eager); BUMP_MIB(&sctps->sctps_mib, sctpListenDrop); return (NULL); } ASSERT(eager->sctp_current->ixa != NULL); ixa = eager->sctp_current->ixa; if (!(ira->ira_flags & IXAF_IS_IPV4)) { ASSERT(!(ixa->ixa_flags & IXAF_IS_IPV4)); if (IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_src) || IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_dst)) { eager->sctp_linklocal = 1; ixa->ixa_flags |= IXAF_SCOPEID_SET; ixa->ixa_scopeid = ifindex; econnp->conn_incoming_ifindex = ifindex; } } /* * On a clustered note send this notification to the clustering * subsystem. 
*/ if (cl_sctp_connect != NULL) { uchar_t *slist; uchar_t *flist; size_t fsize; size_t ssize; fsize = sizeof (in6_addr_t) * eager->sctp_nfaddrs; ssize = sizeof (in6_addr_t) * eager->sctp_nsaddrs; slist = kmem_alloc(ssize, KM_NOSLEEP); flist = kmem_alloc(fsize, KM_NOSLEEP); if (slist == NULL || flist == NULL) { if (slist != NULL) kmem_free(slist, ssize); if (flist != NULL) kmem_free(flist, fsize); sctp_close_eager(eager); BUMP_MIB(&sctps->sctps_mib, sctpListenDrop); SCTP_KSTAT(sctps, sctp_cl_connect); return (NULL); } /* The clustering module frees these list */ sctp_get_saddr_list(eager, slist, ssize); sctp_get_faddr_list(eager, flist, fsize); (*cl_sctp_connect)(econnp->conn_family, slist, eager->sctp_nsaddrs, econnp->conn_lport, flist, eager->sctp_nfaddrs, econnp->conn_fport, B_FALSE, (cl_sctp_handle_t)eager); } /* Connection established, so send up the conn_ind */ if ((eager->sctp_ulpd = sctp->sctp_ulp_newconn(sctp->sctp_ulpd, (sock_lower_handle_t)eager, NULL, cr, cpid, &eager->sctp_upcalls)) == NULL) { sctp_close_eager(eager); BUMP_MIB(&sctps->sctps_mib, sctpListenDrop); return (NULL); } ASSERT(SCTP_IS_DETACHED(eager)); eager->sctp_detached = B_FALSE; bzero(&sopp, sizeof (sopp)); sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF; sopp.sopp_maxblk = strmsgsz; if (econnp->conn_family == AF_INET) { sopp.sopp_wroff = sctps->sctps_wroff_xtra + sizeof (sctp_data_hdr_t) + sctp->sctp_hdr_len; } else { sopp.sopp_wroff = sctps->sctps_wroff_xtra + sizeof (sctp_data_hdr_t) + sctp->sctp_hdr6_len; } eager->sctp_ulp_prop(eager->sctp_ulpd, &sopp); return (eager); }
/*
 * Handle a T_UNITDATA_REQ on an RDS stream: validate state and the
 * destination sockaddr embedded in 'mp', then hand the user data in
 * 'uiop' to the transport's sendmsg entry point.  Consumes 'mp' (and
 * its detached b_cont, mp1) on all paths.  Returns 0 or an errno;
 * ENOBUFS/ENOMEM from the transport are passed through (treated as
 * EWOULDBLOCK by callers, per the comment below).
 */
static int
rds_wput_data(queue_t *q, mblk_t *mp, uio_t *uiop)
{
	uchar_t *rptr = mp->b_rptr;
	rds_t *rds;
	mblk_t *mp1;
	sin_t *sin;
	ipaddr_t dst;
	uint16_t port;
	int ret = 0;

/* Alias for the T_unitdata_req header at the start of mp. */
#define	tudr ((struct T_unitdata_req *)(uintptr_t)rptr)

	rds = (rds_t *)q->q_ptr;

	/* Handle UNITDATA_REQ messages here */
	if (rds->rds_state == TS_UNBND) {
		/* If a port has not been bound to the stream, fail. */
		dprint(2, ("%s: socket is not bound to a port", LABEL));
		freemsg(mp);
		return (EPROTO);
	}

	/* Detach the data block so mp and mp1 are freed independently. */
	mp1 = mp->b_cont;
	mp->b_cont = NULL;
	if (mp1 == NULL) {
		dprint(2, ("%s: No message to send", LABEL));
		freemsg(mp);
		return (EPROTO);
	}

	/*
	 * No options allowed
	 */
	if (tudr->OPT_length != 0) {
		ret = EINVAL;
		goto done;
	}

	ASSERT(mp1->b_datap->db_ref == 1);

	/* Destination address must lie entirely within the message. */
	if ((rptr + tudr->DEST_offset + tudr->DEST_length) > mp->b_wptr) {
		ret = EDESTADDRREQ;
		goto done;
	}

	sin = (sin_t *)(uintptr_t)&rptr[tudr->DEST_offset];
	if (!OK_32PTR((char *)sin) || tudr->DEST_length !=
	    sizeof (sin_t) || sin->sin_family != AF_INET_OFFLOAD) {
		ret = EDESTADDRREQ;
		goto done;
	}

	/* Extract port and ipaddr */
	port = sin->sin_port;
	dst = sin->sin_addr.s_addr;

	if (port == 0 || dst == INADDR_ANY) {
		ret = EDESTADDRREQ;
		goto done;
	}

	/* The actual payload is taken from uiop, not from mp1. */
	ASSERT(rds_transport_ops != NULL);
	ret = rds_transport_ops->rds_transport_sendmsg(uiop, rds->rds_src, dst,
	    ntohs(rds->rds_port), ntohs(port), rds->rds_zoneid);
	if (ret != 0) {
		if ((ret != ENOBUFS) && (ret != ENOMEM)) {
			/* ENOMEM is actually EWOULDBLOCK */
			dprint(2, ("%s: rds_sendmsg returned %d", LABEL, ret));
			goto done;
		}
	}
done:
	freemsg(mp1);
	freemsg(mp);
	return (ret);
}
/*
 * Handle a T_BIND_REQ / O_T_BIND_REQ on an RDS stream: validate the
 * requested IPv4 address, find a free (or the requested) port in the
 * bind fanout, insert the stream, and reply with T_BIND_ACK.  All
 * failures are reported upstream via rds_err_ack(), which consumes mp.
 *
 * NO locking protection here as sockfs will only send down
 * one bind operation at a time.
 */
static void
rds_bind(queue_t *q, mblk_t *mp)
{
	sin_t		*sin;
	rds_t		*rds;
	struct T_bind_req	*tbr;
	in_port_t	port;	/* Host byte order */
	in_port_t	requested_port; /* Host byte order */
	struct T_bind_ack	*tba;
	int		count;
	rds_bf_t	*rdsbf;
	in_port_t	lport;	/* Network byte order */

	rds = (rds_t *)q->q_ptr;
	if (((uintptr_t)mp->b_wptr - (uintptr_t)mp->b_rptr) < sizeof (*tbr)) {
		rds_err_ack(q, mp, TPROTO, 0);
		return;
	}

	/*
	 * We don't allow multiple binds
	 */
	if (rds->rds_state != TS_UNBND) {
		rds_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}

	tbr = (struct T_bind_req *)(uintptr_t)mp->b_rptr;
	switch (tbr->ADDR_length) {
	case sizeof (sin_t):	/* Complete IPv4 address */
		sin = (sin_t *)(uintptr_t)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin_t));
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			rds_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (rds->rds_family != AF_INET_OFFLOAD ||
		    sin->sin_family != AF_INET_OFFLOAD) {
			rds_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		if (sin->sin_addr.s_addr == INADDR_ANY) {
			rds_err_ack(q, mp, TBADADDR, 0);
			return;
		}

		/*
		 * verify that the address is hosted on IB
		 * only exception is the loopback address.
		 */
		if ((sin->sin_addr.s_addr != INADDR_LOOPBACK) &&
		    !rds_verify_bind_address(sin->sin_addr.s_addr)) {
			rds_err_ack(q, mp, TBADADDR, 0);
			return;
		}

		port = ntohs(sin->sin_port);
		break;

	default:	/* Invalid request */
		rds_err_ack(q, mp, TBADADDR, 0);
		return;
	}

	requested_port = port;

	/*
	 * TPI only sends down T_BIND_REQ for AF_INET and AF_INET6
	 * since RDS socket is of type AF_INET_OFFLOAD a O_T_BIND_REQ
	 * will be sent down. Treat O_T_BIND_REQ as T_BIND_REQ
	 */

	if (requested_port == 0) {
		/*
		 * If the application passed in zero for the port number, it
		 * doesn't care which port number we bind to. Get one in the
		 * valid range.
		 */
		port = rds_update_next_port(rds_next_port_to_try);
	}

	ASSERT(port != 0);
	count = 0;
	/*
	 * Scan for a usable port.  On the successful break the fanout
	 * bucket lock (rds_bf_lock) is still HELD; it protects the hash
	 * insert below and is released only after it.
	 */
	for (;;) {
		rds_t		*rds1;
		ASSERT(sin->sin_addr.s_addr != INADDR_ANY);
		/*
		 * Walk through the list of rds streams bound to
		 * requested port with the same IP address.
		 */
		lport = htons(port);
		rdsbf = &rds_bind_fanout[RDS_BIND_HASH(lport)];
		mutex_enter(&rdsbf->rds_bf_lock);
		for (rds1 = rdsbf->rds_bf_rds; rds1 != NULL;
		    rds1 = rds1->rds_bind_hash) {
			if (lport != rds1->rds_port ||
			    rds1->rds_src != sin->sin_addr.s_addr ||
			    rds1->rds_zoneid != rds->rds_zoneid)
				continue;
			break;
		}

		if (rds1 == NULL) {
			/*
			 * No other stream has this IP address
			 * and port number. We can use it.
			 */
			break;
		}
		mutex_exit(&rdsbf->rds_bf_lock);
		if (requested_port != 0) {
			/*
			 * We get here only when requested port
			 * is bound (and only first of the for()
			 * loop iteration).
			 *
			 * The semantics of this bind request
			 * require it to fail so we return from
			 * the routine (and exit the loop).
			 *
			 */
			rds_err_ack(q, mp, TADDRBUSY, 0);
			return;
		}

		port = rds_update_next_port(port + 1);

		if (++count >= loopmax) {
			/*
			 * We've tried every possible port number and
			 * there are none available, so send an error
			 * to the user.
			 */
			rds_err_ack(q, mp, TNOADDR, 0);
			return;
		}
	}

	/*
	 * Copy the source address into our rds structure.
	 */
	rds->rds_src = sin->sin_addr.s_addr;
	rds->rds_port = lport;

	/*
	 * reset the next port if we choose the port
	 */
	if (requested_port == 0) {
		rds_next_port_to_try = port + 1;
	}

	rds->rds_state = TS_IDLE;
	rds_bind_hash_insert(rdsbf, rds);
	mutex_exit(&rdsbf->rds_bf_lock);

	/* Reset the message type in preparation for shipping it back. */
	mp->b_datap->db_type = M_PCPROTO;
	tba = (struct T_bind_ack *)(uintptr_t)mp->b_rptr;
	tba->PRIM_type = T_BIND_ACK;

	/* Increment the number of ports and set the port quota */
	RDS_INCR_NPORT();
	rds->rds_port_quota = RDS_CURRENT_PORT_QUOTA();
	RDS_SET_PORT_QUOTA(rds->rds_port_quota);
	(void) proto_set_rx_hiwat(RD(q), NULL,
	    rds->rds_port_quota * UserBufferSize);

	qreply(q, mp);
}
/* * If iserror == 0, sends an abort. If iserror != 0, sends an error. */ void sctp_send_abort(sctp_t *sctp, uint32_t vtag, uint16_t serror, char *details, size_t len, mblk_t *inmp, int iserror, boolean_t tbit, ip_recv_attr_t *ira) { mblk_t *hmp; uint32_t ip_hdr_len; ipha_t *iniph; ipha_t *ahiph = NULL; ip6_t *inip6h; ip6_t *ahip6h = NULL; sctp_hdr_t *sh; sctp_hdr_t *insh; size_t ahlen; uchar_t *p; ssize_t alen; int isv4; conn_t *connp = sctp->sctp_connp; sctp_stack_t *sctps = sctp->sctp_sctps; ip_xmit_attr_t *ixa; isv4 = (IPH_HDR_VERSION(inmp->b_rptr) == IPV4_VERSION); if (isv4) { ahlen = sctp->sctp_hdr_len; } else { ahlen = sctp->sctp_hdr6_len; } /* * If this is a labeled system, then check to see if we're allowed to * send a response to this particular sender. If not, then just drop. */ if (is_system_labeled() && !tsol_can_reply_error(inmp, ira)) return; hmp = allocb(sctps->sctps_wroff_xtra + ahlen, BPRI_MED); if (hmp == NULL) { /* XXX no resources */ return; } /* copy in the IP / SCTP header */ p = hmp->b_rptr + sctps->sctps_wroff_xtra; hmp->b_rptr = p; hmp->b_wptr = p + ahlen; if (isv4) { bcopy(sctp->sctp_iphc, p, sctp->sctp_hdr_len); /* * Composite is likely incomplete at this point, so pull * info from the incoming IP / SCTP headers. 
*/ ahiph = (ipha_t *)p; iniph = (ipha_t *)inmp->b_rptr; ip_hdr_len = IPH_HDR_LENGTH(inmp->b_rptr); sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr_len); ASSERT(OK_32PTR(sh)); insh = (sctp_hdr_t *)((uchar_t *)iniph + ip_hdr_len); ASSERT(OK_32PTR(insh)); /* Copy in the peer's IP addr */ ahiph->ipha_dst = iniph->ipha_src; ahiph->ipha_src = iniph->ipha_dst; } else { bcopy(sctp->sctp_iphc6, p, sctp->sctp_hdr6_len); ahip6h = (ip6_t *)p; inip6h = (ip6_t *)inmp->b_rptr; ip_hdr_len = ip_hdr_length_v6(inmp, inip6h); sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr6_len); ASSERT(OK_32PTR(sh)); insh = (sctp_hdr_t *)((uchar_t *)inip6h + ip_hdr_len); ASSERT(OK_32PTR(insh)); /* Copy in the peer's IP addr */ ahip6h->ip6_dst = inip6h->ip6_src; ahip6h->ip6_src = inip6h->ip6_dst; } /* Fill in the holes in the SCTP common header */ sh->sh_sport = insh->sh_dport; sh->sh_dport = insh->sh_sport; sh->sh_verf = vtag; /* Link in the abort chunk */ if ((alen = sctp_link_abort(hmp, serror, details, len, iserror, tbit)) < 0) { freemsg(hmp); return; } /* * Base the transmission on any routing-related socket options * that have been set on the listener/connection. */ ixa = conn_get_ixa_exclusive(connp); if (ixa == NULL) { freemsg(hmp); return; } ixa->ixa_flags &= ~IXAF_VERIFY_PMTU; ixa->ixa_pktlen = ahlen + alen; if (isv4) { ixa->ixa_flags |= IXAF_IS_IPV4; ahiph->ipha_length = htons(ixa->ixa_pktlen); ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr_len; } else { ixa->ixa_flags &= ~IXAF_IS_IPV4; ahip6h->ip6_plen = htons(ixa->ixa_pktlen - IPV6_HDR_LEN); ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr6_len; } SCTPS_BUMP_MIB(sctps, sctpAborted); BUMP_LOCAL(sctp->sctp_obchunks); if (is_system_labeled() && ixa->ixa_tsl != NULL) { ASSERT(ira->ira_tsl != NULL); ixa->ixa_tsl = ira->ira_tsl; /* A multi-level responder */ } if (ira->ira_flags & IRAF_IPSEC_SECURE) { /* * Apply IPsec based on how IPsec was applied to * the packet that caused the abort. 
*/ if (!ipsec_in_to_out(ira, ixa, hmp, ahiph, ahip6h)) { ip_stack_t *ipst = sctps->sctps_netstack->netstack_ip; BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards); /* Note: mp already consumed and ip_drop_packet done */ ixa_refrele(ixa); return; } } else { ixa->ixa_flags |= IXAF_NO_IPSEC; } BUMP_LOCAL(sctp->sctp_opkts); BUMP_LOCAL(sctp->sctp_obchunks); (void) ip_output_simple(hmp, ixa); ixa_refrele(ixa); }