/*
 * Unlink the peer address entry `fp' from sctp's faddr list and free it.
 * Stops any timers still armed on the entry, drops its transmit-attribute
 * reference, removes it from the singly-linked sctp_faddrs list under the
 * conn-fanout lock, and returns it to the faddr kmem cache.
 *
 * NOTE(review): `fp' is assumed to actually be on sctp->sctp_faddrs; if it
 * is not, the predecessor walk below runs off the end of the list and
 * dereferences NULL.  Callers must guarantee membership.
 */
void sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp) {
	sctp_faddr_t *fpp;

	/* Empty list: nothing to unlink. */
	if (!sctp->sctp_faddrs) {
		return;
	}

	/* Release the retransmission timer, if armed. */
	if (fp->timer_mp != NULL) {
		sctp_timer_free(fp->timer_mp);
		fp->timer_mp = NULL;
		fp->timer_running = 0;
	}
	/* Release the heartbeat (rc) timer, if armed. */
	if (fp->rc_timer_mp != NULL) {
		sctp_timer_free(fp->rc_timer_mp);
		fp->rc_timer_mp = NULL;
		fp->rc_timer_running = 0;
	}
	/* Drop the cached ip_xmit_attr_t reference. */
	if (fp->ixa != NULL) {
		ixa_refrele(fp->ixa);
		fp->ixa = NULL;
	}

	/* Head of list needs no predecessor. */
	if (fp == sctp->sctp_faddrs) {
		goto gotit;
	}

	/*
	 * Find fp's predecessor.  Note this walk is done without holding
	 * tf_lock; only the actual list surgery below is under the lock.
	 */
	for (fpp = sctp->sctp_faddrs; fpp->next != fp; fpp = fpp->next)
		;

gotit:
	ASSERT(sctp->sctp_conn_tfp != NULL);
	mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
	if (fp == sctp->sctp_faddrs) {
		sctp->sctp_faddrs = fp->next;
	} else {
		fpp->next = fp->next;
	}
	mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
	kmem_cache_free(sctp_kmem_faddr_cache, fp);
	sctp->sctp_nfaddrs--;
}
/*
 * Free sctp's entire peer-address list.  Clears the cached lastfaddr /
 * current / primary pointers, tears down all per-faddr timers, then walks
 * the list releasing each entry's transmit attributes and returning it to
 * the faddr cache.  If the association is in the conn fanout and the caller
 * does not already hold the fanout lock, the list surgery is done under
 * tf_lock.
 */
void sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock) {
	sctp_faddr_t	*cur;
	sctp_faddr_t	*nxt;
	sctp_tf_t	*tfp = sctp->sctp_conn_tfp;
	boolean_t	grab_lock;

	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		return;
	}
	ASSERT(sctp->sctp_lastfaddr != NULL);

	sctp->sctp_lastfaddr = NULL;
	sctp->sctp_current = NULL;
	sctp->sctp_primary = NULL;

	sctp_free_faddr_timers(sctp);

	/* in conn fanout; need to hold lock */
	grab_lock = (tfp != NULL && !caller_holds_lock);
	if (grab_lock) {
		mutex_enter(&tfp->tf_lock);
	}

	cur = sctp->sctp_faddrs;
	while (cur != NULL) {
		nxt = cur->next;
		if (cur->ixa != NULL) {
			ixa_refrele(cur->ixa);
			cur->ixa = NULL;
		}
		kmem_cache_free(sctp_kmem_faddr_cache, cur);
		sctp->sctp_nfaddrs--;
		cur = nxt;
	}
	sctp->sctp_faddrs = NULL;
	ASSERT(sctp->sctp_nfaddrs == 0);

	if (grab_lock) {
		mutex_exit(&tfp->tf_lock);
	}
}
/*
 * Guard against a malicious peer hijacking an existing association via a
 * "restart" INIT [ACK] that carries new addresses (see RFC 4960 sec 5.2.2
 * security considerations).  Builds a temporary faddr list from the address
 * parameters in the chunk (plus the packet's source address if it was not
 * listed), then compares that set against every association on the same
 * conn-hash line with matching ports.
 *
 * Returns 0 if the check failed and the restart should be refused,
 * 1 if the check succeeded.
 */
int sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich,
    uint32_t ports, int sleep, sctp_stack_t *sctps, ip_recv_attr_t *ira)
{
	sctp_faddr_t *fp, *fphead = NULL;
	sctp_parm_hdr_t *ph;
	ssize_t remaining;
	int isv4;
	ipha_t *iph;
	ip6_t *ip6h;
	in6_addr_t hdraddr[1];
	int retval = 0;
	sctp_tf_t *tf;
	sctp_t *sctp;
	int compres;
	sctp_init_chunk_t *init;
	int nadded = 0;		/* count of addrs not in the existing assoc */

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdraddr[0] = ip6h->ip6_src;
	}

	/* Walk the params in the INIT [ACK], pulling out addr params */
	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
	    sizeof (sctp_init_chunk_t);
	if (remaining < sizeof (*ph)) {
		/* no parameters; restart OK */
		return (1);
	}
	init = (sctp_init_chunk_t *)(ich + 1);
	ph = (sctp_parm_hdr_t *)(init + 1);

	while (ph != NULL) {
		sctp_faddr_t *fpa = NULL;

		/* params will have already been byteordered when validating */
		if (ph->sph_type == htons(PARM_ADDR4)) {
			if (remaining >= PARM_ADDR4_LEN) {
				in6_addr_t addr;

				/* store as a v4-mapped v6 addr */
				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
				    (ph + 1), &addr);
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				fpa->faddr = addr;
				fpa->next = NULL;
			}
		} else if (ph->sph_type == htons(PARM_ADDR6)) {
			if (remaining >= PARM_ADDR6_LEN) {
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				bcopy(ph + 1, &fpa->faddr,
				    sizeof (fpa->faddr));
				fpa->next = NULL;
			}
		}
		/* link in the new addr, if it was an addr param (at head) */
		if (fpa != NULL) {
			if (fphead == NULL) {
				fphead = fpa;
			} else {
				fpa->next = fphead;
				fphead = fpa;
			}
		}
		ph = sctp_next_parm(ph, &remaining);
	}

	if (fphead == NULL) {
		/* no addr parameters; restart OK */
		return (1);
	}

	/*
	 * got at least one; make sure the header's addr is
	 * in the list
	 */
	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
	if (fp == NULL) {
		/* not included; add it now */
		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
		if (fp == NULL) {
			goto done;
		}
		bzero(fp, sizeof (*fp));
		fp->faddr = *hdraddr;
		fp->next = fphead;
		fphead = fp;
	}

	/*
	 * Now, we can finally do the check: For each sctp instance
	 * on the hash line for ports, compare its faddr set against
	 * the new one. If the new one is a strict subset of any
	 * existing sctp's faddrs, the restart is OK. However, if there
	 * is an overlap, this could be an attack, so return failure.
	 * If all sctp's faddrs are disjoint, this is a legitimate new
	 * association.
	 */
	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
	mutex_enter(&tf->tf_lock);
	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
		if (ports != sctp->sctp_connp->conn_ports) {
			continue;
		}
		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
		if (compres <= SCTP_ADDR_SUBSET) {
			retval = 1;
			mutex_exit(&tf->tf_lock);
			goto done;
		}
		if (compres == SCTP_ADDR_OVERLAP) {
			dprint(1,
			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
			/*
			 * While we still hold the lock, we need to
			 * figure out which addresses have been
			 * added so we can include them in the abort
			 * we will send back. Since these faddrs will
			 * never be used, we overload the rto field
			 * here, setting it to 0 if the address was
			 * not added, 1 if it was added.
			 */
			for (fp = fphead; fp; fp = fp->next) {
				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
					fp->rto = 0;
				} else {
					fp->rto = 1;
					nadded++;
				}
			}
			/*
			 * NOTE: we exit the loop with `sctp' pointing at
			 * the overlapping association; the abort sent at
			 * done: relies on that value.
			 */
			mutex_exit(&tf->tf_lock);
			goto done;
		}
	}
	mutex_exit(&tf->tf_lock);

	/* All faddrs are disjoint; legit new association */
	retval = 1;

done:
	/* If there are attempted adds, send back an abort listing the addrs */
	if (nadded > 0) {
		void *dtail;
		size_t dlen;

		/* v6 param size is an upper bound for both addr families */
		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
		if (dtail == NULL) {
			goto cleanup;
		}

		ph = dtail;
		dlen = 0;
		/* serialize each added addr as a PARM_ADDR4/6 TLV */
		for (fp = fphead; fp; fp = fp->next) {
			if (fp->rto == 0) {
				continue;
			}
			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
				ipaddr_t addr4;

				ph->sph_type = htons(PARM_ADDR4);
				ph->sph_len = htons(PARM_ADDR4_LEN);
				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
				ph++;
				bcopy(&addr4, ph, sizeof (addr4));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (addr4));
				dlen += PARM_ADDR4_LEN;
			} else {
				ph->sph_type = htons(PARM_ADDR6);
				ph->sph_len = htons(PARM_ADDR6_LEN);
				ph++;
				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (fp->faddr));
				dlen += PARM_ADDR6_LEN;
			}
		}

		/* Send off the abort */
		sctp_send_abort(sctp, sctp_init2vtag(ich),
		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE,
		    ira);
		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
	}

cleanup:
	/* Clean up: free the temporary faddr list built above */
	if (fphead) {
		sctp_faddr_t *fpn;

		for (fp = fphead; fp; fp = fpn) {
			fpn = fp->next;
			if (fp->ixa != NULL) {
				ixa_refrele(fp->ixa);
				fp->ixa = NULL;
			}
			kmem_cache_free(sctp_kmem_faddr_cache, fp);
		}
	}

	return (retval);
}
/* * If iserror == 0, sends an abort. If iserror != 0, sends an error. */ void sctp_send_abort(sctp_t *sctp, uint32_t vtag, uint16_t serror, char *details, size_t len, mblk_t *inmp, int iserror, boolean_t tbit, ip_recv_attr_t *ira) { mblk_t *hmp; uint32_t ip_hdr_len; ipha_t *iniph; ipha_t *ahiph = NULL; ip6_t *inip6h; ip6_t *ahip6h = NULL; sctp_hdr_t *sh; sctp_hdr_t *insh; size_t ahlen; uchar_t *p; ssize_t alen; int isv4; conn_t *connp = sctp->sctp_connp; sctp_stack_t *sctps = sctp->sctp_sctps; ip_xmit_attr_t *ixa; isv4 = (IPH_HDR_VERSION(inmp->b_rptr) == IPV4_VERSION); if (isv4) { ahlen = sctp->sctp_hdr_len; } else { ahlen = sctp->sctp_hdr6_len; } /* * If this is a labeled system, then check to see if we're allowed to * send a response to this particular sender. If not, then just drop. */ if (is_system_labeled() && !tsol_can_reply_error(inmp, ira)) return; hmp = allocb(sctps->sctps_wroff_xtra + ahlen, BPRI_MED); if (hmp == NULL) { /* XXX no resources */ return; } /* copy in the IP / SCTP header */ p = hmp->b_rptr + sctps->sctps_wroff_xtra; hmp->b_rptr = p; hmp->b_wptr = p + ahlen; if (isv4) { bcopy(sctp->sctp_iphc, p, sctp->sctp_hdr_len); /* * Composite is likely incomplete at this point, so pull * info from the incoming IP / SCTP headers. 
*/ ahiph = (ipha_t *)p; iniph = (ipha_t *)inmp->b_rptr; ip_hdr_len = IPH_HDR_LENGTH(inmp->b_rptr); sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr_len); ASSERT(OK_32PTR(sh)); insh = (sctp_hdr_t *)((uchar_t *)iniph + ip_hdr_len); ASSERT(OK_32PTR(insh)); /* Copy in the peer's IP addr */ ahiph->ipha_dst = iniph->ipha_src; ahiph->ipha_src = iniph->ipha_dst; } else { bcopy(sctp->sctp_iphc6, p, sctp->sctp_hdr6_len); ahip6h = (ip6_t *)p; inip6h = (ip6_t *)inmp->b_rptr; ip_hdr_len = ip_hdr_length_v6(inmp, inip6h); sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr6_len); ASSERT(OK_32PTR(sh)); insh = (sctp_hdr_t *)((uchar_t *)inip6h + ip_hdr_len); ASSERT(OK_32PTR(insh)); /* Copy in the peer's IP addr */ ahip6h->ip6_dst = inip6h->ip6_src; ahip6h->ip6_src = inip6h->ip6_dst; } /* Fill in the holes in the SCTP common header */ sh->sh_sport = insh->sh_dport; sh->sh_dport = insh->sh_sport; sh->sh_verf = vtag; /* Link in the abort chunk */ if ((alen = sctp_link_abort(hmp, serror, details, len, iserror, tbit)) < 0) { freemsg(hmp); return; } /* * Base the transmission on any routing-related socket options * that have been set on the listener/connection. */ ixa = conn_get_ixa_exclusive(connp); if (ixa == NULL) { freemsg(hmp); return; } ixa->ixa_flags &= ~IXAF_VERIFY_PMTU; ixa->ixa_pktlen = ahlen + alen; if (isv4) { ixa->ixa_flags |= IXAF_IS_IPV4; ahiph->ipha_length = htons(ixa->ixa_pktlen); ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr_len; } else { ixa->ixa_flags &= ~IXAF_IS_IPV4; ahip6h->ip6_plen = htons(ixa->ixa_pktlen - IPV6_HDR_LEN); ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr6_len; } SCTPS_BUMP_MIB(sctps, sctpAborted); BUMP_LOCAL(sctp->sctp_obchunks); if (is_system_labeled() && ixa->ixa_tsl != NULL) { ASSERT(ira->ira_tsl != NULL); ixa->ixa_tsl = ira->ira_tsl; /* A multi-level responder */ } if (ira->ira_flags & IRAF_IPSEC_SECURE) { /* * Apply IPsec based on how IPsec was applied to * the packet that caused the abort. 
*/ if (!ipsec_in_to_out(ira, ixa, hmp, ahiph, ahip6h)) { ip_stack_t *ipst = sctps->sctps_netstack->netstack_ip; BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards); /* Note: mp already consumed and ip_drop_packet done */ ixa_refrele(ixa); return; } } else { ixa->ixa_flags |= IXAF_NO_IPSEC; } BUMP_LOCAL(sctp->sctp_opkts); BUMP_LOCAL(sctp->sctp_obchunks); (void) ip_output_simple(hmp, ixa); ixa_refrele(ixa); }