/*
 * To change the currently used peer address to the specified one.
 *
 * Rewrites the cached composite (IP + SCTP) header template so that
 * subsequent transmissions use fp's source/destination addresses, then
 * makes fp the current peer and adopts its path MSS.
 */
void
sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
{
	/* Now setup the composite header. */
	if (fp->isv4) {
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
		    sctp->sctp_ipha->ipha_dst);
		IN6_V4MAPPED_TO_IPADDR(&fp->saddr,
		    sctp->sctp_ipha->ipha_src);
		/* update don't fragment bit */
		if (fp->df) {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
			    htons(IPH_DF);
		} else {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		sctp->sctp_ip6h->ip6_dst = fp->faddr;
		sctp->sctp_ip6h->ip6_src = fp->saddr;
	}
	sctp->sctp_current = fp;
	sctp->sctp_mss = fp->sfa_pmss;

	/* Update the upper layer for the change. */
	if (!SCTP_IS_DETACHED(sctp))
		sctp_set_ulp_prop(sctp);
}
/*
 * au_to_in_addr_ex
 * returns:
 *	pointer to au_membuf chain containing an ipv6 token
 *
 * An IPv4-mapped IPv6 address is emitted as a plain AUT_IN_ADDR (v4)
 * token; any other address is emitted as an AUT_IN_ADDR_EX token with
 * an AU_IPv6 type field.
 */
token_t *
au_to_in_addr_ex(int32_t *internet_addr)
{
	token_t *token;			/* local au_membuf */
	adr_t adr;			/* adr memory stream header */
	char hdr_v4 = AUT_IN_ADDR;	/* header for v4 token */
	char hdr_v6 = AUT_IN_ADDR_EX;	/* header for v6 token */
	int32_t type = AU_IPv6;

	token = au_getclr();
	adr_start(&adr, memtod(token, char *));

	if (!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)internet_addr)) {
		/* Genuine IPv6 address: use the extended token form. */
		adr_char(&adr, &hdr_v6, 1);
		adr_int32(&adr, &type, 1);
		adr_char(&adr, (char *)internet_addr,
		    sizeof (struct in6_addr));
	} else {
		/*
		 * An IPv4-mapped IPv6 address is really an IPv4 address
		 * in IPv6 format; emit the plain v4 token.
		 */
		ipaddr_t in4;

		IN6_V4MAPPED_TO_IPADDR((in6_addr_t *)internet_addr, in4);
		adr_char(&adr, &hdr_v4, 1);
		adr_char(&adr, (char *)&in4, sizeof (ipaddr_t));
	}

	token->len = adr_count(&adr);
	return (token);
}
/*
 * Check whether the RTA_IFA address carried in a routing-socket
 * ifa_msghdr of "msglen" bytes matches "addr".  Returns B_TRUE on a
 * match, B_FALSE otherwise (including truncated/malformed messages or
 * messages with no RTA_IFA entry at all).
 */
static boolean_t
check_rtm_addr(const struct ifa_msghdr *ifam, int msglen, boolean_t isv6,
    const in6_addr_t *addr)
{
	const char *pos, *end;
	uint_t bit;

	if (!(ifam->ifam_addrs & RTA_IFA))
		return (B_FALSE);

	pos = (const char *)(ifam + 1);
	end = (const char *)ifam + msglen;

	/*
	 * The sockaddrs follow the header in RTA_* bit order; step over
	 * every entry that precedes RTA_IFA, sizing each by its family.
	 */
	for (bit = 1; bit < RTA_IFA; bit <<= 1) {
		const struct sockaddr *sa;

		if (!(ifam->ifam_addrs & bit))
			continue;
		/* LINTED: alignment */
		sa = (const struct sockaddr *)pos;
		if ((const char *)(sa + 1) > end)
			return (B_FALSE);
		switch (sa->sa_family) {
		case AF_INET:
			pos += sizeof (struct sockaddr_in);
			break;
		case AF_LINK:
			pos += sizeof (struct sockaddr_dl);
			break;
		case AF_INET6:
			pos += sizeof (struct sockaddr_in6);
			break;
		default:
			pos += sizeof (struct sockaddr);
			break;
		}
	}

	if (isv6) {
		const struct sockaddr_in6 *sin6;

		/* LINTED: alignment */
		sin6 = (const struct sockaddr_in6 *)pos;
		if ((const char *)(sin6 + 1) > end ||
		    sin6->sin6_family != AF_INET6)
			return (B_FALSE);
		return (IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr, addr));
	} else {
		const struct sockaddr_in *sinp;
		ipaddr_t v4addr;

		/* LINTED: alignment */
		sinp = (const struct sockaddr_in *)pos;
		if ((const char *)(sinp + 1) > end ||
		    sinp->sin_family != AF_INET)
			return (B_FALSE);
		IN6_V4MAPPED_TO_IPADDR(addr, v4addr);
		return (sinp->sin_addr.s_addr == v4addr);
	}
}
void sctp_intf_event(sctp_t *sctp, in6_addr_t addr, int state, int error) { struct sctp_paddr_change *spc; ipaddr_t addr4; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; mblk_t *mp; if (!sctp->sctp_recvpathevnt) { return; } if ((mp = allocb(sizeof (*spc), BPRI_MED)) == NULL) { return; } spc = (struct sctp_paddr_change *)mp->b_rptr; spc->spc_type = SCTP_PEER_ADDR_CHANGE; spc->spc_flags = 0; spc->spc_length = sizeof (*spc); if (IN6_IS_ADDR_V4MAPPED(&addr)) { IN6_V4MAPPED_TO_IPADDR(&addr, addr4); sin = (struct sockaddr_in *)&spc->spc_aaddr; sin->sin_family = AF_INET; sin->sin_port = 0; sin->sin_addr.s_addr = addr4; } else { sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = addr; } spc->spc_state = state; spc->spc_error = error; spc->spc_assoc_id = 0; mp->b_wptr = (uchar_t *)(spc + 1); sctp_notify(sctp, mp, spc->spc_length); }
/*
 * Deliver notification message "emp" (total data length "len") to the
 * upper layer.  A T_unitdata_ind carrying the primary peer address is
 * prepended so the event flows up like inbound data; on allocation
 * failure the event is dropped (see XXX below).
 */
/* ARGSUSED */
static void
sctp_notify(sctp_t *sctp, mblk_t *emp, size_t len)
{
	struct T_unitdata_ind *tudi;
	mblk_t *mp;
	sctp_faddr_t *fp;
	int32_t rwnd = 0;
	int error;
	conn_t *connp = sctp->sctp_connp;

	/*
	 * Room for the T_unitdata_ind, a pointer's worth of alignment
	 * padding, and the larger (IPv6) sockaddr.
	 */
	if ((mp = allocb(sizeof (*tudi) + sizeof (void *) +
	    sizeof (struct sockaddr_in6), BPRI_HI)) == NULL) {
		/* XXX trouble: don't want to drop events. should queue it. */
		freemsg(emp);
		return;
	}
	dprint(3, ("sctp_notify: event %d\n", (*(uint16_t *)emp->b_rptr)));
	mp->b_datap->db_type = M_PROTO;
	mp->b_flag |= MSGMARK;
	mp->b_rptr += sizeof (void *); /* pointer worth of padding */

	tudi = (struct T_unitdata_ind *)mp->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_length = 0;
	tudi->OPT_offset = 0;

	fp = sctp->sctp_primary;
	ASSERT(fp);

	/*
	 * Fill in primary remote address.
	 */
	if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
		struct sockaddr_in *sin4;

		tudi->SRC_length = sizeof (*sin4);
		sin4 = (struct sockaddr_in *)(tudi + 1);
		sin4->sin_family = AF_INET;
		sin4->sin_port = connp->conn_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		mp->b_wptr = (uchar_t *)(sin4 + 1);
	} else {
		struct sockaddr_in6 *sin6;

		tudi->SRC_length = sizeof (*sin6);
		sin6 = (struct sockaddr_in6 *)(tudi + 1);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = connp->conn_fport;
		sin6->sin6_addr = fp->faddr;
		mp->b_wptr = (uchar_t *)(sin6 + 1);
	}
	mp->b_cont = emp;

	/*
	 * Notifications are queued regardless of socket rx space.  So
	 * we do not decrement sctp_rwnd here as this will confuse the
	 * other side.
	 */
#ifdef DEBUG
	/* Cross-check that the caller-supplied len matches the chain. */
	for (emp = mp->b_cont; emp; emp = emp->b_cont) {
		rwnd += emp->b_wptr - emp->b_rptr;
	}
	ASSERT(len == rwnd);
#endif

	/*
	 * Override b_flag for SCTP sockfs internal use
	 */
	mp->b_flag = (short)SCTP_NOTIFICATION;

	rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, mp, msgdsize(mp), 0,
	    &error, NULL);
	/* The upper layer may report a larger receive window; adopt it. */
	if (rwnd > sctp->sctp_rwnd) {
		sctp->sctp_rwnd = rwnd;
	}
}
/*
 * Returns 0 on success, ENOMEM on memory allocation failure, EHOSTUNREACH
 * if the connection credentials fail remote host accreditation or
 * if the new destination does not support the previously established
 * connection security label. If sleep is true, this function should
 * never fail for a memory allocation failure. The boolean parameter
 * "first" decides whether the newly created faddr structure should be
 * added at the beginning of the list or at the end.
 *
 * Note: caller must hold conn fanout lock.
 */
int
sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
{
	sctp_faddr_t *faddr;
	mblk_t *timer_mp;
	int err;
	conn_t *connp = sctp->sctp_connp;

	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = connp->conn_ixa;
		ts_label_t *effective_tsl = NULL;

		ASSERT(ixa->ixa_tsl != NULL);

		/*
		 * Verify the destination is allowed to receive packets
		 * at the security label of the connection we are initiating.
		 *
		 * tsol_check_dest() will create a new effective label for
		 * this connection with a modified label or label flags only
		 * if there are changes from the original label.
		 *
		 * Accept whatever label we get if this is the first
		 * destination address for this connection. The security
		 * label and label flags must match any previous settings
		 * for all subsequent destination addresses.
		 */
		if (IN6_IS_ADDR_V4MAPPED(addr)) {
			uint32_t dst;

			IN6_V4MAPPED_TO_IPADDR(addr, dst);
			err = tsol_check_dest(ixa->ixa_tsl, &dst,
			    IPV4_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		} else {
			err = tsol_check_dest(ixa->ixa_tsl, addr,
			    IPV6_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		}
		if (err != 0)
			return (err);

		/*
		 * First destination: adopt the effective label.  Any later
		 * destination that would need a different label is refused.
		 */
		if (sctp->sctp_faddrs == NULL && effective_tsl != NULL) {
			ip_xmit_attr_replace_tsl(ixa, effective_tsl);
		} else if (effective_tsl != NULL) {
			label_rele(effective_tsl);
			return (EHOSTUNREACH);
		}
	}

	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
		return (ENOMEM);
	bzero(faddr, sizeof (*faddr));
	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep);
	if (timer_mp == NULL) {
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;

	/* Start with any options set on the conn */
	faddr->ixa = conn_get_ixa_exclusive(connp);
	if (faddr->ixa == NULL) {
		freemsg(timer_mp);
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	faddr->ixa->ixa_notify_cookie = connp->conn_sctp;

	sctp_init_faddr(sctp, faddr, addr, timer_mp);
	ASSERT(faddr->ixa->ixa_cred != NULL);

	/* ip_attr_connect didn't allow broadcast/multicast dest */
	ASSERT(faddr->next == NULL);

	/* Link the new faddr in at the head or tail, per "first". */
	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		/* only element on list; first and last are same */
		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
	} else if (first) {
		ASSERT(sctp->sctp_lastfaddr != NULL);
		faddr->next = sctp->sctp_faddrs;
		sctp->sctp_faddrs = faddr;
	} else {
		sctp->sctp_lastfaddr->next = faddr;
		sctp->sctp_lastfaddr = faddr;
	}
	sctp->sctp_nfaddrs++;

	return (0);
}
/*
 * Allocate an outbound header mblk for peer "fp", with "trailer" bytes
 * of extra space reserved after the header.  The IP + SCTP common
 * header is copied from the connection's cached template (sctp_iphc /
 * sctp_iphc6) and the addresses are patched when fp is not the current
 * peer or the source address was just (re)selected.
 *
 * The sender must later set the total length in the IP header.
 */
mblk_t *
sctp_make_mp(sctp_t *sctp, sctp_faddr_t *fp, int trailer)
{
	mblk_t *mp;
	size_t ipsctplen;
	int isv4;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	boolean_t src_changed = B_FALSE;

	ASSERT(fp != NULL);
	isv4 = fp->isv4;

	if (SCTP_IS_ADDR_UNSPEC(isv4, fp->saddr) ||
	    (fp->ixa->ixa_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
		/* Need to pick a source */
		sctp_get_dest(sctp, fp);
		/*
		 * Although we still may not get an IRE, the source address
		 * may be changed in sctp_get_ire(). Set src_changed to
		 * true so that the source address is copied again.
		 * (NOTE(review): the call above is sctp_get_dest(); the
		 * comment presumably refers to it by an older name.)
		 */
		src_changed = B_TRUE;
	}

	/* There is no suitable source address to use, return. */
	if (fp->state == SCTP_FADDRS_UNREACH)
		return (NULL);

	ASSERT(fp->ixa->ixa_ire != NULL);
	ASSERT(!SCTP_IS_ADDR_UNSPEC(isv4, fp->saddr));

	if (isv4) {
		ipsctplen = sctp->sctp_hdr_len;
	} else {
		ipsctplen = sctp->sctp_hdr6_len;
	}

	mp = allocb(ipsctplen + sctps->sctps_wroff_xtra + trailer, BPRI_MED);
	if (mp == NULL) {
		ip1dbg(("sctp_make_mp: error making mp..\n"));
		return (NULL);
	}
	/* Leave the stack's standard write-offset headroom. */
	mp->b_rptr += sctps->sctps_wroff_xtra;
	mp->b_wptr = mp->b_rptr + ipsctplen;

	ASSERT(OK_32PTR(mp->b_wptr));

	if (isv4) {
		ipha_t *iph = (ipha_t *)mp->b_rptr;

		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst);
			IN6_V4MAPPED_TO_IPADDR(&fp->saddr, iph->ipha_src);
		}
		/* set or clear the don't fragment bit */
		if (fp->df) {
			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
		} else {
			iph->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr;
			((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr;
		}
	}
	ASSERT(sctp->sctp_connp != NULL);
	return (mp);
}
/*
 * Guard against association-restart attacks: validate the address list
 * carried in an INIT/INIT-ACK that would restart an association.
 *
 * Returns 0 if the check failed and the restart should be refused,
 * 1 if the check succeeded.
 */
int
sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports,
    int sleep, sctp_stack_t *sctps, ip_recv_attr_t *ira)
{
	sctp_faddr_t *fp, *fphead = NULL;
	sctp_parm_hdr_t *ph;
	ssize_t remaining;
	int isv4;
	ipha_t *iph;
	ip6_t *ip6h;
	in6_addr_t hdraddr[1];
	int retval = 0;
	sctp_tf_t *tf;
	sctp_t *sctp;
	int compres;
	sctp_init_chunk_t *init;
	int nadded = 0;

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdraddr[0] = ip6h->ip6_src;
	}

	/* Walk the params in the INIT [ACK], pulling out addr params */
	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
	    sizeof (sctp_init_chunk_t);
	if (remaining < sizeof (*ph)) {
		/* no parameters; restart OK */
		return (1);
	}
	init = (sctp_init_chunk_t *)(ich + 1);
	ph = (sctp_parm_hdr_t *)(init + 1);

	while (ph != NULL) {
		sctp_faddr_t *fpa = NULL;

		/* params will have already been byteordered when validating */
		if (ph->sph_type == htons(PARM_ADDR4)) {
			if (remaining >= PARM_ADDR4_LEN) {
				in6_addr_t addr;
				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
				    (ph + 1), &addr);
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				fpa->faddr = addr;
				fpa->next = NULL;
			}
		} else if (ph->sph_type == htons(PARM_ADDR6)) {
			if (remaining >= PARM_ADDR6_LEN) {
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				bcopy(ph + 1, &fpa->faddr,
				    sizeof (fpa->faddr));
				fpa->next = NULL;
			}
		}
		/* link in the new addr, if it was an addr param */
		if (fpa != NULL) {
			if (fphead == NULL) {
				fphead = fpa;
			} else {
				fpa->next = fphead;
				fphead = fpa;
			}
		}
		ph = sctp_next_parm(ph, &remaining);
	}

	if (fphead == NULL) {
		/* no addr parameters; restart OK */
		return (1);
	}

	/*
	 * got at least one; make sure the header's addr is
	 * in the list
	 */
	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
	if (fp == NULL) {
		/* not included; add it now */
		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
		if (fp == NULL) {
			goto done;
		}
		bzero(fp, sizeof (*fp));
		fp->faddr = *hdraddr;
		fp->next = fphead;
		fphead = fp;
	}

	/*
	 * Now, we can finally do the check: For each sctp instance
	 * on the hash line for ports, compare its faddr set against
	 * the new one. If the new one is a strict subset of any
	 * existing sctp's faddrs, the restart is OK. However, if there
	 * is an overlap, this could be an attack, so return failure.
	 * If all sctp's faddrs are disjoint, this is a legitimate new
	 * association.
	 */
	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
	mutex_enter(&tf->tf_lock);

	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
		if (ports != sctp->sctp_connp->conn_ports) {
			continue;
		}
		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
		if (compres <= SCTP_ADDR_SUBSET) {
			retval = 1;
			mutex_exit(&tf->tf_lock);
			goto done;
		}
		if (compres == SCTP_ADDR_OVERLAP) {
			dprint(1,
			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
			/*
			 * While we still hold the lock, we need to
			 * figure out which addresses have been
			 * added so we can include them in the abort
			 * we will send back. Since these faddrs will
			 * never be used, we overload the rto field
			 * here, setting it to 0 if the address was
			 * not added, 1 if it was added.
			 */
			for (fp = fphead; fp; fp = fp->next) {
				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
					fp->rto = 0;
				} else {
					fp->rto = 1;
					nadded++;
				}
			}
			mutex_exit(&tf->tf_lock);
			goto done;
		}
	}
	mutex_exit(&tf->tf_lock);

	/* All faddrs are disjoint; legit new association */
	retval = 1;

done:
	/* If there are attempted adds, send back an abort listing the addrs */
	if (nadded > 0) {
		void *dtail;
		size_t dlen;

		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
		if (dtail == NULL) {
			goto cleanup;
		}

		/* Serialize each flagged (rto == 1) address as a TLV. */
		ph = dtail;
		dlen = 0;
		for (fp = fphead; fp; fp = fp->next) {
			if (fp->rto == 0) {
				continue;
			}
			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
				ipaddr_t addr4;

				ph->sph_type = htons(PARM_ADDR4);
				ph->sph_len = htons(PARM_ADDR4_LEN);
				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
				ph++;
				bcopy(&addr4, ph, sizeof (addr4));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (addr4));
				dlen += PARM_ADDR4_LEN;
			} else {
				ph->sph_type = htons(PARM_ADDR6);
				ph->sph_len = htons(PARM_ADDR6_LEN);
				ph++;
				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (fp->faddr));
				dlen += PARM_ADDR6_LEN;
			}
		}

		/* Send off the abort */
		sctp_send_abort(sctp, sctp_init2vtag(ich),
		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE,
		    ira);

		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
	}
cleanup:
	/* Clean up */
	if (fphead) {
		sctp_faddr_t *fpn;
		for (fp = fphead; fp; fp = fpn) {
			fpn = fp->next;
			if (fp->ixa != NULL) {
				ixa_refrele(fp->ixa);
				fp->ixa = NULL;
			}
			kmem_cache_free(sctp_kmem_faddr_cache, fp);
		}
	}

	return (retval);
}
/*
 * Transmit the ASCONF chunk at the head of sctp_cxmit_list to peer
 * address "fp" (the current peer if fp is NULL).  Only one control
 * chunk may be outstanding at a time (sctp_cchunk_pend); the chunk
 * stays on the list until acknowledged, and the RC timer is restarted
 * on both success and the various failure paths.
 */
void
sctp_wput_asconf(sctp_t *sctp, sctp_faddr_t *fp)
{
#define	SCTP_SET_SENT_FLAG(mp)	((mp)->b_flag = SCTP_CHUNK_FLAG_SENT)

	mblk_t *mp;
	mblk_t *ipmp;
	uint32_t *snp;
	sctp_parm_hdr_t *ph;
	boolean_t isv4;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	boolean_t saddr_set;

	if (sctp->sctp_cchunk_pend || sctp->sctp_cxmit_list == NULL ||
	    /* Queue it for later transmission if not yet established */
	    sctp->sctp_state < SCTPS_ESTABLISHED) {
		ip2dbg(("sctp_wput_asconf: cchunk pending? (%d) or null "\
		    "sctp_cxmit_list? (%s) or incorrect state? (%x)\n",
		    sctp->sctp_cchunk_pend, sctp->sctp_cxmit_list == NULL ?
		    "yes" : "no", sctp->sctp_state));
		return;
	}

	if (fp == NULL)
		fp = sctp->sctp_current;

	/* OK to send */
	ipmp = sctp_make_mp(sctp, fp, 0);
	if (ipmp == NULL) {
		SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
		SCTP_KSTAT(sctps, sctp_send_asconf_failed);
		return;
	}
	mp = sctp->sctp_cxmit_list;
	/* Fill in the mandatory Address Parameter TLV */
	/* NOTE(review): fp cannot be NULL here (assigned above), so the */
	/* ternary always takes the fp->isv4 arm. */
	isv4 = (fp != NULL) ? fp->isv4 : sctp->sctp_current->isv4;
	ph = (sctp_parm_hdr_t *)(mp->b_rptr + sizeof (sctp_chunk_hdr_t) +
	    sizeof (uint32_t));
	if (isv4) {
		ipha_t *ipha = (ipha_t *)ipmp->b_rptr;
		in6_addr_t ipaddr;
		ipaddr_t addr4;

		ph->sph_type = htons(PARM_ADDR4);
		ph->sph_len = htons(PARM_ADDR4_LEN);
		if (ipha->ipha_src != INADDR_ANY) {
			bcopy(&ipha->ipha_src, ph + 1, IP_ADDR_LEN);
		} else {
			ipaddr = sctp_get_valid_addr(sctp, B_FALSE,
			    &saddr_set);
			/*
			 * All the addresses are down.
			 * Maybe we might have better luck next time.
			 */
			if (!saddr_set) {
				SCTP_FADDR_RC_TIMER_RESTART(sctp, fp,
				    fp->rto);
				freeb(ipmp);
				return;
			}
			IN6_V4MAPPED_TO_IPADDR(&ipaddr, addr4);
			bcopy(&addr4, ph + 1, IP_ADDR_LEN);
		}
	} else {
		ip6_t *ip6 = (ip6_t *)ipmp->b_rptr;
		in6_addr_t ipaddr;

		ph->sph_type = htons(PARM_ADDR6);
		ph->sph_len = htons(PARM_ADDR6_LEN);
		if (!IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			bcopy(&ip6->ip6_src, ph + 1, IPV6_ADDR_LEN);
		} else {
			ipaddr = sctp_get_valid_addr(sctp, B_TRUE,
			    &saddr_set);
			/*
			 * All the addresses are down.
			 * Maybe we might have better luck next time.
			 */
			if (!saddr_set) {
				SCTP_FADDR_RC_TIMER_RESTART(sctp, fp,
				    fp->rto);
				freeb(ipmp);
				return;
			}
			bcopy(&ipaddr, ph + 1, IPV6_ADDR_LEN);
		}
	}
	/* Don't exceed CWND */
	if ((MBLKL(mp) > (fp->cwnd - fp->suna)) ||
	    ((mp = dupb(sctp->sctp_cxmit_list)) == NULL)) {
		SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
		freeb(ipmp);
		return;
	}

	/* Set the serial number now, if sending for the first time */
	if (!SCTP_CHUNK_WANT_REXMIT(mp)) {
		snp = (uint32_t *)(mp->b_rptr + sizeof (sctp_chunk_hdr_t));
		*snp = htonl(sctp->sctp_lcsn++);
	}
	SCTP_CHUNK_CLEAR_FLAGS(mp);
	fp->suna += MBLKL(mp);
	/* Attach the header and send the chunk */
	ipmp->b_cont = mp;
	sctp->sctp_cchunk_pend = 1;

	SCTP_SET_SENT_FLAG(sctp->sctp_cxmit_list);
	SCTP_SET_CHUNK_DEST(sctp->sctp_cxmit_list, fp);
	sctp_set_iplen(sctp, ipmp, fp->ixa);
	(void) conn_ip_output(ipmp, fp->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
#undef	SCTP_SET_SENT_FLAG
}
/*
 * dhcp_extending(): attempt to extend the current lease, sending a
 * DHCPREQUEST (IPv4) or a Renew/Rebind message (DHCPv6).  A BOUND
 * state machine is moved to RENEWING first.
 *
 *   input: dhcp_smach_t *: the state machine whose lease is extended
 *  output: boolean_t: the result of send_pkt()/send_pkt_v6()
 */
boolean_t
dhcp_extending(dhcp_smach_t *dsmp)
{
	dhcp_pkt_t *dpkt;

	stop_pkt_retransmission(dsmp);

	/*
	 * We change state here because this function is also called when
	 * adopting a lease and on demand by the user.
	 */
	if (dsmp->dsm_state == BOUND) {
		/* Reset negotiation statistics for the new exchange. */
		dsmp->dsm_neg_hrtime = gethrtime();
		dsmp->dsm_bad_offers = 0;
		dsmp->dsm_sent = 0;
		dsmp->dsm_received = 0;
		/* Bound->renew can't fail */
		(void) set_smach_state(dsmp, RENEWING);
	}

	dhcpmsg(MSG_DEBUG, "dhcp_extending: sending request on %s",
	    dsmp->dsm_name);

	if (dsmp->dsm_isv6) {
		dhcp_lease_t *dlp;
		dhcp_lif_t *lif;
		uint_t nlifs;
		uint_t irt, mrt;

		/*
		 * Start constructing the Renew/Rebind message.  Only Renew
		 * has a server ID, as we still think our server might be
		 * reachable.
		 */
		if (dsmp->dsm_state == RENEWING) {
			dpkt = init_pkt(dsmp, DHCPV6_MSG_RENEW);
			(void) add_pkt_opt(dpkt, DHCPV6_OPT_SERVERID,
			    dsmp->dsm_serverid, dsmp->dsm_serveridlen);
			irt = DHCPV6_REN_TIMEOUT;
			mrt = DHCPV6_REN_MAX_RT;
		} else {
			dpkt = init_pkt(dsmp, DHCPV6_MSG_REBIND);
			irt = DHCPV6_REB_TIMEOUT;
			mrt = DHCPV6_REB_MAX_RT;
		}

		/*
		 * Loop over the leases, and add an IA_NA for each and an
		 * IAADDR for each address.
		 */
		for (dlp = dsmp->dsm_leases; dlp != NULL; dlp = dlp->dl_next) {
			lif = dlp->dl_lifs;
			for (nlifs = dlp->dl_nlifs; nlifs > 0;
			    nlifs--, lif = lif->lif_next) {
				(void) add_pkt_lif(dpkt, lif,
				    DHCPV6_STAT_SUCCESS, NULL);
			}
		}

		/* Add required Option Request option */
		(void) add_pkt_prl(dpkt, dsmp);

		return (send_pkt_v6(dsmp, dpkt, dsmp->dsm_server,
		    stop_extending, irt, mrt));
	} else {
		dhcp_lif_t *lif = dsmp->dsm_lif;
		ipaddr_t server;

		/* assemble the DHCPREQUEST message. */
		dpkt = init_pkt(dsmp, REQUEST);
		dpkt->pkt->ciaddr.s_addr = lif->lif_addr;

		/*
		 * The max dhcp message size option is set to the interface
		 * max, minus the size of the udp and ip headers.
		 */
		(void) add_pkt_opt16(dpkt, CD_MAX_DHCP_SIZE,
		    htons(lif->lif_max - sizeof (struct udpiphdr)));
		(void) add_pkt_opt32(dpkt, CD_LEASE_TIME, htonl(DHCP_PERM));

		if (class_id_len != 0) {
			(void) add_pkt_opt(dpkt, CD_CLASS_ID, class_id,
			    class_id_len);
		}
		(void) add_pkt_prl(dpkt, dsmp);

		/*
		 * dsm_reqhost was set for this state machine in
		 * dhcp_selecting() if the REQUEST_HOSTNAME option was set and
		 * a host name was found.
		 */
		if (!dhcp_add_fqdn_opt(dpkt, dsmp) &&
		    dsmp->dsm_reqhost != NULL) {
			(void) add_pkt_opt(dpkt, CD_HOSTNAME,
			    dsmp->dsm_reqhost, strlen(dsmp->dsm_reqhost));
		}

		(void) add_pkt_opt(dpkt, CD_END, NULL, 0);

		IN6_V4MAPPED_TO_IPADDR(&dsmp->dsm_server, server);
		return (send_pkt(dsmp, dpkt, server, stop_extending));
	}
}
int inet_matchaddr(const void *sa, const char *name) { int ret = -1; char *lname, *mp, *p; char *ep; int serrno = errno; uint32_t claddr4 = 0; if ((p = lname = strdup(name)) == NULL) { errno = ENOMEM; return (-1); } if ((mp = strchr(p, '/')) != NULL) *mp++ = '\0'; switch (((struct sockaddr_in *)sa)->sin_family) { case AF_INET6: { char *pp; ipaddr_t ipaddr4; struct in6_addr hcaddr6; struct in6_addr *claddr6 = &((struct sockaddr_in6 *)sa)->sin6_addr; if (!IN6_IS_ADDR_V4MAPPED(claddr6)) { /* IPv6 address */ if (*p != '[') { errno = EINVAL; break; } p++; if ((pp = strchr(p, ']')) == NULL || (mp != NULL && pp != mp - 2) || (mp == NULL && *(pp + 1) != '\0')) { errno = EINVAL; break; } *pp = '\0'; if (inet_pton(AF_INET6, p, &hcaddr6) != 1) { errno = EINVAL; break; } if (mp != NULL) { /* Match only first prefix bits */ long prefix6; errno = 0; prefix6 = strtol(mp, &ep, 10); if (errno != 0 || prefix6 < 0 || prefix6 > 128 || *ep != '\0') { errno = EINVAL; break; } ret = IN6_ARE_PREFIXEDADDR_EQUAL(claddr6, &hcaddr6, prefix6) ? 1 : 0; break; } else { /* No prefix, exact match */ ret = IN6_ARE_ADDR_EQUAL(claddr6, &hcaddr6) ? 1 : 0; break; } } else { /* IPv4-mapped IPv6 address, fallthrough to IPv4 */ IN6_V4MAPPED_TO_IPADDR(claddr6, ipaddr4); claddr4 = ntohl(ipaddr4); } } /*FALLTHROUGH*/ case AF_INET: { int i; uint32_t hcaddr4 = 0, mask4; if (claddr4 == 0) { claddr4 = ntohl( ((struct sockaddr_in *)sa)->sin_addr.s_addr); } for (i = 0; i < 4; i++) { long qaddr4; errno = 0; qaddr4 = strtol(p, &ep, 10); if (errno != 0 || qaddr4 < 0 || qaddr4 > 255 || (*ep != '.' && *ep != '\0')) { errno = EINVAL; break; } hcaddr4 |= qaddr4 << ((3 - i) * 8); if (*ep == '\0') break; p = ep + 1; } if (errno != 0) break; if (mp != NULL) { /* Mask is specified explicitly */ long mb; errno = 0; mb = strtol(mp, &ep, 10); if (errno != 0 || mb < 0 || mb > 32 || *ep != '\0') { errno = EINVAL; break; } mask4 = mb ? 
~0 << ((sizeof (struct in_addr) * NBBY) - mb) : 0; hcaddr4 &= mask4; } else { /* * Use old-fashioned implicit netmasking by checking * for lower-end zeroes. On the off chance we don't * match any well-known prefixes, return an exact- * match prefix which is misleadingly labelled as * IN_CLASSE_NET. */ if ((hcaddr4 & IN_CLASSA_HOST) == 0) mask4 = IN_CLASSA_NET; else if ((hcaddr4 & IN_CLASSB_HOST) == 0) mask4 = IN_CLASSB_NET; else if ((hcaddr4 & IN_CLASSC_HOST) == 0) mask4 = IN_CLASSC_NET; else mask4 = IN_CLASSE_NET; } ret = ((claddr4 & mask4) == hcaddr4) ? 1 : 0; break; } } free(lname); if (ret != -1) errno = serrno; return (ret); }