/*
 * Radix-tree walker callback: expunge a route whose rt_ifa matches the
 * interface address being brought down.  "xap" is a struct in_ifadown_arg
 * carrying the target ifa, the routing table head (rnh), and the "del"
 * flag; static routes are spared unless ap->del is set.
 *
 * Always returns 0 so that the tree walk continues over remaining nodes.
 */
static int
in_ifadownkill(struct rtentry *rt, void *xap)
{
	struct in_ifadown_arg *ap = xap;

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * Acquire a reference so that it can later be freed
		 * as the refcount would be 0 here in case of at least
		 * ap->del.
		 */
		RT_ADDREF(rt);
		/*
		 * Disconnect it from the tree and permit protocols
		 * to cleanup.
		 */
		rt_expunge(ap->rnh, rt);
		/*
		 * At this point it is an rttrash node, and in case
		 * the above is the only reference we must free it.
		 * If we do not, no one will have a pointer and the
		 * rtentry will be leaked forever.
		 * In case someone else holds a reference, we are
		 * fine as we only decrement the refcount. In that
		 * case if the other entity calls RT_REMREF, we
		 * will still be leaking but at least we tried.
		 */
		RTFREE_LOCKED(rt);	/* drops the lock as well */
		return (0);
	}
	RT_UNLOCK(rt);
	return (0);	/* style(9): parenthesize, consistent with above */
}
/*
 * Look up a route for ro->ro_dst in FIB "fibnum" and, when the matching
 * prefix has multiple (dupedkey) route entries, pick one next hop by
 * weighted Modulo-N hashing (RFC 2991).  On success ro->ro_rt points to
 * the selected rtentry, referenced and unlocked; on failure it is NULL.
 *
 * NOTE(review): the RT_UNLOCK calls on the early-return paths imply that
 * rtalloc1_fib() returns the rtentry locked (and referenced) — confirm
 * against its definition elsewhere in the tree.
 */
void
rtalloc_mpath_fib(struct route *ro, uint32_t hash, u_int fibnum)
{
	struct radix_node *rn0, *rn;
	u_int32_t n;
	struct rtentry *rt;
	int64_t weight;

	/*
	 * XXX we don't attempt to lookup cached route again; what should
	 * be done for sendto(3) case?
	 */
	if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)
	    && RT_LINK_IS_UP(ro->ro_rt->rt_ifp))
		return;		/* cached route still usable; keep it */
	ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, 0, fibnum);

	/* if the route does not exist or it is not multipath, don't care */
	if (ro->ro_rt == NULL)
		return;
	if (rn_mpath_next((struct radix_node *)ro->ro_rt) == NULL) {
		/* single path: just release the lock, keep the reference */
		RT_UNLOCK(ro->ro_rt);
		return;
	}

	/* beyond here, we use rn as the master copy */
	rn0 = rn = (struct radix_node *)ro->ro_rt;
	n = rn_mpath_count(rn0);

	/* gw selection by Modulo-N Hash (RFC2991) XXX need improvement? */
	hash += hashjitter;	/* perturb so all flows don't hash alike */
	hash %= n;
	/*
	 * Walk the dupedkey chain, consuming each entry's rt_weight from
	 * the hashed value until it is exhausted; the entry we stop on is
	 * the selected next hop.
	 */
	for (weight = abs((int32_t)hash), rt = ro->ro_rt;
	    weight >= rt->rt_weight && rn;
	    weight -= rt->rt_weight) {
		/* stay within the multipath routes */
		if (rn->rn_dupedkey && rn->rn_mask != rn->rn_dupedkey->rn_mask)
			break;
		rn = rn->rn_dupedkey;
		rt = (struct rtentry *)rn;
	}
	/* XXX try filling rt_gwroute and avoid unreachable gw */

	/* gw selection has failed - there must be only zero weight routes */
	if (!rn) {
		RT_UNLOCK(ro->ro_rt);
		ro->ro_rt = NULL;
		return;
	}
	if (ro->ro_rt != rt) {
		/* swap: drop ref+lock on the head entry, take the chosen one */
		RTFREE_LOCKED(ro->ro_rt);
		ro->ro_rt = (struct rtentry *)rn;
		RT_LOCK(ro->ro_rt);
		RT_ADDREF(ro->ro_rt);
	}
	RT_UNLOCK(ro->ro_rt);
}
/*
 * Look up a route for ro->ro_dst in FIB "fibnum"; when the matching prefix
 * is multipath, delegate next-hop selection to rt_mpath_selectrte() using
 * the caller-supplied flow hash.  On success ro->ro_rt points to the
 * selected rtentry, referenced and unlocked; on failure it is NULL.
 *
 * NOTE(review): the RT_UNLOCK calls on the early-return paths imply that
 * rtalloc1_fib() returns the rtentry locked (and referenced) — confirm
 * against its definition elsewhere in the tree.
 */
void
rtalloc_mpath_fib(struct route *ro, uint32_t hash, u_int fibnum)
{
	struct rtentry *rt;

	/*
	 * XXX we don't attempt to lookup cached route again; what should
	 * be done for sendto(3) case?
	 */
	if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)
	    && RT_LINK_IS_UP(ro->ro_rt->rt_ifp))
		return;		/* cached route still usable; keep it */
	ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, 0, fibnum);

	/* if the route does not exist or it is not multipath, don't care */
	if (ro->ro_rt == NULL)
		return;
	if (rn_mpath_next((struct radix_node *)ro->ro_rt) == NULL) {
		/* single path: just release the lock, keep the reference */
		RT_UNLOCK(ro->ro_rt);
		return;
	}

	rt = rt_mpath_selectrte(ro->ro_rt, hash);
	/* XXX try filling rt_gwroute and avoid unreachable gw */

	/* gw selection has failed - there must be only zero weight routes */
	if (!rt) {
		RT_UNLOCK(ro->ro_rt);
		ro->ro_rt = NULL;
		return;
	}
	if (ro->ro_rt != rt) {
		/* swap: drop ref+lock on the head entry, take the chosen one */
		RTFREE_LOCKED(ro->ro_rt);
		ro->ro_rt = rt;
		RT_LOCK(ro->ro_rt);
		RT_ADDREF(ro->ro_rt);
	}
	RT_UNLOCK(ro->ro_rt);
}
/*
 * Apply routing function on the affected upstream and downstream prefixes,
 * i.e. either set or clear RTF_PROXY on the cloning prefix route; all route
 * entries that were cloned off these prefixes will be blown away. Caller
 * must have acquired proxy6_lock and must not be holding nd6_mutex.
 *
 * Both lists are consumed: every element is unlinked, processed, and freed
 * via nd6_ndprl_free(), and the prefix references the list elements held
 * are dropped with NDPR_REMREF.
 */
static void
nd6_prproxy_prelist_setroute(boolean_t enable,
    struct nd6_prproxy_prelist_head *up_head,
    struct nd6_prproxy_prelist_head *down_head)
{
	struct nd6_prproxy_prelist *up, *down, *ndprl_tmp;
	struct nd_prefix *pr;

	lck_mtx_assert(&proxy6_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* Pass 1: upstream prefixes (ndprl_up is NULL for these entries). */
	SLIST_FOREACH_SAFE(up, up_head, ndprl_le, ndprl_tmp) {
		struct rtentry *rt;
		boolean_t prproxy, set_allmulti = FALSE;
		/* only read when set_allmulti is TRUE, so never uninitialized */
		int allmulti_sw;
		struct ifnet *ifp = NULL;

		SLIST_REMOVE(up_head, up, nd6_prproxy_prelist, ndprl_le);
		pr = up->ndprl_pr;
		VERIFY(up->ndprl_up == NULL);

		NDPR_LOCK(pr);
		ifp = pr->ndpr_ifp;
		prproxy = (pr->ndpr_stateflags & NDPRF_PRPROXY);
		VERIFY(!prproxy || ((pr->ndpr_stateflags & NDPRF_ONLINK) &&
		    !(pr->ndpr_stateflags & NDPRF_IFSCOPE)));

		/* Drain any pending solicitation state before reconfiguring. */
		nd6_prproxy_sols_reap(pr);
		VERIFY(pr->ndpr_prproxy_sols_cnt == 0);
		VERIFY(RB_EMPTY(&pr->ndpr_prproxy_sols));

		/*
		 * Toggle allmulti on the interface at most once per prefix:
		 * ndpr_allmulti_cnt tracks whether this prefix already holds
		 * an allmulti reference.  The global nd6_prproxy counter is
		 * only adjusted here, for upstream prefixes.
		 */
		if (enable && pr->ndpr_allmulti_cnt == 0) {
			nd6_prproxy++;
			pr->ndpr_allmulti_cnt++;
			set_allmulti = TRUE;
			allmulti_sw = TRUE;
		} else if (!enable && pr->ndpr_allmulti_cnt > 0) {
			nd6_prproxy--;
			pr->ndpr_allmulti_cnt--;
			set_allmulti = TRUE;
			allmulti_sw = FALSE;
		}

		/*
		 * Take a reference on the prefix route only when its proxy
		 * state actually needs (re)applying; otherwise leave rt NULL
		 * so no route work happens below.
		 */
		if ((rt = pr->ndpr_rt) != NULL) {
			if ((enable && prproxy) || (!enable && !prproxy))
				RT_ADDREF(rt);
			else
				rt = NULL;
			NDPR_UNLOCK(pr);
		} else {
			NDPR_UNLOCK(pr);
		}

		/* Call the following ioctl after releasing NDPR lock */
		if (set_allmulti && ifp != NULL)
			if_allmulti(ifp, allmulti_sw);

		NDPR_REMREF(pr);
		if (rt != NULL) {
			rt_set_proxy(rt, enable);
			rtfree(rt);
		}
		nd6_ndprl_free(up);
	}

	/*
	 * Pass 2: downstream prefixes.  The proxy decision is taken from the
	 * associated upstream prefix (ndprl_up), but the allmulti and route
	 * changes are applied to the downstream prefix itself.
	 */
	SLIST_FOREACH_SAFE(down, down_head, ndprl_le, ndprl_tmp) {
		struct nd_prefix *pr_up;
		struct rtentry *rt;
		boolean_t prproxy, set_allmulti = FALSE;
		/* only read when set_allmulti is TRUE, so never uninitialized */
		int allmulti_sw;
		struct ifnet *ifp = NULL;

		SLIST_REMOVE(down_head, down, nd6_prproxy_prelist, ndprl_le);
		pr = down->ndprl_pr;
		pr_up = down->ndprl_up;
		VERIFY(pr_up != NULL);

		NDPR_LOCK(pr_up);
		/*
		 * NOTE(review): ifp is read from "pr" while holding pr_up's
		 * lock rather than pr's — confirm this is safe (e.g. that
		 * ndpr_ifp is immutable after prefix initialization).
		 */
		ifp = pr->ndpr_ifp;
		prproxy = (pr_up->ndpr_stateflags & NDPRF_PRPROXY);
		VERIFY(!prproxy || ((pr_up->ndpr_stateflags & NDPRF_ONLINK) &&
		    !(pr_up->ndpr_stateflags & NDPRF_IFSCOPE)));
		NDPR_UNLOCK(pr_up);

		NDPR_LOCK(pr);
		/* Same allmulti bookkeeping as above, minus the global count. */
		if (enable && pr->ndpr_allmulti_cnt == 0) {
			pr->ndpr_allmulti_cnt++;
			set_allmulti = TRUE;
			allmulti_sw = TRUE;
		} else if (!enable && pr->ndpr_allmulti_cnt > 0) {
			pr->ndpr_allmulti_cnt--;
			set_allmulti = TRUE;
			allmulti_sw = FALSE;
		}

		if ((rt = pr->ndpr_rt) != NULL) {
			if ((enable && prproxy) || (!enable && !prproxy))
				RT_ADDREF(rt);
			else
				rt = NULL;
			NDPR_UNLOCK(pr);
		} else {
			NDPR_UNLOCK(pr);
		}
		/* ioctl deferred until after the NDPR lock is dropped */
		if (set_allmulti && ifp != NULL)
			if_allmulti(ifp, allmulti_sw);

		NDPR_REMREF(pr);
		NDPR_REMREF(pr_up);
		if (rt != NULL) {
			rt_set_proxy(rt, enable);
			rtfree(rt);
		}
		nd6_ndprl_free(down);
	}
}
/*
 * Resolve "net_dest" (with optional caller hint "hint0") down to the route
 * that ARP should actually use: if the route is a gateway route, chase and
 * (re)validate rt_gwroute, looking it up again if it is missing or down.
 * On success, *out_route holds a route with an extra reference that the
 * caller must release; on failure an errno is returned and *out_route
 * stays NULL.  Returns 0 with *out_route == NULL when hint0 is NULL.
 */
__private_extern__ errno_t
arp_route_to_gateway_route(const struct sockaddr *net_dest, route_t hint0,
    route_t *out_route)
{
	struct timeval timenow;
	route_t rt = hint0, hint = hint0;
	errno_t error = 0;

	*out_route = NULL;

	/*
	 * Next hop determination. Because we may involve the gateway route
	 * in addition to the original route, locking is rather complicated.
	 * The general concept is that regardless of whether the route points
	 * to the original route or to the gateway route, this routine takes
	 * an extra reference on such a route. This extra reference will be
	 * released at the end.
	 *
	 * Care must be taken to ensure that the "hint0" route never gets freed
	 * via rtfree(), since the caller may have stored it inside a struct
	 * route with a reference held for that placeholder.
	 */
	if (rt != NULL) {
		unsigned int ifindex;

		RT_LOCK_SPIN(rt);
		ifindex = rt->rt_ifp->if_index;
		RT_ADDREF_LOCKED(rt);
		if (!(rt->rt_flags & RTF_UP)) {
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
			/* route is down, find a new one */
			hint = rt = rtalloc1_scoped((struct sockaddr *)
			    (size_t)net_dest, 1, 0, ifindex);
			if (hint != NULL) {
				RT_LOCK_SPIN(rt);
				ifindex = rt->rt_ifp->if_index;
			} else {
				senderr(EHOSTUNREACH);
			}
		}

		/*
		 * We have a reference to "rt" by now; it will either
		 * be released or freed at the end of this routine.
		 */
		RT_LOCK_ASSERT_HELD(rt);
		if (rt->rt_flags & RTF_GATEWAY) {
			struct rtentry *gwrt = rt->rt_gwroute;
			struct sockaddr_in gw;

			/* If there's no gateway rt, look it up */
			if (gwrt == NULL) {
				/* copy the gateway address before unlocking */
				gw = *((struct sockaddr_in *)rt->rt_gateway);
				RT_UNLOCK(rt);
				goto lookup;
			}
			/* Become a regular mutex */
			RT_CONVERT_LOCK(rt);

			/*
			 * Take gwrt's lock while holding route's lock;
			 * this is okay since gwrt never points back
			 * to "rt", so no lock ordering issues.
			 */
			RT_LOCK_SPIN(gwrt);
			if (!(gwrt->rt_flags & RTF_UP)) {
				struct rtentry *ogwrt;

				/* detach the stale gwrt before re-lookup */
				rt->rt_gwroute = NULL;
				RT_UNLOCK(gwrt);
				gw = *((struct sockaddr_in *)rt->rt_gateway);
				RT_UNLOCK(rt);
				rtfree(gwrt);
lookup:
				gwrt = rtalloc1_scoped(
				    (struct sockaddr *)&gw, 1, 0, ifindex);

				RT_LOCK(rt);
				/*
				 * Bail out if the route is down, no route
				 * to gateway, circular route, or if the
				 * gateway portion of "rt" has changed.
				 */
				if (!(rt->rt_flags & RTF_UP) ||
				    gwrt == NULL || gwrt == rt ||
				    !equal(SA(&gw), rt->rt_gateway)) {
					if (gwrt == rt) {
						RT_REMREF_LOCKED(gwrt);
						gwrt = NULL;
					}
					RT_UNLOCK(rt);
					if (gwrt != NULL)
						rtfree(gwrt);
					senderr(EHOSTUNREACH);
				}

				/* Remove any existing gwrt */
				ogwrt = rt->rt_gwroute;
				if ((rt->rt_gwroute = gwrt) != NULL)
					RT_ADDREF(gwrt);

				/* Clean up "rt" now while we can */
				if (rt == hint0) {
					/* never rtfree() the caller's hint */
					RT_REMREF_LOCKED(rt);
					RT_UNLOCK(rt);
				} else {
					RT_UNLOCK(rt);
					rtfree(rt);
				}
				rt = gwrt;
				/* Now free the replaced gwrt */
				if (ogwrt != NULL)
					rtfree(ogwrt);

				/* If still no route to gateway, bail out */
				if (rt == NULL)
					senderr(EHOSTUNREACH);
			} else {
				/* existing gwrt is up: just take a reference */
				RT_ADDREF_LOCKED(gwrt);
				RT_UNLOCK(gwrt);
				/* Clean up "rt" now while we can */
				if (rt == hint0) {
					/* never rtfree() the caller's hint */
					RT_REMREF_LOCKED(rt);
					RT_UNLOCK(rt);
				} else {
					RT_UNLOCK(rt);
					rtfree(rt);
				}
				rt = gwrt;
			}

			/* rt == gwrt; if it is now down, give up */
			RT_LOCK_SPIN(rt);
			if (!(rt->rt_flags & RTF_UP)) {
				RT_UNLOCK(rt);
				senderr(EHOSTUNREACH);
			}
		}

		if (rt->rt_flags & RTF_REJECT) {
			getmicrotime(&timenow);
			/* rmx_expire == 0 means the reject never expires */
			if (rt->rt_rmx.rmx_expire == 0 ||
			    timenow.tv_sec < rt->rt_rmx.rmx_expire) {
				RT_UNLOCK(rt);
				senderr(rt == hint ? EHOSTDOWN :
				    EHOSTUNREACH);
			}
		}

		/* Become a regular mutex */
		RT_CONVERT_LOCK(rt);

		/* Caller is responsible for cleaning up "rt" */
		*out_route = rt;
	}
	return (0);

bad:
	/* Clean up route (either it is "rt" or "gwrt") */
	if (rt != NULL) {
		RT_LOCK_SPIN(rt);
		if (rt == hint0) {
			/* drop only our extra reference on the caller's hint */
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rtfree(rt);
		}
	}
	return (error);
}