/*
 * Pretty-print one kernel routing table entry (kvm-based netstat path).
 * Column widths (wid_*) and option flags (aflag, Wflag) are globals set
 * elsewhere in this program.  Kernel memory is fetched via kget()/kgetsa()/
 * kread().
 */
static void
p_rtentry(struct rtentry *rt)
{
	static struct ifnet ifnet, *lastif;	/* cache last ifnet fetched */
	struct rtentry parent;
	static char buffer[128];
	static char prettyname[128];		/* "name + unit", e.g. fxp0 */
	struct sockaddr *sa;
	sa_u addr, mask;

	/*
	 * Don't print protocol-cloned routes unless -a.
	 */
	if (rt->rt_flags & RTF_WASCLONED && !aflag) {
		kget(rt->rt_parent, parent);
		if (parent.rt_flags & RTF_PRCLONING)
			return;
	}

	/* Copy destination and netmask sockaddrs out of kernel memory. */
	bzero(&addr, sizeof(addr));
	if ((sa = kgetsa(rt_key(rt))))
		bcopy(sa, &addr, sa->sa_len);
	bzero(&mask, sizeof(mask));
	if (rt_mask(rt) && (sa = kgetsa(rt_mask(rt))))
		bcopy(sa, &mask, sa->sa_len);

	p_sockaddr(&addr.u_sa, &mask.u_sa, rt->rt_flags, wid_dst);
	p_sockaddr(kgetsa(rt->rt_gateway), NULL, RTF_HOST, wid_gw);
	/* Build a width-parameterized format string for the flags column. */
	snprintf(buffer, sizeof(buffer), "%%-%d.%ds ", wid_flags, wid_flags);
	p_flags(rt->rt_flags, buffer);

	/* Refs/use (and MTU with -W) only shown for INET or with -W. */
	if (addr.u_sa.sa_family == AF_INET || Wflag) {
		printf("%*ld %*lu ", wid_refs, rt->rt_refcnt,
		    wid_use, rt->rt_use);
		if (Wflag) {
			if (rt->rt_rmx.rmx_mtu != 0)
				printf("%*lu ", wid_mtu, rt->rt_rmx.rmx_mtu);
			else
				printf("%*s ", wid_mtu, "");
		}
	}

	if (rt->rt_ifp) {
		/* Re-read the ifnet only when the interface changes. */
		if (rt->rt_ifp != lastif) {
			kget(rt->rt_ifp, ifnet);
			/* if_name is a kernel pointer to the name string. */
			kread((u_long)ifnet.if_name, buffer, sizeof(buffer));
			lastif = rt->rt_ifp;
			snprintf(prettyname, sizeof(prettyname), "%s%d",
			    buffer, ifnet.if_unit);
		}
		printf("%*.*s", wid_if, wid_if, prettyname);
		if (rt->rt_rmx.rmx_expire) {
			time_t expire_time;

			/* Show remaining lifetime only if still positive. */
			if ((expire_time =
			    rt->rt_rmx.rmx_expire - time((time_t *)0)) > 0)
				printf(" %*d", wid_expire, (int)expire_time);
		}
		/* Mark routes that share a key with another entry. */
		if (rt->rt_nodes[0].rn_dupedkey)
			printf(" =>");
	}
	putchar('\n');
}
/*
 * Check for alternatives when higher level complains
 * about service problems.  For now, invalidate cached
 * routing information.  If the route was created dynamically
 * (by a redirect), time to try a default gateway again.
 */
void
in_losing(struct inpcb *inp)
{
	register struct rtentry *rt;
	struct rt_addrinfo info;

	if ((rt = inp->inp_route.ro_rt)) {
		/* Detach the cached route from the PCB before acting on it. */
		inp->inp_route.ro_rt = 0;
		bzero((caddr_t)&info, sizeof(info));
		info.rti_info[RTAX_DST] =
		    (struct sockaddr *)&inp->inp_route.ro_dst;
		info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		info.rti_info[RTAX_NETMASK] = rt_mask(rt);
		/* Notify routing-socket listeners we are giving up on it. */
		rt_missmsg(RTM_LOSING, &info, rt->rt_flags, 0);
		if (rt->rt_flags & RTF_DYNAMIC)
			/* Redirect-created route: delete it outright. */
			(void) rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags,
			    (struct rtentry **)0);
		else
			/*
			 * A new route can be allocated
			 * the next time output is attempted.
			 */
			rtfree(rt);
	}
}
/*
 * Check for alternatives when higher level complains
 * about service problems.  For now, invalidate cached
 * routing information.  If the route was created dynamically
 * (by a redirect), time to try a default gateway again.
 */
void
in6_losing(struct in6pcb *in6p)
{
	struct rtentry *rt;
	struct rt_addrinfo info;

	/* Only meaningful for IPv6 PCBs. */
	if (in6p->in6p_af != AF_INET6)
		return;

	/* Nothing to do if there is no valid cached route. */
	if ((rt = rtcache_validate(&in6p->in6p_route)) == NULL)
		return;

	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rtcache_getdst(&in6p->in6p_route);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	/* Notify routing-socket listeners we are giving up on this route. */
	rt_missmsg(RTM_LOSING, &info, rt->rt_flags, 0);
	if (rt->rt_flags & RTF_DYNAMIC) {
		/* Redirect-created route: delete it outright. */
		(void)rtrequest(RTM_DELETE, rt_getkey(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
	}
	/*
	 * A new route can be allocated
	 * the next time output is attempted.
	 */
	rtcache_free(&in6p->in6p_route);
}
/*
 * First pass over a routing table entry: measure how wide each printed
 * column would be and grow the global wid_* maxima accordingly, so the
 * later printing pass can align columns.  Prints nothing itself.
 */
static void
size_cols_rtentry(struct rtentry *rt)
{
	static struct ifnet ifnet, *lastif;	/* cache last ifnet fetched */
	static char buffer[100];
	const char *bp;
	struct sockaddr *sa;
	sa_u addr, mask;
	int len;

	/* Copy destination and netmask sockaddrs out of kernel memory. */
	bzero(&addr, sizeof(addr));
	if ((sa = kgetsa(rt_key(rt))))
		bcopy(sa, &addr, sa->sa_len);
	bzero(&mask, sizeof(mask));
	if (rt_mask(rt) && (sa = kgetsa(rt_mask(rt))))
		bcopy(sa, &mask, sa->sa_len);

	bp = fmt_sockaddr(&addr.u_sa, &mask.u_sa, rt->rt_flags);
	len = strlen(bp);
	wid_dst = MAX(len, wid_dst);

	bp = fmt_sockaddr(kgetsa(rt->rt_gateway), NULL, RTF_HOST);
	len = strlen(bp);
	wid_gw = MAX(len, wid_gw);

	bp = fmt_flags(rt->rt_flags);
	len = strlen(bp);
	wid_flags = MAX(len, wid_flags);

	if (addr.u_sa.sa_family == AF_INET || Wflag) {
		/*
		 * NOTE(review): "%d" assumes rt_refcnt is int here —
		 * confirm against this tree's struct rtentry (some
		 * versions declare it long).
		 */
		len = snprintf(buffer, sizeof(buffer), "%d", rt->rt_refcnt);
		wid_refs = MAX(len, wid_refs);
		len = snprintf(buffer, sizeof(buffer), "%lu", rt->rt_use);
		wid_use = MAX(len, wid_use);
		if (Wflag && rt->rt_rmx.rmx_mtu != 0) {
			len = snprintf(buffer, sizeof(buffer), "%lu",
			    rt->rt_rmx.rmx_mtu);
			wid_mtu = MAX(len, wid_mtu);
		}
	}

	if (rt->rt_ifp) {
		/* Re-read the ifnet only when the interface changes. */
		if (rt->rt_ifp != lastif) {
			if (kget(rt->rt_ifp, ifnet) == 0)
				len = strlen(ifnet.if_xname);
			else
				len = strlen("---");	/* unreadable ifnet */
			lastif = rt->rt_ifp;
			wid_if = MAX(len, wid_if);
		}
		if (rt->rt_rmx.rmx_expire) {
			time_t expire_time;

			/* Width of the remaining-lifetime column. */
			if ((expire_time =
			    rt->rt_rmx.rmx_expire - uptime.tv_sec) > 0) {
				len = snprintf(buffer, sizeof(buffer), "%d",
				    (int)expire_time);
				wid_expire = MAX(len, wid_expire);
			}
		}
	}
}
/*
 * Second pass over a routing table entry: print it using the column
 * widths (wid_*) computed by the sizing pass.  Kernel memory is fetched
 * via kget()/kgetsa().
 */
static void
p_rtentry(struct rtentry *rt)
{
	static struct ifnet ifnet, *lastif;	/* cache last ifnet fetched */
	static char buffer[128];
	static char prettyname[128];		/* interface display name */
	struct sockaddr *sa;
	sa_u addr, mask;

	/* Copy destination and netmask sockaddrs out of kernel memory. */
	bzero(&addr, sizeof(addr));
	if ((sa = kgetsa(rt_key(rt))))
		bcopy(sa, &addr, sa->sa_len);
	bzero(&mask, sizeof(mask));
	if (rt_mask(rt) && (sa = kgetsa(rt_mask(rt))))
		bcopy(sa, &mask, sa->sa_len);

	p_sockaddr(&addr.u_sa, &mask.u_sa, rt->rt_flags, wid_dst);
	p_sockaddr(kgetsa(rt->rt_gateway), NULL, RTF_HOST, wid_gw);
	/* Build a width-parameterized format string for the flags column. */
	snprintf(buffer, sizeof(buffer), "%%-%d.%ds ", wid_flags, wid_flags);
	p_flags(rt->rt_flags, buffer);

	if (addr.u_sa.sa_family == AF_INET || Wflag) {
#if 0
		/* refs/use columns disabled in this version */
		printf("%*d %*lu ", wid_refs, rt->rt_refcnt,
		    wid_use, rt->rt_use);
#endif
		if (Wflag) {
			if (rt->rt_rmx.rmx_mtu != 0)
				printf("%*lu ", wid_mtu, rt->rt_rmx.rmx_mtu);
			else
				printf("%*s ", wid_mtu, "");
		}
	}

	if (rt->rt_ifp) {
		/* Re-read the ifnet only when the interface changes. */
		if (rt->rt_ifp != lastif) {
			if (kget(rt->rt_ifp, ifnet) == 0)
				strlcpy(prettyname, ifnet.if_xname,
				    sizeof(prettyname));
			else
				strlcpy(prettyname, "---",
				    sizeof(prettyname));
			lastif = rt->rt_ifp;
		}
		printf("%*.*s", wid_if, wid_if, prettyname);
		if (rt->rt_rmx.rmx_expire) {
			time_t expire_time;

			/* Show remaining lifetime only if still positive. */
			if ((expire_time =
			    rt->rt_rmx.rmx_expire - uptime.tv_sec) > 0)
				printf(" %*d", wid_expire, (int)expire_time);
		}
		/* Mark routes that share a key with another entry. */
		if (rt->rt_nodes[0].rn_dupedkey)
			printf(" =>");
	}
	putchar('\n');
}
static void p_krtentry(struct rtentry *rt) { static struct ifnet ifnet, *lastif; struct sockaddr_storage sock1, sock2; struct sockaddr *sa = (struct sockaddr *)&sock1; struct sockaddr *mask = (struct sockaddr *)&sock2; bcopy(kgetsa(rt_key(rt)), sa, sizeof(struct sockaddr)); if (sa->sa_len > sizeof(struct sockaddr)) bcopy(kgetsa(rt_key(rt)), sa, sa->sa_len); if (sa->sa_family == PF_KEY) { encap_print(rt); return; } if (rt_mask(rt)) { bcopy(kgetsa(rt_mask(rt)), mask, sizeof(struct sockaddr)); if (sa->sa_len > sizeof(struct sockaddr)) bcopy(kgetsa(rt_mask(rt)), mask, sa->sa_len); } else mask = 0; p_addr(sa, mask, rt->rt_flags); p_gwaddr(kgetsa(rt->rt_gateway), sa->sa_family); p_flags(rt->rt_flags, "%-6.6s "); printf("%5u %8lld ", rt->rt_refcnt, rt->rt_use); if (rt->rt_rmx.rmx_mtu) printf("%5u ", rt->rt_rmx.rmx_mtu); else printf("%5s ", "-"); putchar((rt->rt_rmx.rmx_locks & RTV_MTU) ? 'L' : ' '); printf(" %2d", rt->rt_priority); if (rt->rt_ifp) { if (rt->rt_ifp != lastif) { kread((u_long)rt->rt_ifp, &ifnet, sizeof(ifnet)); lastif = rt->rt_ifp; } printf(" %.16s%s", ifnet.if_xname, rt->rt_nodes[0].rn_dupedkey ? " =>" : ""); } putchar('\n'); if (vflag) printf("\texpire %10lld%c\n", (long long)rt->rt_rmx.rmx_expire, (rt->rt_rmx.rmx_locks & RTV_EXPIRE) ? 'L' : ' '); }
/*
 * Free an arp entry.
 *
 * Caller must hold rnh_lock and the route's own lock; both assertions
 * are checked below.  The route lock is dropped before returning.
 */
static void
arptfree(struct llinfo_arp *la)
{
	struct rtentry *rt = la->la_rt;
	struct sockaddr_dl *sdl;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (rt->rt_refcnt > 0 && (sdl = SDL(rt->rt_gateway)) &&
	    sdl->sdl_family == AF_LINK) {
		/*
		 * Route is still referenced: just invalidate the cached
		 * link-layer address and reset the retry state instead of
		 * deleting the route.
		 */
		sdl->sdl_alen = 0;
		la->la_asked = 0;
		rt->rt_flags &= ~RTF_REJECT;
		RT_UNLOCK(rt);
	} else {
		/*
		 * Safe to drop rt_lock and use rt_key, since holding
		 * rnh_lock here prevents another thread from calling
		 * rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
		    0, NULL);
	}
}
/*
 * Radix-tree walker callback used when an interface address goes down:
 * deletes every non-static route bound to that address.  Returns 0 so
 * the tree walk always continues.
 */
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa && !(rt->rt_flags & RTF_STATIC)) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~RTF_PRCLONING;
		err = rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
		if (err) {
			/* Deletion failure is logged but non-fatal. */
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
		}
	}
	return 0;
}
/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 *
 * Radix-tree walker callback; returns 0 so the walk always continues.
 */
static int
in6_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	/* Only touch routes we marked for timed expiry (RTPRF_OURS). */
	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
			/* Expired (or draining): the route must be idle. */
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");
			err = rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in6_rtqkill: error %d",
				    err);
			else
				ap->killed++;
		} else {
			/*
			 * Not expired: optionally clamp its timeout to
			 * rtq_reallyold, and track the earliest upcoming
			 * expiry for the caller's next timer run.
			 */
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_second >
			     rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = time_second +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}
	return 0;
}
/*
 * Delete a route and generate a message.
 *
 * Returns the rtrequest1() error code.  The routing-socket announcement
 * is sent even on failure (with the error included).
 */
int
rtdeletemsg(struct rtentry *rt, u_int tableid)
{
	int error;
	struct rt_addrinfo info;
	struct ifnet *ifp;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	ifp = rt->rt_ifp;	/* save before the entry may change */
	error = rtrequest1(RTM_DELETE, &info, rt->rt_priority, &rt, tableid);

	/* Announce the deletion on the routing socket. */
	rt_missmsg(RTM_DELETE, &info, info.rti_flags, ifp, error, tableid);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);	/* drop the reference rtrequest1 kept */
	}
	return (error);
}
/*
 * On last reference drop, mark the route as belong to us so that it can be
 * timed out.
 *
 * Only cloned (RTF_WASCLONED) host routes that are up, not link-layer
 * entries, and not already marked ours are eligible.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	/* Must be a host route without link-layer info. */
	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	/* Must be cloned and not already claimed by us. */
	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		/* Mark it ours; in_rtqkill() will expire it later. */
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
	} else {
		rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
	}
}
/*
 * Look up `dst' in the routing table for `tableid' and return a referenced
 * rtentry, or NULL on a miss.  When `report' is set, RTF_CLONING routes are
 * resolved via RTM_RESOLVE and misses/resolve failures are announced on the
 * routing socket (except for PF_KEY lookups, which would cause RTM_MISS
 * storms — see the comment below).
 */
struct rtentry *
rtalloc1(struct sockaddr *dst, int report, u_int tableid)
{
	struct radix_node_head *rnh;
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = 0;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	rnh = rt_gettable(dst->sa_family, tableid);
	if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		/* Hit: possibly clone, otherwise just take a reference. */
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			err = rtrequest(RTM_RESOLVE, dst, SA(NULL),
			    SA(NULL), 0, &newrt, tableid);
			if (err) {
				/* Cloning failed: fall back to parent. */
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				/* External resolver must finish the job. */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] = TAILQ_FIRST(
				    &rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] =
				    rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags,
			    rt->rt_ifp, 0, tableid);
		} else
			rt->rt_refcnt++;
	} else {
		if (dst->sa_family != PF_KEY)
			rtstat.rts_unreach++;
	/*
	 * IP encapsulation does lots of lookups where we don't need nor want
	 * the RTM_MISSes that would be generated.  It causes RTM_MISS storms
	 * sent upward breaking user-level routing queries.
	 */
miss:
		if (report && dst->sa_family != PF_KEY) {
			bzero((caddr_t)&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, NULL, err, tableid);
		}
	}
	splx(s);
	return (newrt);
}
/*
 * Return the netmask sockaddr for a route.  When the kernel is built with
 * ART, the routing entry stores only a prefix length, so the mask is
 * reconstructed into the caller-supplied sa_mask buffer; otherwise the
 * mask stored in the radix entry is returned directly.
 */
struct sockaddr *
rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
{
#ifdef ART
	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
#else
	return (rt_mask(rt));
#endif /* ART */
}
/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  This also applies if the route is dynamic
 * and there are sufficiently large number of such routes (more than a half of
 * maximum).  When updating, this makes sure that nothing has a timeout longer
 * than the current value of rtq_reallyold.
 *
 * Radix-tree walker callback; caller must hold rnh_lock.  Returns 0 so the
 * walk always continues.
 */
static int
in6_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;
	struct timeval timenow;

	/* Snapshot the clock once for all comparisons below. */
	getmicrotime(&timenow);
	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK(rt);
	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= timenow.tv_sec ||
		    ((rt->rt_flags & RTF_DYNAMIC) != 0 &&
		    ip6_maxdynroutes >= 0 &&
		    in6dynroutes > ip6_maxdynroutes / 2)) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");
			/*
			 * Delete this route since we're done with it;
			 * the route may be freed afterwards, so we
			 * can no longer refer to 'rt' upon returning
			 * from rtrequest().  Safe to drop rt_lock and
			 * use rt_key, rt_gateway, since holding rnh_lock
			 * here prevents another thread from calling
			 * rt_setgate() on this route.
			 */
			RT_UNLOCK(rt);
			err = rtrequest_locked(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
			if (err) {
				log(LOG_WARNING, "in6_rtqkill: error %d",
				    err);
			} else {
				ap->killed++;
			}
		} else {
			/*
			 * Not expired: optionally clamp its timeout and
			 * track the earliest upcoming expiry.
			 */
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - timenow.tv_sec >
			    rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = timenow.tv_sec +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
			RT_UNLOCK(rt);
		}
	} else {
		RT_UNLOCK(rt);
	}
	return 0;
}
//------------------------------------------------------------------------------ // FUNCTION // // // DESCRIPTION // // // PARAMETERS // // // RETURN // // //------------------------------------------------------------------------------ static int ifart_delete( struct radix_node *rn, struct in_addr *addr ) { struct rtentry *rt = (struct rtentry *)rn; if (((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr == addr->s_addr) { rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL); } return 0; }
//------------------------------------------------------------------------------ // FUNCTION // // // DESCRIPTION // // // PARAMETERS // // // RETURN // // //------------------------------------------------------------------------------ static int ifprt_delete( struct radix_node *rn, void *ifp ) { struct rtentry *rt = (struct rtentry *)rn; if(rt->rt_ifp == ifp) { rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL); } return 0; }
//------------------------------------------------------------------------------ // FUNCTION // // // DESCRIPTION // // // PARAMETERS // // // RETURN // // //------------------------------------------------------------------------------ static int staticrt_delete( struct radix_node *rn, void *vifp ) { struct rtentry *rt = (struct rtentry *)rn; if((rt->rt_flags & RTF_STATIC) && !(rt->rt_flags & RTF_LLINFO)) { rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), 0, NULL); } return 0; }
/*
 * Debug helper for the BOOTP boot path: dump one routing table entry
 * (destination/mask, gateway, flags, expiry and interface) to the console.
 */
void
bootpboot_p_rtentry(struct rtentry *rt)
{
	/* Destination with its netmask, then the gateway. */
	bootpboot_p_sa(rt_key(rt), rt_mask(rt));
	printf(" ");
	bootpboot_p_sa(rt->rt_gateway, NULL);

	/* Flags, expiry timestamp and owning interface on one line. */
	printf(" flags %x %d %s\n",
	    (unsigned short) rt->rt_flags,
	    (int) rt->rt_rmx.rmx_expire,
	    rt->rt_ifp->if_xname);
}
/*
 * On last reference drop, mark the route as belong to us so that it can be
 * timed out.
 *
 * Caller must hold rnh_lock and the route's own lock; both assertions are
 * checked below.  Only cloned or dynamic host routes that are up, not
 * link-layer entries, and not already marked ours are eligible.
 */
static void
in6_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	/* Must be a host route without link-layer info. */
	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	/* Already claimed for timed expiry. */
	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
		return;

	/*
	 * Delete the route immediately if RTF_DELCLONE is set or
	 * if route caching is disabled (rtq_reallyold set to 0).
	 * Otherwise, let it expire and be deleted by in6_rtqkill().
	 */
	if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
		/*
		 * Delete the route from the radix tree but since we are
		 * called when the route's reference count is 0, don't
		 * deallocate it until we return from this routine by
		 * telling rtrequest that we're interested in it.
		 * Safe to drop rt_lock and use rt_key, rt_gateway,
		 * since holding rnh_lock here prevents another thread
		 * from calling rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		if (rtrequest_locked(RTM_DELETE, rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
			/* Now let the caller free it */
			RT_LOCK(rt);
			RT_REMREF_LOCKED(rt);
		} else {
			RT_LOCK(rt);
		}
	} else {
		struct timeval timenow;

		getmicrotime(&timenow);
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
	}
}
/* * Package everything up before printing it. * We don't want to block all network operations till * the printing completes! */ static int show_inet_route ( struct radix_node *rn, void *vw ) { struct rtentry *rt = (struct rtentry *)rn; struct ifnet *ifp; struct dinfo *dp = (struct dinfo *)vw; struct rinfo *r; /* * Get a pointer to a new route info structure */ if (dp->count >= dp->capacity) { r = realloc (dp->routes, (sizeof *r) * (dp->capacity + 20)); if (r == 0) return ENOMEM; dp->capacity += 20; dp->routes = r; } r = dp->routes + dp->count++; /* * Fill in the route info structure */ copyAddress (&r->dst, rt_key(rt), sizeof r->dst); if (rt->rt_flags & (RTF_GATEWAY | RTF_HOST)) { copyAddress (&r->un, rt->rt_gateway, sizeof r->un); } else { /* * Create a fake address to hold the mask */ struct sockaddr_in dummy; dummy.sin_family = AF_INET; dummy.sin_len = sizeof dummy; dummy.sin_addr = ((struct sockaddr_in *)rt_mask(rt))->sin_addr; copyAddress (&r->un, &dummy, sizeof r->un); } r->flags = rt->rt_flags; r->refcnt = rt->rt_refcnt; r->pksent = rt->rt_rmx.rmx_pksent; r->expire = rt->rt_rmx.rmx_expire; ifp = rt->rt_ifp; strncpy (r->ifname, (ifp->if_name ? ifp->if_name : ""), sizeof r->ifname); r->ifunit = ifp->if_unit; return 0; }
static int nfs_boot_delroute(struct rtentry *rt, void *w) { int error; if ((void *)rt->rt_ifp != w) return 0; error = rtrequest(RTM_DELETE, rt_getkey(rt), NULL, rt_mask(rt), 0, NULL); if (error != 0) printf("%s: del route, error=%d\n", __func__, error); return 0; }
/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 *
 * Radix-tree walker callback; caller must hold rt_mtx.  Returns 0 so the
 * walk always continues.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;
	struct timeval timenow;

	/* Snapshot the clock once for all comparisons below. */
	getmicrotime(&timenow);
	lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);

	/* Only touch routes we marked for timed expiry (RTPRF_OURS). */
	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= timenow.tv_sec) {
			/* Expired (or draining): the route must be idle. */
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");
			err = rtrequest_locked(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, 0);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n",
				    err);
			} else {
				ap->killed++;
			}
		} else {
			/*
			 * Not expired: optionally clamp its timeout and
			 * track the earliest upcoming expiry.
			 */
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - timenow.tv_sec >
			     rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = timenow.tv_sec +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}
	return 0;
}
/*
 * On last reference drop, mark the route as belong to us so that it can be
 * timed out.
 *
 * Only cloned (RTF_WASCLONED) host routes that are up, not link-layer
 * entries, and not already marked ours are eligible.
 */
static void
in6_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	/* Must be a host route without link-layer info. */
	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	/* Must be cloned and not already claimed by us. */
	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		/* Mark it ours; in6_rtqkill() will expire it later. */
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
	} else {
#ifdef __FreeBSD__
		rtexpunge(rt);
#else
		struct rtentry *dummy;

		/*
		 * rtrequest() would recursively call rtfree() without the
		 * dummy entry argument, causing duplicated free.
		 */
		rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &dummy);
#endif
	}
}
/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 *
 * Radix-tree walker callback; caller must hold the radix head write lock
 * (hence RTF_RNH_LOCKED on the delete).  Returns 0 so the walk continues.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);

	/* Only touch routes we marked for timed expiry (RTPRF_OURS). */
	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			/* Expired (or draining): the route must be idle. */
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");
			err = in_rtrequest(RTM_DELETE,
			    (struct sockaddr *)rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags | RTF_RNH_LOCKED, 0,
			    rt->rt_fibnum);
			if (err) {
				log(LOG_WARNING, "in_rtqkill: error %d\n",
				    err);
			} else {
				ap->killed++;
			}
		} else {
			/*
			 * Not expired: optionally clamp its timeout and
			 * track the earliest upcoming expiry.
			 */
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - time_uptime >
			     V_rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    V_rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
		}
	}
	return 0;
}
/*
 * On last reference drop, mark the route as belong to us so that it can be
 * timed out.
 *
 * Only cloned (RTF_WASCLONED) host routes that are up, not link-layer
 * entries, and not already marked ours are eligible.
 */
static void
in_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	/* Must be a host route without link-layer info. */
	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	/* Must be cloned and not already claimed by us. */
	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) != RTF_WASCLONED)
		return;

	/*
	 * Delete the route immediately if RTF_DELCLONE is set or
	 * if route caching is disabled (rtq_reallyold set to 0).
	 * Otherwise, let it expire and be deleted by in_rtqkill().
	 */
	if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
		/*
		 * Delete the route from the radix tree but since we are
		 * called when the route's reference count is 0, don't
		 * deallocate it until we return from this routine by
		 * telling rtrequest that we're interested in it.
		 */
		if (rtrequest_locked(RTM_DELETE,
		    (struct sockaddr *)rt_key(rt), rt->rt_gateway,
		    rt_mask(rt), rt->rt_flags, &rt) == 0) {
			/* Now let the caller free it */
			rtunref(rt);
		}
	} else {
		struct timeval timenow;

		getmicrotime(&timenow);
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
	}
}
/*
 * Recursively walk a kernel radix tree by reading node structures out of
 * kernel memory (via _kread on `fd'), invoking `callback' with a filled-in
 * route_entry for every non-root leaf.  A non-zero callback return aborts
 * the walk and is propagated to the caller.
 */
static int
_radix_walk(int fd, struct radix_node *rn, route_handler callback, void *arg)
{
	struct radix_node rnode;
	struct rtentry rt;
	struct sockaddr_in sin;
	struct route_entry entry;
	int ret = 0;
 again:
	_kread(fd, rn, &rnode, sizeof(rnode));
	if (rnode.rn_b < 0) {
		/* Leaf node. */
		if (!(rnode.rn_flags & RNF_ROOT)) {
			entry.intf_name[0] = '\0';
			/* The leaf radix_node is embedded in an rtentry. */
			_kread(fd, rn, &rt, sizeof(rt));
			_kread(fd, rt_key(&rt), &sin, sizeof(sin));
			addr_ston((struct sockaddr *)&sin, &entry.route_dst);
			if (!(rt.rt_flags & RTF_HOST)) {
				/* Network route: fetch the netmask too. */
				_kread(fd, rt_mask(&rt), &sin, sizeof(sin));
				addr_stob((struct sockaddr *)&sin,
				    &entry.route_dst.addr_bits);
			}
			_kread(fd, rt.rt_gateway, &sin, sizeof(sin));
			addr_ston((struct sockaddr *)&sin, &entry.route_gw);
			entry.metric = 0;
			if ((ret = callback(&entry, arg)) != 0)
				return (ret);
		}
		/* Follow the duplicate-key chain iteratively. */
		if ((rn = rnode.rn_dupedkey))
			goto again;
	} else {
		/* Internal node: recurse left, then right. */
		rn = rnode.rn_r;
		if ((ret = _radix_walk(fd, rnode.rn_l, callback, arg)) != 0)
			return (ret);
		if ((ret = _radix_walk(fd, rn, callback, arg)) != 0)
			return (ret);
	}
	return (ret);
}
/* * Get rid of old routes. When draining, this deletes everything, even when * the timeout is not expired yet. When updating, this makes sure that * nothing has a timeout longer than the current value of rtq_reallyold. */ static int in_rtqkill(struct radix_node *rn, void *rock) { struct rtqk_arg *ap = rock; struct rtentry *rt = (struct rtentry *)rn; int err; if(rt->rt_flags & RTPRF_OURS) { ap->found++; if(ap->draining || rt->rt_rmx.rmx_expire <= rtems_bsdnet_seconds_since_boot()) { if(rt->rt_refcnt > 0) panic("rtqkill route really not free"); err = rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt), rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0); if(err) { log(LOG_WARNING, "in_rtqkill: error %d\n", err); } else { ap->killed++; } } else { if(ap->updating && (rt->rt_rmx.rmx_expire - rtems_bsdnet_seconds_since_boot() > rtq_reallyold)) { rt->rt_rmx.rmx_expire = rtems_bsdnet_seconds_since_boot() + rtq_reallyold; } ap->nextstop = lmin(ap->nextstop, rt->rt_rmx.rmx_expire); } } return 0; }
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct sockaddr *src,
    struct rtentry **rtp, u_int rdomain)
{
	struct rtentry *rt;
	int error = 0;
	u_int32_t *stat = NULL;	/* statistics counter to bump on success */
	struct rt_addrinfo info;
	struct ifaddr *ifa;
	struct ifnet *ifp = NULL;

	splsoftassert(IPL_SOFTNET);

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway, rdomain)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	ifp = ifa->ifa_ifp;
	rt = rtalloc1(dst, 0, rdomain);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway, rdomain) != NULL)
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			if (rt)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, RTP_DEFAULT, &rt,
			    rdomain);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, rt_key(rt), gateway, rdomain);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		/* Hand the route to the caller, or release our ref. */
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Always announce the redirect (with any error) on the rtsock. */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, ifp, error, rdomain);
}
/*
 * check if we have the same key/mask/gateway on the table already.
 *
 * Returns EEXIST on a conflict (same key/mask and, for multipath-allowed
 * inserts, same gateway and priority), 0 otherwise.
 */
int
rt_mpath_conflict(struct radix_node_head *rnh, struct rtentry *rt,
    struct sockaddr *netmask, int mpathok)
{
	struct radix_node *rn, *rn1;
	struct rtentry *rt1;
	char *p, *q, *eq;
	int same, l, skip;

	rn = (struct radix_node *)rt;
	rn1 = rnh->rnh_lookup(rt_key(rt), netmask, rnh);
	if (!rn1 || rn1->rn_flags & RNF_ROOT)
		return 0;	/* nothing comparable in the table */

	/*
	 * unlike other functions we have in this file, we have to check
	 * all key/mask/gateway as rnh_lookup can match less specific entry.
	 */
	rt1 = (struct rtentry *)rn1;

	/* compare key. */
	if (rt_key(rt1)->sa_len != rt_key(rt)->sa_len ||
	    bcmp(rt_key(rt1), rt_key(rt), rt_key(rt1)->sa_len))
		goto different;

	/* key was the same.  compare netmask.  hairy... */
	if (rt_mask(rt1) && netmask) {
		/* skip = offset of the first byte the radix tree compares */
		skip = rnh->rnh_treetop->rn_off;
		if (rt_mask(rt1)->sa_len > netmask->sa_len) {
			/*
			 * as rt_mask(rt1) is made optimal by radix.c,
			 * there must be some 1-bits on rt_mask(rt1)
			 * after netmask->sa_len.  therefore, in
			 * this case, the entries are different.
			 */
			if (rt_mask(rt1)->sa_len > skip)
				goto different;
			else {
				/* no bits to compare, i.e. same*/
				goto maskmatched;
			}
		}

		l = rt_mask(rt1)->sa_len;
		if (skip > l) {
			/* no bits to compare, i.e. same */
			goto maskmatched;
		}
		p = (char *)rt_mask(rt1);
		q = (char *)netmask;
		if (bcmp(p + skip, q + skip, l - skip))
			goto different;
		/*
		 * need to go through all the bit, as netmask is not
		 * optimal and can contain trailing 0s
		 */
		eq = (char *)netmask + netmask->sa_len;
		q += l;
		same = 1;
		while (eq > q)
			if (*q++) {
				same = 0;
				break;
			}
		if (!same)
			goto different;
	} else if (!rt_mask(rt1) && !netmask)
		; /* no mask to compare, i.e. same*/
	else {
		/* one has mask and the other does not, different */
		goto different;
	}

maskmatched:
	/* With multipath disallowed, an equal-priority entry conflicts. */
	if (!mpathok && rt1->rt_priority == rt->rt_priority)
		return EEXIST;

	rn1 = rn_mpath_prio((struct radix_node *)rt1, rt->rt_priority);
	/* key/mask were the same.  compare gateway for all multipaths */
	do {
		rt1 = (struct rtentry *)rn1;

		/* sanity: no use in comparing the same thing */
		if (rn1 == rn)
			continue;

		if (rt1->rt_gateway->sa_len != rt->rt_gateway->sa_len ||
		    bcmp(rt1->rt_gateway, rt->rt_gateway,
		    rt1->rt_gateway->sa_len))
			continue;

		/* check the route priority */
		if (rt1->rt_priority != rt->rt_priority)
			continue;

		/* all key/mask/gateway are the same.  conflicting entry. */
		return EEXIST;
	} while ((rn1 = rn_mpath_next(rn1, 0)) != NULL);

different:
	return 0;
}
/*
 * Routing-table hook for ATM interfaces: handle link-level route requests
 * (RTM_ADD/RTM_DELETE/RTM_RESOLVE) by enabling/disabling the corresponding
 * virtual circuit via the interface's ioctl, and (with NATM) reserving or
 * releasing the VCI/VPI in the native ATM layer.
 */
void
atm_rtrequest(int req, struct rtentry *rt, const struct rt_addrinfo *info)
{
	struct sockaddr *gate = rt->rt_gateway;
	struct atm_pseudoioctl api;
#ifdef NATM
	const struct sockaddr_in *sin;
	struct natmpcb *npcb = NULL;
	const struct atm_pseudohdr *aph;
#endif
	const struct ifnet *ifp = rt->rt_ifp;
	uint8_t namelen = strlen(ifp->if_xname);
	uint8_t addrlen = ifp->if_addrlen;

	if (rt->rt_flags & RTF_GATEWAY)	/* link level requests only */
		return;

	switch (req) {
	case RTM_RESOLVE: /* resolve: only happens when cloning */
		printf("atm_rtrequest: RTM_RESOLVE request detected?\n");
		break;

	case RTM_ADD:
		/*
		 * route added by a command (e.g. ifconfig, route, arp...).
		 *
		 * first check to see if this is not a host route, in which
		 * case we are being called via "ifconfig" to set the address.
		 */
		if ((rt->rt_flags & RTF_HOST) == 0) {
			/* Install an AF_LINK gateway for the non-host route. */
			union {
				struct sockaddr sa;
				struct sockaddr_dl sdl;
				struct sockaddr_storage ss;
			} u;

			sockaddr_dl_init(&u.sdl, sizeof(u.ss),
			    ifp->if_index, ifp->if_type,
			    NULL, namelen, NULL, addrlen);
			rt_setgate(rt, &u.sa);
			gate = rt->rt_gateway;
			break;
		}

		if ((rt->rt_flags & RTF_CLONING) != 0) {
			printf("atm_rtrequest: cloning route detected?\n");
			break;
		}
		/* Gateway must be an AF_LINK sockaddr_dl large enough to
		 * carry the interface name and link address. */
		if (gate->sa_family != AF_LINK ||
		    gate->sa_len < sockaddr_dl_measure(namelen, addrlen)) {
			log(LOG_DEBUG, "atm_rtrequest: bad gateway value\n");
			break;
		}

#ifdef DIAGNOSTIC
		if (rt->rt_ifp->if_ioctl == NULL)
			panic("atm null ioctl");
#endif
#ifdef NATM
		/*
		 * let native ATM know we are using this VCI/VPI
		 * (i.e. reserve it)
		 */
		sin = satocsin(rt_getkey(rt));
		if (sin->sin_family != AF_INET)
			goto failed;
		aph = (const struct atm_pseudohdr *)CLLADDR(satosdl(gate));
		npcb = npcb_add(NULL, rt->rt_ifp, ATM_PH_VCI(aph),
		    ATM_PH_VPI(aph));
		if (npcb == NULL)
			goto failed;
		npcb->npcb_flags |= NPCB_IP;
		npcb->ipaddr.s_addr = sin->sin_addr.s_addr;
		/* XXX: move npcb to llinfo when ATM ARP is ready */
		rt->rt_llinfo = (void *) npcb;
		rt->rt_flags |= RTF_LLINFO;
#endif
		/*
		 * let the lower level know this circuit is active
		 */
		memcpy(&api.aph, CLLADDR(satocsdl(gate)), sizeof(api.aph));
		api.rxhand = NULL;
		if (rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMENA,
		    &api) != 0) {
			printf("atm: couldn't add VC\n");
			goto failed;
		}

		/* Stamp the gateway with the real interface type/index. */
		satosdl(gate)->sdl_type = rt->rt_ifp->if_type;
		satosdl(gate)->sdl_index = rt->rt_ifp->if_index;
		break;

failed:
#ifdef NATM
		/* Undo the NATM reservation before removing the route. */
		if (npcb) {
			npcb_free(npcb, NPCB_DESTROY);
			rt->rt_llinfo = NULL;
			rt->rt_flags &= ~RTF_LLINFO;
		}
#endif
		rtrequest(RTM_DELETE, rt_getkey(rt), NULL, rt_mask(rt), 0,
		    NULL);
		break;

	case RTM_DELETE:
#ifdef NATM
		/*
		 * tell native ATM we are done with this VC
		 */
		if (rt->rt_flags & RTF_LLINFO) {
			npcb_free((struct natmpcb *)rt->rt_llinfo,
			    NPCB_DESTROY);
			rt->rt_llinfo = NULL;
			rt->rt_flags &= ~RTF_LLINFO;
		}
#endif
		/*
		 * tell the lower layer to disable this circuit
		 */
		memcpy(&api.aph, CLLADDR(satocsdl(gate)), sizeof(api.aph));
		api.rxhand = NULL;
		(void)rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMDIS, &api);
		break;
	}
}