/* Runs under child process. */ void bgp_dump_routes_func (int afi) { struct stream *obuf; struct_bgp_node *rn; struct bgp_info *info; struct bgp *bgp; struct bgp_table *table; unsigned int seq = 0; obuf = bgp_dump_obuf; bgp = bgp_get_default (); if (!bgp) return; if (bgp_dump_routes.fp == NULL) return; /* Walk down each BGP route. */ table = bgp->rib[afi][SAFI_UNICAST]; for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) for (info = rn->info; info; info = info->next) bgp_dump_routes_entry (&rn->p, info, afi, MSG_TABLE_DUMP, seq++); }
/* Re-run RPKI origin validation for every path of every prefix in all
   SAFI tables of the given AFI.  Whenever at least one path of a
   prefix changes validation status, best-path selection is re-run for
   that prefix via bgp_process(). */
void rpki_revalidate_all_routes(struct bgp* bgp, afi_t afi)
{
  safi_t safi;

  for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
    {
      struct bgp_node* node;

      for (node = bgp_table_top(bgp->rib[afi][safi]); node;
           node = bgp_route_next(node))
        {
          struct bgp_info* path;
          bool any_change;

          /* Skip nodes that carry no routing information. */
          if (node->info == NULL)
            continue;

          any_change = false;
          for (path = node->info; path; path = path->next)
            {
              u_char previous = path->rpki_validation_status;

              path->rpki_validation_status =
                  rpki_validate_prefix(path->peer, path->attr, &node->p);
              if (previous != path->rpki_validation_status)
                any_change = true;
            }

          /* Only reselect best path if a status actually flipped. */
          if (any_change)
            bgp_process(bgp, node, afi, safi);
        }
    }
}
/* Reset and free all BGP nexthop cache entries stored in 'table',
   releasing the node lock each occupied node held. */
static void
bgp_nexthop_cache_reset (struct bgp_table *table)
{
  struct bgp_node *node;

  for (node = bgp_table_top (table); node; node = bgp_route_next (node))
    {
      struct bgp_nexthop_cache *cache = node->info;

      if (cache == NULL)
        continue;

      bnc_free (cache);
      node->info = NULL;
      bgp_unlock_node (node);
    }
}
/* Periodic network scan for one AFI: revalidate every configured
   static network route against the RIB, then re-send default routes
   for peers configured with default-originate.  Always returns 0. */
s_int32_t
bnh_network_scan_afi (struct bgp *bgp, afi_t afi)
{
  struct bgp_static *bstatic;
  struct bgp_peer *peer;
  struct listnode *nn;
  struct bgp_node *rn;
  u_int32_t baai;
  u_int32_t bsai;
  s_int32_t ret;
  struct prefix rnp;

  baai = BGP_AFI2BAAI (afi);
  ret = 0;

  /* Scan and validate all static network routes */
  for (rn = bgp_table_top (bgp->route [baai][BSAI_UNICAST]); rn;
       rn = bgp_route_next (rn))
    {
      if (! (bstatic = rn->info))
        continue;

      /* NOTE(review): this macro appears to populate the local 'rnp'
         from 'rn' by name — confirm against the macro definition. */
      BGP_GET_PREFIX_FROM_NODE(rn);

      bgp_static_network_update (bgp, &rnp, bstatic, afi,
                                 SAFI_UNICAST, PAL_FALSE);
    }

  /* Scan and validate default routes (once for each AFI-SAFI) */
  for (bsai = BSAI_UNICAST; bsai < BSAI_MAX; bsai++)
    LIST_LOOP (bgp->peer_list, peer, nn)
      if (CHECK_FLAG (peer->af_flags [baai][bsai],
                      PEER_FLAG_DEFAULT_ORIGINATE))
        {
          /* Send default route only if the PEER_FLAG_DEFAULT_ORIGINATE
             flag for this peer is set. */
          bgp_peer_default_originate (peer, afi, BGP_BSAI2SAFI (bsai),
                                      PAL_FALSE);
        }

  return ret;
}
/* Runs under child process. */ static unsigned int bgp_dump_routes_func (int afi, int first_run, unsigned int seq) { struct bgp_info *info; struct bgp_node *rn; struct bgp *bgp; struct bgp_table *table; bgp = bgp_get_default (); if (!bgp) return seq; if (bgp_dump_routes.fp == NULL) return seq; /* Note that bgp_dump_routes_index_table will do ipv4 and ipv6 peers, so this should only be done on the first call to bgp_dump_routes_func. ( this function will be called once for ipv4 and once for ipv6 ) */ if(first_run) bgp_dump_routes_index_table(bgp); /* Walk down each BGP route. */ table = bgp->rib[afi][SAFI_UNICAST]; for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) { info = rn->info; while (info) { info = bgp_dump_route_node_record(afi, rn, info, seq); seq++; } } fflush (bgp_dump_routes.fp); return seq; }
/* Periodic scanner for one AFI's unicast RIB: swap the active nexthop
   cache, enforce maximum-prefix limits, re-resolve every BGP route's
   nexthop against the IGP (flipping BGP_INFO_VALID when the answer
   changed), rescan dampening state, and re-evaluate conditional
   default-originate route-maps. */
static void
bgp_scan (afi_t afi, safi_t safi)
{
  struct bgp_node *rn;
  struct bgp *bgp;
  struct bgp_info *bi;
  struct bgp_info *next;
  struct peer *peer;
  struct listnode *node, *nnode;
  int valid;
  int current;
  int changed;
  int metricchanged;

  /* Change cache: flip to the other of the two nexthop cache tables
     so this scan repopulates a fresh one while the old is kept for
     comparison, then freed at the end. */
  if (bgp_nexthop_cache_table[afi] == cache1_table[afi])
    bgp_nexthop_cache_table[afi] = cache2_table[afi];
  else
    bgp_nexthop_cache_table[afi] = cache1_table[afi];

  /* Get default bgp. */
  bgp = bgp_get_default ();
  if (bgp == NULL)
    return;

  /* Maximum prefix check for every established peer, per SAFI. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer->status != Established)
        continue;

      if (peer->afc[afi][SAFI_UNICAST])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_UNICAST, 1);
      if (peer->afc[afi][SAFI_MULTICAST])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_MULTICAST, 1);
      if (peer->afc[afi][SAFI_MPLS_VPN])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_MPLS_VPN, 1);
    }

  for (rn = bgp_table_top (bgp->rib[afi][SAFI_UNICAST]); rn;
       rn = bgp_route_next (rn))
    {
      /* 'next' is cached up front because flag updates below may
         modify the list we are walking. */
      for (bi = rn->info; bi; bi = next)
        {
          next = bi->next;

          if (bi->type == ZEBRA_ROUTE_BGP && bi->sub_type == BGP_ROUTE_NORMAL)
            {
              changed = 0;
              metricchanged = 0;

              /* Directly connected EBGP peer (ttl 1): only verify the
                 nexthop is on-link; otherwise full IGP lookup. */
              if (bi->peer->sort == BGP_PEER_EBGP && bi->peer->ttl == 1)
                valid = bgp_nexthop_onlink (afi, bi->attr);
              else
                valid = bgp_nexthop_lookup (afi, bi->peer, bi,
                                            &changed, &metricchanged);

              current = CHECK_FLAG (bi->flags, BGP_INFO_VALID) ? 1 : 0;

              if (changed)
                SET_FLAG (bi->flags, BGP_INFO_IGP_CHANGED);
              else
                UNSET_FLAG (bi->flags, BGP_INFO_IGP_CHANGED);

              /* Validity flipped: keep aggregate counters in sync.
                 Order matters — decrement while still flagged valid,
                 increment only after setting the flag. */
              if (valid != current)
                {
                  if (CHECK_FLAG (bi->flags, BGP_INFO_VALID))
                    {
                      bgp_aggregate_decrement (bgp, &rn->p, bi,
                                               afi, SAFI_UNICAST);
                      bgp_info_unset_flag (rn, bi, BGP_INFO_VALID);
                    }
                  else
                    {
                      bgp_info_set_flag (rn, bi, BGP_INFO_VALID);
                      bgp_aggregate_increment (bgp, &rn->p, bi,
                                               afi, SAFI_UNICAST);
                    }
                }

              /* Dampening rescan: a path whose reuse timer fired is
                 counted back into aggregates. */
              if (CHECK_FLAG (bgp->af_flags[afi][SAFI_UNICAST],
                  BGP_CONFIG_DAMPENING)
                  &&  bi->extra && bi->extra->damp_info )
                if (bgp_damp_scan (bi, afi, SAFI_UNICAST))
                  bgp_aggregate_increment (bgp, &rn->p, bi,
                                           afi, SAFI_UNICAST);
            }
        }
      /* Re-run best-path selection on every node after its paths were
         revalidated. */
      bgp_process (bgp, rn, afi, SAFI_UNICAST);
    }

  /* Flush old cache. */
  if (bgp_nexthop_cache_table[afi] == cache1_table[afi])
    bgp_nexthop_cache_reset (cache2_table[afi]);
  else
    bgp_nexthop_cache_reset (cache1_table[afi]);

  if (BGP_DEBUG (events, EVENTS))
    {
      if (afi == AFI_IP)
        zlog_debug ("scanning IPv4 Unicast routing tables");
      else if (afi == AFI_IP6)
        zlog_debug ("scanning IPv6 Unicast routing tables");
    }

  /* Reevaluate default-originate route-maps and announce/withdraw
   * default route if necessary. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer->status == Established
          && CHECK_FLAG(peer->af_flags[afi][safi],
                        PEER_FLAG_DEFAULT_ORIGINATE)
          && peer->default_rmap[afi][safi].name)
        bgp_default_originate (peer, afi, safi, 0);
    }
}
/* Scan all configured BGP route then check the route exists in IGP or
   not.  Thread callback: re-arms itself on bgp_import_interval, then
   for every bgp instance / AFI / SAFI walks the static-route table,
   refreshes each entry's validity, IGP metric and nexthop, and
   announces or withdraws the static route when anything changed.
   Always returns 0 (thread convention). */
static int
bgp_import (struct thread *t)
{
  struct bgp *bgp;
  struct bgp_node *rn;
  struct bgp_static *bgp_static;
  struct listnode *node, *nnode;
  int valid;
  u_int32_t metric;
  struct in_addr nexthop;
  afi_t afi;
  safi_t safi;

  /* Re-schedule ourselves before doing any work, so the periodic scan
     keeps running even if this pass returns early. */
  bgp_import_thread =
    thread_add_timer (master, bgp_import, NULL, bgp_import_interval);

  if (BGP_DEBUG (events, EVENTS))
    zlog_debug ("Import timer expired.");

  for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp))
    {
      for (afi = AFI_IP; afi < AFI_MAX; afi++)
        /* NOTE(review): upper bound SAFI_MPLS_VPN deliberately skips
           the VPN table — confirm that is intended. */
        for (safi = SAFI_UNICAST; safi < SAFI_MPLS_VPN; safi++)
          for (rn = bgp_table_top (bgp->route[afi][safi]); rn;
               rn = bgp_route_next (rn))
            if ((bgp_static = rn->info) != NULL)
              {
                /* Backdoor routes are never import-checked. */
                if (bgp_static->backdoor)
                  continue;

                /* Snapshot previous state for change detection. */
                valid = bgp_static->valid;
                metric = bgp_static->igpmetric;
                nexthop = bgp_static->igpnexthop;

                /* Import check is only implemented for IPv4 unicast;
                   everything else is unconditionally valid. */
                if (bgp_flag_check (bgp, BGP_FLAG_IMPORT_CHECK)
                    && afi == AFI_IP && safi == SAFI_UNICAST)
                  bgp_static->valid =
                    bgp_import_check (&rn->p, &bgp_static->igpmetric,
                                      &bgp_static->igpnexthop);
                else
                  {
                    bgp_static->valid = 1;
                    bgp_static->igpmetric = 0;
                    bgp_static->igpnexthop.s_addr = 0;
                  }

                if (bgp_static->valid != valid)
                  {
                    /* Validity flipped: announce or withdraw. */
                    if (bgp_static->valid)
                      bgp_static_update (bgp, &rn->p, bgp_static,
                                         afi, safi);
                    else
                      bgp_static_withdraw (bgp, &rn->p, afi, safi);
                  }
                else if (bgp_static->valid)
                  {
                    /* Still valid: re-announce when metric or nexthop
                       changed, or a route-map may alter attributes. */
                    if (bgp_static->igpmetric != metric
                        || bgp_static->igpnexthop.s_addr != nexthop.s_addr
                        || bgp_static->rmap.name)
                      bgp_static_update (bgp, &rn->p, bgp_static,
                                         afi, safi);
                  }
              }
    }
  return 0;
}
/* Runs under child process.
   Dump the unicast RIB of the given AFI in MRT TABLE_DUMP_V2 format:
   one RIB record per prefix, containing every path as an entry.
   Returns the updated sequence number so the caller can continue
   numbering across address families. */
static unsigned int
bgp_dump_routes_func (int afi, int first_run, unsigned int seq)
{
  struct stream *obuf;
  struct bgp_info *info;
  struct bgp_node *rn;
  struct bgp *bgp;
  struct bgp_table *table;

  bgp = bgp_get_default ();
  if (!bgp)
    return seq;

  if (bgp_dump_routes.fp == NULL)
    return seq;

  /* Note that bgp_dump_routes_index_table will do ipv4 and ipv6
     peers, so this should only be done on the first call to
     bgp_dump_routes_func.
     ( this function will be called once for ipv4 and once for ipv6 ) */
  if(first_run)
    bgp_dump_routes_index_table(bgp);

  obuf = bgp_dump_obuf;
  stream_reset(obuf);

  /* Walk down each BGP route; each occupied node becomes one MRT
     record buffered in obuf and written out at the end of the loop
     body. */
  table = bgp->rib[afi][SAFI_UNICAST];

  for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn))
    {
      if(!rn->info)
        continue;

      stream_reset(obuf);

      /* MRT header */
      if (afi == AFI_IP)
        bgp_dump_header (obuf, MSG_TABLE_DUMP_V2,
                         TABLE_DUMP_V2_RIB_IPV4_UNICAST,
                         BGP_DUMP_ROUTES);
      else if (afi == AFI_IP6)
        bgp_dump_header (obuf, MSG_TABLE_DUMP_V2,
                         TABLE_DUMP_V2_RIB_IPV6_UNICAST,
                         BGP_DUMP_ROUTES);

      /* Sequence number */
      stream_putl(obuf, seq);

      /* Prefix length */
      stream_putc (obuf, rn->p.prefixlen);

      /* Prefix */
      if (afi == AFI_IP)
        {
          /* We'll dump only the useful bits (those not 0), but have to
             align on 8 bits */
          stream_write(obuf, (u_char *)&rn->p.u.prefix4,
                       (rn->p.prefixlen+7)/8);
        }
      else if (afi == AFI_IP6)
        {
          /* We'll dump only the useful bits (those not 0), but have to
             align on 8 bits */
          stream_write (obuf, (u_char *)&rn->p.u.prefix6,
                        (rn->p.prefixlen+7)/8);
        }

      /* Save where we are now, so we can overwrite the entry count
         later */
      int sizep = stream_get_endp(obuf);

      /* Entry count */
      uint16_t entry_count = 0;

      /* Entry count, note that this is overwritten later */
      stream_putw(obuf, 0);

      for (info = rn->info; info; info = info->next)
        {
          entry_count++;

          /* Peer index */
          stream_putw(obuf, info->peer->table_dump_index);

          /* Originated */
#ifdef HAVE_CLOCK_MONOTONIC
          /* Convert the monotonic uptime to wall-clock origination
             time for the MRT record. */
          stream_putl (obuf, time(NULL) - (bgp_clock() - info->uptime));
#else
          stream_putl (obuf, info->uptime);
#endif /* HAVE_CLOCK_MONOTONIC */

          /* Dump attribute. */
          /* Skip prefix & AFI/SAFI for MP_NLRI */
          bgp_dump_routes_attr (obuf, info->attr, &rn->p);
        }

      /* Overwrite the entry count, now that we know the right number */
      stream_putw_at (obuf, sizep, entry_count);

      seq++;

      bgp_dump_set_size(obuf, MSG_TABLE_DUMP_V2);
      fwrite (STREAM_DATA (obuf), stream_get_endp (obuf), 1,
              bgp_dump_routes.fp);
    }

  fflush (bgp_dump_routes.fp);

  return seq;
}
static int show_ip_bgp_nexthop_table (struct vty *vty, int detail) { struct bgp_node *rn; struct bgp_nexthop_cache *bnc; char buf[INET6_ADDRSTRLEN]; struct nexthop *nexthop; time_t tbuf; afi_t afi; vty_out (vty, "Current BGP nexthop cache:%s", VTY_NEWLINE); for (afi = AFI_IP ; afi < AFI_MAX ; afi++) { if (!bgp_nexthop_cache_table[afi]) continue; for (rn = bgp_table_top (bgp_nexthop_cache_table[afi]); rn; rn = bgp_route_next (rn)) { if ((bnc = rn->info) != NULL) { if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)) { vty_out (vty, " %s valid [IGP metric %d], #paths %d%s", inet_ntop (rn->p.family, &rn->p.u.prefix, buf, sizeof (buf)), bnc->metric, bnc->path_count, VTY_NEWLINE); if (detail) for (nexthop = bnc->nexthop ; nexthop; nexthop = nexthop->next) switch (nexthop->type) { case NEXTHOP_TYPE_IPV6: vty_out (vty, " gate %s%s", inet_ntop (AF_INET6, &nexthop->gate.ipv6, buf, INET6_ADDRSTRLEN), VTY_NEWLINE); break; case NEXTHOP_TYPE_IPV6_IFINDEX: vty_out(vty, " gate %s, if %s%s", inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, INET6_ADDRSTRLEN), ifindex2ifname(nexthop->ifindex), VTY_NEWLINE); break; case NEXTHOP_TYPE_IPV4: vty_out (vty, " gate %s%s", inet_ntop (AF_INET, &nexthop->gate.ipv4, buf, INET6_ADDRSTRLEN), VTY_NEWLINE); break; case NEXTHOP_TYPE_IFINDEX: vty_out (vty, " if %s%s", ifindex2ifname(nexthop->ifindex), VTY_NEWLINE); break; case NEXTHOP_TYPE_IPV4_IFINDEX: vty_out (vty, " gate %s, if %s%s", inet_ntop(AF_INET, &nexthop->gate.ipv4, buf, INET6_ADDRSTRLEN), ifindex2ifname(nexthop->ifindex), VTY_NEWLINE); break; default: vty_out (vty, " invalid nexthop type %u%s", nexthop->type, VTY_NEWLINE); } } else vty_out (vty, " %s invalid%s", inet_ntop (AF_INET, &rn->p.u.prefix, buf, sizeof (buf)), VTY_NEWLINE); #ifdef HAVE_CLOCK_MONOTONIC tbuf = time(NULL) - (bgp_clock() - bnc->last_update); vty_out (vty, " Last update: %s", ctime(&tbuf)); #else vty_out (vty, " Last update: %s", ctime(&bnc->uptime)); #endif /* HAVE_CLOCK_MONOTONIC */ vty_out(vty, "%s", VTY_NEWLINE); } } } 
return CMD_SUCCESS; }