/* Display the configured route-flap dampening parameters for the given
 * address family on the default BGP instance.
 *
 * vty  - output terminal
 * afi  - address family (AFI_IP or AFI_IP6); also selects IPv4/IPv6 wording
 * safi - sub-address family; together with afi selects the per-AF
 *        BGP_CONFIG_DAMPENING flag that is checked
 *
 * Returns CMD_SUCCESS, or CMD_WARNING when no BGP process is configured.
 *
 * NOTE(review): reads the file-scope dampening config `damp`; that pointer
 * is presumably only valid while BGP_CONFIG_DAMPENING is set for this
 * afi/safi — confirm against bgp_damp_enable/disable.
 */
int
bgp_show_dampening_parameters (struct vty *vty, afi_t afi, safi_t safi)
{
  struct bgp *bgp;

  bgp = bgp_get_default ();
  if (bgp == NULL)
    {
      vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  if (CHECK_FLAG (bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING))
    {
      /* Times are stored in seconds; display them in minutes. */
      vty_out (vty, "Half-life time: %ld min%s",
               damp->half_life / 60, VTY_NEWLINE);
      vty_out (vty, "Reuse penalty: %d%s",
               damp->reuse_limit, VTY_NEWLINE);
      vty_out (vty, "Suppress penalty: %d%s",
               damp->suppress_value, VTY_NEWLINE);
      vty_out (vty, "Max suppress time: %ld min%s",
               damp->max_suppress_time / 60, VTY_NEWLINE);
      /* Fixed typo in user-visible output: "supress" -> "suppress". */
      vty_out (vty, "Max suppress penalty: %u%s",
               damp->ceiling, VTY_NEWLINE);
      vty_out (vty, "%s", VTY_NEWLINE);
    }
  else
    vty_out (vty, "dampening not enabled for %s%s",
             afi == AFI_IP ? "IPv4" : "IPv6", VTY_NEWLINE);

  return CMD_SUCCESS;
}
/* Runs under child process. */ void bgp_dump_routes_func (int afi) { struct stream *obuf; struct_bgp_node *rn; struct bgp_info *info; struct bgp *bgp; struct bgp_table *table; unsigned int seq = 0; obuf = bgp_dump_obuf; bgp = bgp_get_default (); if (!bgp) return; if (bgp_dump_routes.fp == NULL) return; /* Walk down each BGP route. */ table = bgp->rib[afi][SAFI_UNICAST]; for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) for (info = rn->info; info; info = info->next) bgp_dump_routes_entry (&rn->p, info, afi, MSG_TABLE_DUMP, seq++); }
/* Runs under child process. */ static unsigned int bgp_dump_routes_func (int afi, int first_run, unsigned int seq) { struct bgp_info *info; struct bgp_node *rn; struct bgp *bgp; struct bgp_table *table; bgp = bgp_get_default (); if (!bgp) return seq; if (bgp_dump_routes.fp == NULL) return seq; /* Note that bgp_dump_routes_index_table will do ipv4 and ipv6 peers, so this should only be done on the first call to bgp_dump_routes_func. ( this function will be called once for ipv4 and once for ipv6 ) */ if(first_run) bgp_dump_routes_index_table(bgp); /* Walk down each BGP route. */ table = bgp->rib[afi][SAFI_UNICAST]; for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) { info = rn->info; while (info) { info = bgp_dump_route_node_record(afi, rn, info, seq); seq++; } } fflush (bgp_dump_routes.fp); return seq; }
/* Periodic scanner for one address family: revalidate the nexthop of every
 * BGP-learned unicast route against the (freshly swapped) nexthop cache,
 * toggle BGP_INFO_VALID / BGP_INFO_IGP_CHANGED accordingly, re-run
 * dampening bookkeeping, and queue every node for best-path reprocessing.
 * Also re-checks maximum-prefix limits and re-evaluates default-originate
 * route-maps for all established peers. */
static void
bgp_scan (afi_t afi, safi_t safi)
{
  struct bgp_node *rn;
  struct bgp_info *bi;
  struct bgp_info *next;       /* saved before bi may be invalidated */
  struct bgp *bgp;
  struct peer *peer;
  struct listnode *node, *nnode;
  int valid;
  int current;
  int changed;
  int metricchanged;

  /* Change cache: flip the active nexthop cache table so this scan
     populates the other table; the stale one is reset below. */
  if (bgp_nexthop_cache_table[afi] == cache1_table[afi])
    bgp_nexthop_cache_table[afi] = cache2_table[afi];
  else
    bgp_nexthop_cache_table[afi] = cache1_table[afi];

  /* Get default bgp. */
  bgp = bgp_get_default ();
  if (bgp == NULL)
    return;

  /* Maximum prefix check for every established peer, per SAFI. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer->status != Established)
        continue;

      if (peer->afc[afi][SAFI_UNICAST])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_UNICAST, 1);
      if (peer->afc[afi][SAFI_MULTICAST])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_MULTICAST, 1);
      if (peer->afc[afi][SAFI_MPLS_VPN])
        bgp_maximum_prefix_overflow (peer, afi, SAFI_MPLS_VPN, 1);
    }

  /* Revalidate every BGP-sourced path in the unicast RIB. */
  for (rn = bgp_table_top (bgp->rib[afi][SAFI_UNICAST]); rn;
       rn = bgp_route_next (rn))
    {
      for (bi = rn->info; bi; bi = next)
        {
          next = bi->next;

          /* Only normal BGP routes are scanned (not static/aggregate). */
          if (bi->type == ZEBRA_ROUTE_BGP && bi->sub_type == BGP_ROUTE_NORMAL)
            {
              changed = 0;
              metricchanged = 0;

              /* Directly-connected EBGP (ttl 1): only check the nexthop is
                 on-link; otherwise do a full IGP nexthop lookup that also
                 reports reachability/metric changes. */
              if (bi->peer->sort == BGP_PEER_EBGP && bi->peer->ttl == 1)
                valid = bgp_nexthop_onlink (afi, bi->attr);
              else
                valid = bgp_nexthop_lookup (afi, bi->peer, bi,
                                            &changed, &metricchanged);

              current = CHECK_FLAG (bi->flags, BGP_INFO_VALID) ? 1 : 0;

              if (changed)
                SET_FLAG (bi->flags, BGP_INFO_IGP_CHANGED);
              else
                UNSET_FLAG (bi->flags, BGP_INFO_IGP_CHANGED);

              /* Validity flipped: keep aggregate route accounting in sync
                 with the flag transition (decrement before clearing,
                 increment after setting). */
              if (valid != current)
                {
                  if (CHECK_FLAG (bi->flags, BGP_INFO_VALID))
                    {
                      bgp_aggregate_decrement (bgp, &rn->p, bi,
                                               afi, SAFI_UNICAST);
                      bgp_info_unset_flag (rn, bi, BGP_INFO_VALID);
                    }
                  else
                    {
                      bgp_info_set_flag (rn, bi, BGP_INFO_VALID);
                      bgp_aggregate_increment (bgp, &rn->p, bi,
                                               afi, SAFI_UNICAST);
                    }
                }

              /* Dampening bookkeeping: when the scan reports a state
                 change (reuse), re-add the path to aggregates. */
              if (CHECK_FLAG (bgp->af_flags[afi][SAFI_UNICAST],
                  BGP_CONFIG_DAMPENING)
                  &&  bi->extra && bi->extra->damp_info )
                if (bgp_damp_scan (bi, afi, SAFI_UNICAST))
                  bgp_aggregate_increment (bgp, &rn->p, bi,
                                           afi, SAFI_UNICAST);
            }
        }
      /* Queue the node for best-path selection after revalidation. */
      bgp_process (bgp, rn, afi, SAFI_UNICAST);
    }

  /* Flash old cache: reset whichever table is no longer active. */
  if (bgp_nexthop_cache_table[afi] == cache1_table[afi])
    bgp_nexthop_cache_reset (cache2_table[afi]);
  else
    bgp_nexthop_cache_reset (cache1_table[afi]);

  if (BGP_DEBUG (events, EVENTS))
    {
      if (afi == AFI_IP)
        zlog_debug ("scanning IPv4 Unicast routing tables");
      else if (afi == AFI_IP6)
        zlog_debug ("scanning IPv6 Unicast routing tables");
    }

  /* Reevaluate default-originate route-maps and announce/withdraw
   * default route if necessary. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer->status == Established
          && CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE)
          && peer->default_rmap[afi][safi].name)
        bgp_default_originate (peer, afi, safi, 0);
    }
}
/* Runs under child process.
 * Dump the unicast RIB for the given AFI in MRT TABLE_DUMP_V2 format:
 * one RIB record per route node, containing all paths for that prefix.
 * Returns the updated sequence number so the caller can continue
 * numbering across the ipv4 and ipv6 passes. */
static unsigned int
bgp_dump_routes_func (int afi, int first_run, unsigned int seq)
{
  struct stream *obuf;
  struct bgp_info *info;
  struct bgp_node *rn;
  struct bgp *bgp;
  struct bgp_table *table;

  bgp = bgp_get_default ();
  if (!bgp)
    return seq;

  /* Nothing to do if the routes dump file is not open. */
  if (bgp_dump_routes.fp == NULL)
    return seq;

  /* Note that bgp_dump_routes_index_table will do ipv4 and ipv6 peers,
     so this should only be done on the first call to bgp_dump_routes_func.
     ( this function will be called once for ipv4 and once for ipv6 ) */
  if(first_run)
    bgp_dump_routes_index_table(bgp);

  obuf = bgp_dump_obuf;
  /* NOTE(review): this reset is redundant — the buffer is reset again at
     the top of every loop iteration below. Harmless, kept as-is. */
  stream_reset(obuf);

  /* Walk down each BGP route.  Each route node becomes one MRT record. */
  table = bgp->rib[afi][SAFI_UNICAST];

  for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn))
    {
      /* Skip nodes with no path information. */
      if(!rn->info)
        continue;

      stream_reset(obuf);

      /* MRT header: RIB subtype depends on the address family. */
      if (afi == AFI_IP)
        bgp_dump_header (obuf, MSG_TABLE_DUMP_V2,
                         TABLE_DUMP_V2_RIB_IPV4_UNICAST, BGP_DUMP_ROUTES);
      else if (afi == AFI_IP6)
        bgp_dump_header (obuf, MSG_TABLE_DUMP_V2,
                         TABLE_DUMP_V2_RIB_IPV6_UNICAST, BGP_DUMP_ROUTES);

      /* Sequence number */
      stream_putl(obuf, seq);

      /* Prefix length */
      stream_putc (obuf, rn->p.prefixlen);

      /* Prefix */
      if (afi == AFI_IP)
        {
          /* We'll dump only the useful bits (those not 0), but have to
             align on 8 bits */
          stream_write(obuf, (u_char *)&rn->p.u.prefix4,
                       (rn->p.prefixlen+7)/8);
        }
      else if (afi == AFI_IP6)
        {
          /* We'll dump only the useful bits (those not 0), but have to
             align on 8 bits */
          stream_write (obuf, (u_char *)&rn->p.u.prefix6,
                        (rn->p.prefixlen+7)/8);
        }

      /* Save where we are now, so we can overwrite the entry count later */
      int sizep = stream_get_endp(obuf);

      /* Entry count */
      uint16_t entry_count = 0;

      /* Entry count placeholder, note that this is overwritten later */
      stream_putw(obuf, 0);

      for (info = rn->info; info; info = info->next)
        {
          entry_count++;

          /* Peer index (into the index table written on first_run) */
          stream_putw(obuf, info->peer->table_dump_index);

          /* Originated timestamp */
#ifdef HAVE_CLOCK_MONOTONIC
          /* uptime is on the monotonic clock; convert to wall-clock. */
          stream_putl (obuf, time(NULL) - (bgp_clock() - info->uptime));
#else
          stream_putl (obuf, info->uptime);
#endif /* HAVE_CLOCK_MONOTONIC */

          /* Dump attribute. */
          /* Skip prefix & AFI/SAFI for MP_NLRI */
          bgp_dump_routes_attr (obuf, info->attr, &rn->p);
        }

      /* Overwrite the entry count, now that we know the right number */
      stream_putw_at (obuf, sizep, entry_count);

      seq++;

      /* Patch the MRT record length, then flush the record to the file. */
      bgp_dump_set_size(obuf, MSG_TABLE_DUMP_V2);
      fwrite (STREAM_DATA (obuf), stream_get_endp (obuf), 1,
              bgp_dump_routes.fp);
    }

  fflush (bgp_dump_routes.fp);

  return seq;
}
/*
 * Obtain the BGP instance that the incoming connection should be processed
 * against. This is important because more than one VRF could be using the
 * same IP address space. The instance is got by obtaining the device to
 * which the incoming connection is bound to. This could either be a VRF
 * or it could be an interface, which in turn determines the VRF.
 *
 * Returns 0 on success with *bgp_inst set (possibly to the default
 * instance), -1 when no instance could be determined.
 */
static int bgp_get_instance_for_inc_conn(int sock, struct bgp **bgp_inst)
{
#ifndef SO_BINDTODEVICE
	/* only Linux has SO_BINDTODEVICE, but we're in Linux-specific code here
	 * anyway since the assumption is that the interface name returned by
	 * getsockopt() is useful in identifying the VRF, particularly with
	 * Linux's
	 * VRF l3master device. The whole mechanism is specific to Linux, so...
	 * when other platforms add VRF support, this will need handling here as
	 * well. (or, some restructuring) */
	*bgp_inst = bgp_get_default();
	return !*bgp_inst;
#else
	char name[VRF_NAMSIZ + 1];
	socklen_t name_len = VRF_NAMSIZ;
	struct bgp *bgp;
	int rc;
	struct listnode *node, *nnode;

	*bgp_inst = NULL;
	name[0] = '\0';
	rc = getsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, name, &name_len);
	if (rc != 0) {
#if defined(HAVE_CUMULUS)
		flog_err(EC_LIB_SOCKET,
			 "[Error] BGP SO_BINDTODEVICE get failed (%s), sock %d",
			 safe_strerror(errno), sock);
		return -1;
#endif
		/* Non-Cumulus builds deliberately fall through here: name is
		 * still empty, so the default-instance path below is taken. */
	}

	/* Empty device name: socket is not bound, use the default instance. */
	if (!strlen(name)) {
		*bgp_inst = bgp_get_default();
		return 0; /* default instance. */
	}

	/* First try match to instance; if that fails, check for interfaces. */
	bgp = bgp_lookup_by_name(name);
	if (bgp) {
		if (!bgp->vrf_id) // unexpected: named instance with no VRF id
			return -1;
		*bgp_inst = bgp;
		return 0;
	}

	/* TODO - This will be optimized once interfaces move into the NS.
	 * Linear search of every instance's VRF for an interface with the
	 * bound device's name; VIEW instances have no VRF and are skipped. */
	for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
		struct interface *ifp;

		if (bgp->inst_type == BGP_INSTANCE_TYPE_VIEW)
			continue;

		ifp = if_lookup_by_name(name, bgp->vrf_id);
		if (ifp) {
			*bgp_inst = bgp;
			return 0;
		}
	}

	/* We didn't match to either an instance or an interface. */
	return -1;
#endif
}