/**
 * adj_mcast_update_rewrite
 *
 * Update the adjacency's rewrite string. A NULL string implies the
 * rewrite is reset (i.e. when ARP/ND entry is gone).
 * NB: the adj being updated may be handling traffic in the DP.
 */
void
adj_mcast_update_rewrite (adj_index_t adj_index,
                          u8 *rewrite,
                          u8 offset)
{
    ip_adjacency_t *adj;
    u32 tx_node;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /*
     * The arc goes from the protocol's mcast rewrite node to the
     * TX node of the interface the adj resolves through.
     */
    tx_node = vnet_tx_node_index_for_sw_interface(
                  vnet_get_main(),
                  adj->rewrite_header.sw_if_index);

    /*
     * install the new rewrite string and (re)build the arc
     * from the rewrite node to the interface's TX node
     */
    adj_nbr_update_rewrite_internal(adj,
                                    IP_LOOKUP_NEXT_MCAST,
                                    adj_get_mcast_node(adj->ia_nh_proto),
                                    tx_node,
                                    rewrite);

    /*
     * record where, within the rewrite, the mcast IP address is
     * written by the DP
     */
    adj->rewrite_header.dst_mcast_offset = offset;
}
static void
fib_entry_src_interface_path_swap (fib_entry_src_t *src,
                                   const fib_entry_t *entry,
                                   fib_path_list_flags_t pl_flags,
                                   const fib_route_path_t *paths)
{
    ip_adjacency_t *adj;

    src->fes_pl = fib_path_list_create(pl_flags, paths);

    /*
     * hack: stash the entry's prefix into the glean adjacency so it
     * is available for fast retrieval in the switch path. Local
     * (receive) entries do not link to a glean, so skip those.
     */
    if (FIB_ENTRY_FLAG_LOCAL & src->fes_entry_flags)
        return;

    adj = adj_get(fib_path_list_get_adj(
                      src->fes_pl,
                      fib_entry_get_default_chain_type(entry)));

    if (IP_LOOKUP_NEXT_GLEAN == adj->lookup_next_index)
    {
        /*
         * the connected prefix links to a glean on a non-p2p
         * interface; record the receive address on it.
         */
        adj->sub_type.glean.receive_addr = entry->fe_prefix.fp_addr;
    }
}
static clib_error_t * adj_mcast_interface_state_change (vnet_main_t * vnm, u32 sw_if_index, u32 flags) { /* * for each mcast on the interface trigger a walk back to the children */ fib_protocol_t proto; ip_adjacency_t *adj; for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++) { if (sw_if_index >= vec_len(adj_mcasts[proto]) || ADJ_INDEX_INVALID == adj_mcasts[proto][sw_if_index]) continue; adj = adj_get(adj_mcasts[proto][sw_if_index]); fib_node_back_walk_ctx_t bw_ctx = { .fnbw_reason = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP ? FIB_NODE_BW_REASON_FLAG_INTERFACE_UP : FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN), }; fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx); } return (NULL); } VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_mcast_interface_state_change); /** * @brief Invoked on each SW interface of a HW interface when the * HW interface state changes */ static walk_rc_t adj_mcast_hw_sw_interface_state_change (vnet_main_t * vnm, u32 sw_if_index, void *arg) { adj_mcast_interface_state_change(vnm, sw_if_index, (uword) arg); return (WALK_CONTINUE); }
/**
 * Format an IP mcast adjacency: protocol, optional feature marker
 * and the rewrite string.
 */
u8*
format_adj_mcast (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    ip_adjacency_t *adj;

    adj = adj_get(index);

    s = format(s, "%U-mcast: ",
               format_fib_protocol, adj->ia_nh_proto);

    if (VNET_REWRITE_HAS_FEATURES & adj->rewrite_header.flags)
    {
        s = format(s, "[features] ");
    }

    s = format(s, "%U",
               format_vnet_rewrite,
               &adj->rewrite_header, sizeof(adj->rewrite_data), 0);

    return (s);
}
static clib_error_t *
adj_mcast_interface_delete (vnet_main_t * vnm,
                            u32 sw_if_index,
                            u32 is_add)
{
    /*
     * for each mcast adj on the interface trigger a walk back to
     * the children to tell them the interface is gone
     */
    fib_protocol_t proto;
    ip_adjacency_t *adj;

    if (is_add)
    {
        /*
         * not interested in interface additions. we will not back walk
         * to resolve paths through newly added interfaces. Why? The control
         * plane should have the brains to add interfaces first, then routes.
         * So the case where there are paths with a interface that matches
         * one just created is the case where the path resolved through an
         * interface that was deleted, and still has not been removed. The
         * new interface added, is NO GUARANTEE that the interface being
         * added now, even though it may have the same sw_if_index, is the
         * same interface that the path needs. So tough!
         * If the control plane wants these routes to resolve it needs to
         * remove and add them again.
         */
        return (NULL);
    }

    for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
    {
        adj_index_t ai;

        /* nothing registered for this interface/protocol */
        if (sw_if_index >= vec_len(adj_mcasts[proto]))
            continue;
        ai = adj_mcasts[proto][sw_if_index];
        if (ADJ_INDEX_INVALID == ai)
            continue;

        adj = adj_get(ai);

        fib_node_back_walk_ctx_t bw_ctx = {
            .fnbw_reason =  FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
        };

        fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
    }

    return (NULL);
}
/*
 * adj_mcast_add_or_lock
 *
 * Return a locked mcast adjacency for the (protocol, link-type)
 * pair on the given interface. There is at most one such adj per
 * interface per protocol: it is created on first request and its
 * lock count is bumped on every subsequent one.
 */
adj_index_t
adj_mcast_add_or_lock (fib_protocol_t proto,
                       vnet_link_t link_type,
                       u32 sw_if_index)
{
    ip_adjacency_t *adj;
    adj_index_t ai;

    vec_validate_init_empty(adj_mcasts[proto], sw_if_index,
                            ADJ_INDEX_INVALID);

    ai = adj_mcasts[proto][sw_if_index];

    if (ADJ_INDEX_INVALID != ai)
    {
        /* already exists - take another lock */
        adj = adj_get(ai);
        adj_lock(adj_get_index(adj));
    }
    else
    {
        vnet_main_t *vnm = vnet_get_main();

        adj = adj_alloc(proto);

        adj->lookup_next_index = IP_LOOKUP_NEXT_MCAST;
        adj->ia_nh_proto = proto;
        adj->ia_link = link_type;

        adj_mcasts[proto][sw_if_index] = adj_get_index(adj);
        adj_lock(adj_get_index(adj));

        /* arc from the protocol's mcast node to the interface's TX node */
        vnet_rewrite_init(vnm, sw_if_index, link_type,
                          adj_get_mcast_node(proto),
                          vnet_tx_node_index_for_sw_interface(vnm, sw_if_index),
                          &adj->rewrite_header);

        /*
         * we need a rewrite where the destination IP address is converted
         * to the appropriate link-layer address. This is interface specific.
         * So ask the interface to do it.
         */
        vnet_update_adjacency_for_sw_interface(vnm, sw_if_index,
                                               adj_get_index(adj));
    }

    return (adj_get_index(adj));
}
/**
 * Format an IP mcast midchain adjacency: protocol, optional feature
 * marker, the rewrite string and the DPO the midchain is stacked on.
 */
u8*
format_adj_mcast_midchain (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    /* 'indent' IS used below (for the stacked-on lines), so it must
     * not be marked CLIB_UNUSED as it previously was. */
    u32 indent = va_arg(*ap, u32);
    ip_adjacency_t * adj = adj_get(index);

    s = format(s, "%U-mcast-midchain: ",
               format_fib_protocol, adj->ia_nh_proto);

    /* keep the output consistent with format_adj_mcast, which
     * reports the presence of a feature arc */
    if (adj->rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)
        s = format(s, "[features] ");

    s = format (s, "%U",
                format_vnet_rewrite,
                &adj->rewrite_header, sizeof (adj->rewrite_data), 0);
    s = format (s, "\n%Ustacked-on:\n%U%U",
                format_white_space, indent,
                format_white_space, indent+2,
                format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);

    return (s);
}
/**
 * adj_mcast_midchain_update_rewrite
 *
 * Update the adjacency's rewrite string. A NULL string implies the
 * rewrite is reset (i.e. when ARP/ND entry is gone).
 * NB: the adj being updated may be handling traffic in the DP.
 *
 * @param adj_index  index of the mcast adj to convert to a midchain
 * @param fixup      DP fixup function installed on the midchain
 * @param fixup_data opaque data passed to the fixup function
 * @param flags      midchain behaviour flags
 * @param rewrite    the (non-NULL) rewrite string
 * @param offset     offset into the rewrite where the DP writes the
 *                   mcast IP address
 * @param mask       NOTE(review): accepted but never used in this body
 *                   — confirm whether it should be recorded on the
 *                   rewrite header or can be dropped from the API.
 */
void
adj_mcast_midchain_update_rewrite (adj_index_t adj_index,
                                   adj_midchain_fixup_t fixup,
                                   const void *fixup_data,
                                   adj_flags_t flags,
                                   u8 *rewrite,
                                   u8 offset,
                                   u32 mask)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /*
     * one time only update. since we don't support changing the tunnel
     * src,dst, this is all we need.
     */
    ASSERT(adj->lookup_next_index == IP_LOOKUP_NEXT_MCAST);

    /*
     * tunnels can always provide a rewrite.
     */
    ASSERT(NULL != rewrite);

    /* install the fixup/flags before flipping the adj to a midchain */
    adj_midchain_setup(adj_index, fixup, fixup_data, flags);

    /*
     * update the adj's rewrite string and build the arc
     * from the rewrite node to the interface's TX node
     */
    adj_nbr_update_rewrite_internal(adj,
                                    IP_LOOKUP_NEXT_MCAST_MIDCHAIN,
                                    adj_get_mcast_node(adj->ia_nh_proto),
                                    vnet_tx_node_index_for_sw_interface(
                                        vnet_get_main(),
                                        adj->rewrite_header.sw_if_index),
                                    rewrite);

    /* offset of the mcast IP address within the rewrite, for the DP */
    adj->rewrite_header.dst_mcast_offset = offset;
}
/**
 * @brief The LISP-GPE interface registered function to update, i.e.
 * provide an rewrite string for, an adjacency.
 *
 * Looks up (asserts existence of) the LISP adjacency matching the
 * adj's next-hop RLOC on this interface, builds a tunnel rewrite for
 * it, converts the adj to a midchain and stacks it on the tunnel.
 */
void
lisp_gpe_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
  const lisp_gpe_tunnel_t *lgt;
  lisp_gpe_adjacency_t *ladj;
  ip_adjacency_t *adj;
  ip_address_t rloc;
  vnet_link_t linkt;
  adj_flags_t af;
  index_t lai;

  adj = adj_get (ai);
  /* the adj's next-hop is the remote RLOC */
  ip46_address_to_ip_address (&adj->sub_type.nbr.next_hop, &rloc);

  /*
   * find an existing or create a new adj
   */
  lai = lisp_adj_find (&rloc, sw_if_index);

  /* the LISP adj must already exist - this is an update, not a create */
  ASSERT (INDEX_INVALID != lai);

  ladj = pool_elt_at_index (lisp_adj_pool, lai);
  lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
  linkt = adj_get_link_type (ai);
  af = ADJ_FLAG_MIDCHAIN_IP_STACK;
  /* on ethernet links the interface TX counters are not maintained
   * by the midchain (NO_COUNT) */
  if (VNET_LINK_ETHERNET == linkt)
    af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;

  /* convert the adj to a midchain with the tunnel's encap rewrite
   * and the LISP fixup applied per-packet */
  adj_nbr_midchain_update_rewrite
    (ai, lisp_gpe_fixup, NULL, af,
     lisp_gpe_tunnel_build_rewrite (lgt, ladj,
				    lisp_gpe_adj_proto_from_vnet_link_type
				    (linkt)));

  /* stack the midchain on the DPO towards the tunnel destination */
  lisp_gpe_adj_stack_one (ladj, ai);
}
/**
 * adj_midchain_tx_inline
 *
 * Midchain TX node function: for each buffer, follow the DPO on which
 * the midchain adjacency is stacked, rewrite the buffer's adj index to
 * that DPO's index and enqueue to the DPO's next node. When
 * interface_count is non-zero, also bump the interface TX counters.
 *
 * Standard dual-loop VPP node: a quad-buffer fast path with header
 * prefetch, then a single-buffer cleanup loop.
 */
always_inline uword
adj_midchain_tx_inline (vlib_main_t * vm,
			vlib_node_runtime_t * node,
			vlib_frame_t * frame,
			int interface_count)
{
    u32 * from, * to_next, n_left_from, n_left_to_next;
    u32 next_index;
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    u32 thread_index = vm->thread_index;

    /* Vector of buffer / pkt indices we're supposed to process */
    from = vlib_frame_vector_args (frame);

    /* Number of buffers / pkts */
    n_left_from = frame->n_vectors;

    /* Speculatively send the first buffer to the last disposition we used */
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
	/* set up to enqueue to our disposition with index = next_index */
	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	/* quad loop: needs 8 buffers in flight so the next 4 can be
	 * prefetched while the current 4 are processed */
	while (n_left_from >= 8 && n_left_to_next > 4)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;
	    u32 bi1, adj_index1, next1;
	    const ip_adjacency_t * adj1;
	    const dpo_id_t *dpo1;
	    vlib_buffer_t * b1;
	    u32 bi2, adj_index2, next2;
	    const ip_adjacency_t * adj2;
	    const dpo_id_t *dpo2;
	    vlib_buffer_t * b2;
	    u32 bi3, adj_index3, next3;
	    const ip_adjacency_t * adj3;
	    const dpo_id_t *dpo3;
	    vlib_buffer_t * b3;

	    /* Prefetch next iteration. */
	    {
		vlib_buffer_t * p4, * p5;
		vlib_buffer_t * p6, * p7;

		p4 = vlib_get_buffer (vm, from[4]);
		p5 = vlib_get_buffer (vm, from[5]);
		p6 = vlib_get_buffer (vm, from[6]);
		p7 = vlib_get_buffer (vm, from[7]);

		vlib_prefetch_buffer_header (p4, LOAD);
		vlib_prefetch_buffer_header (p5, LOAD);
		vlib_prefetch_buffer_header (p6, LOAD);
		vlib_prefetch_buffer_header (p7, LOAD);
	    }

	    /* speculatively copy the buffer indices to the next frame */
	    bi0 = from[0];
	    to_next[0] = bi0;
	    bi1 = from[1];
	    to_next[1] = bi1;
	    bi2 = from[2];
	    to_next[2] = bi2;
	    bi3 = from[3];
	    to_next[3] = bi3;

	    from += 4;
	    to_next += 4;
	    n_left_from -= 4;
	    n_left_to_next -= 4;

	    b0 = vlib_get_buffer(vm, bi0);
	    b1 = vlib_get_buffer(vm, bi1);
	    b2 = vlib_get_buffer(vm, bi2);
	    b3 = vlib_get_buffer(vm, bi3);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
	    adj_index2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
	    adj_index3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];

	    adj0 = adj_get(adj_index0);
	    adj1 = adj_get(adj_index1);
	    adj2 = adj_get(adj_index2);
	    adj3 = adj_get(adj_index3);

	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    dpo1 = &adj1->sub_type.midchain.next_dpo;
	    dpo2 = &adj2->sub_type.midchain.next_dpo;
	    dpo3 = &adj3->sub_type.midchain.next_dpo;

	    next0 = dpo0->dpoi_next_node;
	    next1 = dpo1->dpoi_next_node;
	    next2 = dpo2->dpoi_next_node;
	    next3 = dpo3->dpoi_next_node;

	    /* hand the buffers on to the stacked DPO: replace the adj
	     * index with the DPO's index for the next node */
	    vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
	    vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
	    vnet_buffer(b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
	    vnet_buffer(b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

	    if (interface_count)
	    {
		/* per-interface TX packet/byte counters */
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj1->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b1));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj2->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b2));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj3->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b3));
	    }

	    /* record the adj index in the trace, if tracing */
	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }
	    if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b1, sizeof (*tr));
		tr->ai = adj_index1;
	    }
	    if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b2, sizeof (*tr));
		tr->ai = adj_index2;
	    }
	    if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b3, sizeof (*tr));
		tr->ai = adj_index3;
	    }

	    /* fix up the frame if any buffer's next differs from the
	     * speculated next_index */
	    vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, bi1, bi2, bi3,
					     next0, next1, next2, next3);
	}
	/* single-buffer cleanup loop for the remainder */
	while (n_left_from > 0 && n_left_to_next > 0)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;

	    bi0 = from[0];
	    to_next[0] = bi0;
	    from += 1;
	    to_next += 1;
	    n_left_from -= 1;
	    n_left_to_next -= 1;

	    b0 = vlib_get_buffer(vm, bi0);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj0 = adj_get(adj_index0);
	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    next0 = dpo0->dpoi_next_node;
	    vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

	    if (interface_count)
	    {
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
	    }

	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }

	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, next0);
	}

	vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}