/**
 * Decode a multicast prefix received on the binary API into the
 * internal mfib_prefix_t representation.
 *
 * @param in  wire-format prefix (fields in network byte order)
 * @param out decoded prefix (host byte order / internal enums)
 *
 * The address family selects the FIB protocol; the group and source
 * addresses are decoded with the same (network-order) af value, as
 * ip_address_union_decode expects.
 */
void
ip_mprefix_decode (const vl_api_mprefix_t * in, mfib_prefix_t * out)
{
  u32 host_af = clib_net_to_host_u32 (in->af);

  if (ADDRESS_IP6 == host_af)
    out->fp_proto = FIB_PROTOCOL_IP6;
  else
    out->fp_proto = FIB_PROTOCOL_IP4;

  out->fp_len = clib_net_to_host_u16 (in->grp_address_length);

  ip_address_union_decode (&in->grp_address, in->af, &out->fp_grp_addr);
  ip_address_union_decode (&in->src_address, in->af, &out->fp_src_addr);
}
/*
 * ip6_sixrd: 6rd (IPv6 rapid deployment) encapsulation node.
 *
 * For each IPv6 packet in the frame, derives an IPv4 tunnel endpoint
 * from the IPv6 destination via the packet's sixrd domain, prepends an
 * IPv4 header (protocol 41, IPv6-in-IPv4), and forwards to ip4-lookup;
 * packets whose endpoint cannot be resolved are dropped.
 *
 * vm/node/frame are the standard vlib dispatch arguments.
 * Returns the number of vectors processed.
 */
static uword
ip6_sixrd (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_sixrd_node.index);
  u32 encap = 0;   /* count of successfully encapsulated packets */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          sixrd_domain_t *d0;
          u8 error0 = SIXRD_ERROR_NONE;
          ip6_header_t *ip60;
          ip4_header_t *ip4h0;
          u32 next0 = IP6_SIXRD_NEXT_IP4_LOOKUP;
          u32 sixrd_domain_index0 = ~0;

          /* Speculatively copy the buffer index to the next frame. */
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);
          // p0->current_length = clib_net_to_host_u16(ip40->length);

          /* Domain was resolved by a previous node and stashed in the
             TX adjacency index. */
          d0 = ip6_sixrd_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                                     &sixrd_domain_index0);
          ASSERT (d0);

          /* SIXRD calc: map the top 64 bits of the IPv6 destination to
             the IPv4 tunnel endpoint. */
          u64 dal60 = clib_net_to_host_u64 (ip60->dst_address.as_u64[0]);
          u32 da40 = sixrd_get_addr (d0, dal60);
          /* Outer IPv4 total length = payload + IPv6 header (40) +
             IPv4 header (20) = payload + 60. */
          u16 len = clib_net_to_host_u16 (ip60->payload_length) + 60;
          if (da40 == 0)
            error0 = SIXRD_ERROR_UNKNOWN;

          /* construct ipv4 header: move current data pointer back by one
             IPv4 header and fill it in front of the IPv6 packet. */
          vlib_buffer_advance (p0, -(sizeof (ip4_header_t)));
          ip4h0 = vlib_buffer_get_current (p0);
          vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~0;
          ip4h0->ip_version_and_header_length = 0x45;   /* v4, 20-byte hdr */
          ip4h0->tos = 0;
          ip4h0->length = clib_host_to_net_u16 (len);
          ip4h0->fragment_id = 0;
          ip4h0->flags_and_fragment_offset = 0;
          ip4h0->ttl = 0x40;                            /* TTL 64 */
          ip4h0->protocol = IP_PROTOCOL_IPV6;           /* protocol 41 */
          ip4h0->src_address = d0->ip4_src;
          ip4h0->dst_address.as_u32 = clib_host_to_net_u32 (da40);
          ip4h0->checksum = ip4_header_checksum (ip4h0);

          next0 = error0 == SIXRD_ERROR_NONE ?
            IP6_SIXRD_NEXT_IP4_LOOKUP : IP6_SIXRD_NEXT_DROP;

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              sixrd_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->sixrd_domain_index = sixrd_domain_index0;
            }

          p0->error = error_node->errors[error0];
          if (PREDICT_TRUE (error0 == SIXRD_ERROR_NONE))
            encap++;

          /* Validate the speculative enqueue; reroutes if next0 changed. */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, ip6_sixrd_node.index,
                               SIXRD_ERROR_ENCAPSULATED, encap);
  return frame->n_vectors;
}
typedef struct { u32 tunnel_id; u32 length; ip4_address_t src; ip4_address_t dst; } ipsec_gre_rx_trace_t; static u8 * format_ipsec_gre_rx_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); ipsec_gre_rx_trace_t * t = va_arg (*args, ipsec_gre_rx_trace_t *); s = format (s, "GRE: tunnel %d len %d src %U dst %U", t->tunnel_id, clib_net_to_host_u16(t->length), format_ip4_address, &t->src.as_u8, format_ip4_address, &t->dst.as_u8); return s; } /** * @brief L2-GRE over IPSec input node. * @node ipsec-gre-input * * This node remove GRE header. * * @param vm vlib_main_t corresponding to the current thread. * @param node vlib_node_runtime_t data for this node. * @param from_frame vlib_frame_t whose contents should be dispatched. *
/*
 * Out2in translation of an ICMP error packet: rewrites the outer IPv4
 * destination and the embedded (inner) IP source/port back to the
 * private (in2out) address of the CNAT session, updating all affected
 * checksums incrementally via the CNAT_* macros.
 *
 * ip        - outer IPv4 header
 * icmp_info - pointers to the outer ICMP header and the embedded IP
 *             header / L4 port inside the ICMP payload
 * db        - CNAT session entry holding in2out/out2in keys
 * vrf       - VRF id (only used by the disabled static-dest-NAT path)
 */
inline void swap_ip_dst_emip_src(ipv4_header *ip,
                                 icmp_em_ip_info *icmp_info,
                                 cnat_main_db_entry_t *db, u16 vrf)
{
    icmp_v4_t *icmp;
    ipv4_header *em_ip;
    u16 *em_port;
    u32 old_ip;
    u16 old_port;
    u16 old_ip_checksum;

    /*
     * declare the scratch variables (new_l3_c, new_icmp_c, ...) used by
     * the incremental checksum-update macros below
     */
    CNAT_UPDATE_L3_CHECKSUM_DECLARE
    CNAT_UPDATE_ICMP_ERR_CHECKSUM_DECLARE

    /*
     * fix inner layer ip & l4 checksum: out2in address -> in2out address
     */
    em_ip = icmp_info->em_ip;
    em_port = icmp_info->em_port;
    CNAT_UPDATE_L3_CHECKSUM(((u16)(db->out2in_key.k.ipv4)),
                            ((u16)(db->out2in_key.k.ipv4 >> 16)),
                            (clib_net_to_host_u16(em_ip->checksum)),
                            ((u16)(db->in2out_key.k.ipv4)),
                            ((u16)(db->in2out_key.k.ipv4 >> 16)))

    /* remember the pre-rewrite inner values; they feed the outer ICMP
       checksum update below */
    old_ip = clib_net_to_host_u32(em_ip->src_addr);
    old_port = clib_net_to_host_u16(*em_port);
    old_ip_checksum = clib_net_to_host_u16(em_ip->checksum);

    em_ip->src_addr = clib_host_to_net_u32(db->in2out_key.k.ipv4);
    em_ip->checksum = clib_host_to_net_u16(new_l3_c);
    *em_port = clib_host_to_net_u16(db->in2out_key.k.port);

    /*
     * fix outer layer ip & icmp checksum
     */
    icmp = icmp_info->icmp;
    CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
                                  ((u16)(old_ip >> 16)),
                                  (old_port),
                                  (old_ip_checksum),
                                  (clib_net_to_host_u16(icmp->checksum)),
                                  ((u16)(db->in2out_key.k.ipv4 & 0xffff)),
                                  ((u16)(db->in2out_key.k.ipv4 >> 16)),
                                  ((u16)(db->in2out_key.k.port)),
                                  ((u16)(new_l3_c)))
    icmp->checksum = clib_host_to_net_u16(new_icmp_c);

    /* rewrite the outer destination address and patch its checksum */
    old_ip = clib_net_to_host_u32(ip->dest_addr);
    ip->dest_addr = clib_host_to_net_u32(db->in2out_key.k.ipv4);

    CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip & 0xFFFF)),
                            ((u16)(old_ip >> 16)),
                            (clib_net_to_host_u16(ip->checksum)),
                            ((u16)(db->in2out_key.k.ipv4)),
                            ((u16)(db->in2out_key.k.ipv4 >> 16)))
    ip->checksum = clib_host_to_net_u16(new_l3_c);

#if 0
    /* disabled static destination-NAT path */
    if(is_static_dest_nat_enabled(vrf) == CNAT_SUCCESS) {
        /*
         * fix inner layer ip & l4 checksum
         */
        em_snat_ip = icmp_info->em_ip;
        em_snat_port = icmp_info->em_port;

        old_ip = spp_net_to_host_byte_order_32(&(em_snat_ip->dest_addr));
        old_port = spp_net_to_host_byte_order_16(em_snat_port);
        old_ip_checksum = spp_net_to_host_byte_order_16(&(em_snat_ip->checksum));
        direction = 1;
        if(cnat_static_dest_db_get_translation(em_snat_ip->dest_addr, &postmap_ip, vrf, direction) == CNAT_SUCCESS) {
            old_postmap_ip = spp_net_to_host_byte_order_32(&postmap_ip);

            CNAT_UPDATE_L3_CHECKSUM(((u16)(old_ip)),
                                    ((u16)(old_ip >> 16)),
                                    (spp_net_to_host_byte_order_16(&(em_snat_ip->checksum))),
                                    ((u16)(old_postmap_ip)),
                                    ((u16)(old_postmap_ip >> 16)))
            em_snat_ip->dest_addr = postmap_ip;
            em_snat_ip->checksum = spp_host_to_net_byte_order_16(new_l3_c);

            /*
             * fix outer layer ip & icmp checksum
             */
            icmp = icmp_info->icmp;
            CNAT_UPDATE_ICMP_ERR_CHECKSUM(((u16)(old_ip & 0xFFFF)),
                                          ((u16)(old_ip >> 16)),
                                          (old_port),
                                          (old_ip_checksum),
                                          (spp_net_to_host_byte_order_16(&(icmp->checksum))),
                                          ((u16)(old_postmap_ip & 0xffff)),
                                          ((u16)(old_postmap_ip >> 16)),
                                          ((u16)(old_port)),
                                          ((u16)(new_l3_c)))
            icmp->checksum = spp_host_to_net_byte_order_16(new_icmp_c);
        }
/* NOTE(review): chunk is truncated here — the remainder of this #if 0
   section, its #endif, and the function's closing brace are outside
   this view. */
};
/* *INDENT-ON* */

/*
 * Pretty-print one flowprobe flow-table entry: interface pair, MAC
 * addresses, IP addresses, protocol and L4 ports.
 */
u8 *
format_flowprobe_entry (u8 * s, va_list * args)
{
  flowprobe_entry_t *e = va_arg (*args, flowprobe_entry_t *);
  /* rx/tx sw_if_index pair identifying where the flow was seen */
  s = format (s, " %d/%d", e->key.rx_sw_if_index, e->key.tx_sw_if_index);

  s = format (s, " %U %U", format_ethernet_address, &e->key.src_mac,
              format_ethernet_address, &e->key.dst_mac);
  s = format (s, " %U -> %U",
              format_ip46_address, &e->key.src_address, IP46_TYPE_ANY,
              format_ip46_address, &e->key.dst_address, IP46_TYPE_ANY);
  s = format (s, " %d", e->key.protocol);
  /* ports are stored in network byte order in the key */
  s = format (s, " %d %d\n", clib_net_to_host_u16 (e->key.src_port),
              clib_net_to_host_u16 (e->key.dst_port));

  return s;
}

/* CLI handler: dump the flowprobe IPFIX flow table.
   NOTE(review): this function continues beyond the end of this chunk. */
static clib_error_t *
flowprobe_show_table_fn (vlib_main_t * vm,
                         unformat_input_t * input, vlib_cli_command_t * cm)
{
  flowprobe_main_t *fm = &flowprobe_main;
  int i;
  flowprobe_entry_t *e;

  vlib_cli_output (vm, "Dumping IPFIX table");
/*
 * In2out UDP translation: rewrites the source IPv4 address and source
 * UDP port of a packet to the session's out2in (public) values, and
 * updates the IP and UDP checksums incrementally in one macro.
 *
 * ip  - IPv4 header to rewrite
 * udp - UDP header to rewrite
 * db  - CNAT session entry holding the in2out/out2in keys
 */
inline void swap_ip_src_udp_port(ipv4_header *ip,
                                 udp_hdr_type_t *udp,
                                 cnat_main_db_entry_t *db)
{
    /*
     * declare the scratch variables used by the checksum-update macro
     */
    CNAT_UPDATE_L3_L4_CHECKSUM_DECLARE

    /*
     * calculate checksum: replace (in2out ip, in2out port) with
     * (out2in ip, out2in port) in both the L3 and L4 checksums
     */
    CNAT_UPDATE_L3_L4_CHECKSUM(((u16)(db->in2out_key.k.ipv4)),
                               ((u16)(db->in2out_key.k.ipv4 >> 16)),
                               (db->in2out_key.k.port),
                               (clib_net_to_host_u16(ip->checksum)),
                               (clib_net_to_host_u16(udp->udp_checksum)),
                               ((u16)(db->out2in_key.k.ipv4)),
                               ((u16)(db->out2in_key.k.ipv4 >> 16)),
                               (db->out2in_key.k.port))

/* #define UDP_PACKET_DEBUG 1 */

    // Temporary debugs which will be suppressed later
#ifdef UDP_PACKET_DEBUG
    if (PREDICT_FALSE(udp_inside_packet_dump_enable)) {
        printf("\nIn2Out UDP packet before translation");
        print_udp_pkt(ip);
    }
#endif
/* NOTE(review): chunk is truncated here — the rest of the function body
   and its closing brace are outside this view. */
/*
 * Allocate a binary-API message buffer of nbytes payload.
 *
 * Strategy: first try the preallocated message rings (vl_rings when
 * pool != 0, i.e. allocating on behalf of vpp itself; client_rings
 * otherwise), scanning from smallest to largest until one has a free
 * head element.  If no ring element is available, fall back to an
 * allocation from the shared-memory heap.
 *
 * nbytes          - requested payload size (msgbuf_t header added here)
 * pool            - nonzero: allocate from the vpp-owned rings
 * may_return_null - nonzero: return NULL on heap exhaustion instead of
 *                   asserting inside clib_mem_alloc
 *
 * Returns a pointer to the data area of the msgbuf_t (not the msgbuf_t
 * itself), or NULL on failure paths.
 */
static inline void *
vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
{
  int i;
  msgbuf_t *rv;
  ring_alloc_t *ap;
  svm_queue_t *q;
  void *oldheap;
  vl_shmem_hdr_t *shmem_hdr;
  api_main_t *am = &api_main;

  shmem_hdr = am->shmem_hdr;

#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  /* reserve room for the trailing overrun-detection sentinel */
  nbytes += 4;
#endif

  /* vl_rings allocations are only legal on the main thread */
  ASSERT (pool == 0 || vlib_get_thread_index () == 0);

  if (shmem_hdr == 0)
    {
      clib_warning ("shared memory header NULL");
      return 0;
    }

  /* account for the msgbuf_t header */
  nbytes += sizeof (msgbuf_t);

  if (shmem_hdr->vl_rings == 0)
    {
      clib_warning ("vl_rings NULL");
      ASSERT (0);
      abort ();
    }

  if (shmem_hdr->client_rings == 0)
    {
      clib_warning ("client_rings NULL");
      ASSERT (0);
      abort ();
    }

  ap = pool ? shmem_hdr->vl_rings : shmem_hdr->client_rings;

  for (i = 0; i < vec_len (ap); i++)
    {
      /* Too big? */
      if (nbytes > ap[i].size)
        {
          continue;
        }

      q = ap[i].rp;
      if (pool == 0)
        {
          /* client rings are shared with vpp; lock the queue */
          pthread_mutex_lock (&q->mutex);
        }
      rv = (msgbuf_t *) (&q->data[0] + q->head * q->elsize);
      /*
       * Is this item still in use?  rv->q is cleared when the message
       * is freed, so a nonzero q means the consumer hasn't finished.
       */
      if (rv->q)
        {
          u32 now = (u32) time (0);

          /* first sighting: mark it; later sightings: if it has been
             stuck for more than 10 seconds, garbage-collect it */
          if (PREDICT_TRUE (rv->gc_mark_timestamp == 0))
            rv->gc_mark_timestamp = now;
          else
            {
              if (now - rv->gc_mark_timestamp > 10)
                {
                  if (CLIB_DEBUG > 0)
                    {
                      u16 *msg_idp, msg_id;
                      clib_warning
                        ("garbage collect pool %d ring %d index %d",
                         pool, i, q->head);
                      msg_idp = (u16 *) (rv->data);
                      msg_id = clib_net_to_host_u16 (*msg_idp);
                      if (msg_id < vec_len (api_main.msg_names))
                        clib_warning ("msg id %d name %s", (u32) msg_id,
                                      api_main.msg_names[msg_id]);
                    }
                  shmem_hdr->garbage_collects++;
                  goto collected;
                }
            }

          /* yes, loser; try next larger pool */
          ap[i].misses++;
          if (pool == 0)
            pthread_mutex_unlock (&q->mutex);
          continue;
        }
    collected:

      /* OK, we have a winner */
      ap[i].hits++;
      /*
       * Remember the source queue, although we
       * don't need to know the queue to free the item.
       */
      rv->q = q;
      rv->gc_mark_timestamp = 0;
      q->head++;
      if (q->head == q->maxsize)
        q->head = 0;

      if (pool == 0)
        pthread_mutex_unlock (&q->mutex);
      goto out;
    }

  /*
   * Request too big, or head element of all size-compatible rings
   * still in use. Fall back to shared-memory malloc.
   */
  am->ring_misses++;

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);
  if (may_return_null)
    {
      rv = clib_mem_alloc_or_null (nbytes);
      if (PREDICT_FALSE (rv == 0))
        {
          svm_pop_heap (oldheap);
          pthread_mutex_unlock (&am->vlib_rp->mutex);
          return 0;
        }
    }
  else
    rv = clib_mem_alloc (nbytes);

  /* q == 0 marks a heap (non-ring) allocation to the free path */
  rv->q = 0;
  rv->gc_mark_timestamp = 0;
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);

out:
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  {
    /* plant the sentinel just past the usable payload; checked on free */
    nbytes -= 4;
    u32 *overrun;
    overrun = (u32 *) (rv->data + nbytes - sizeof (msgbuf_t));
    *overrun = 0x1badbabe;
  }
#endif
  /* data_len is carried in network byte order */
  rv->data_len = htonl (nbytes - sizeof (msgbuf_t));

  return (rv->data);
}