static void
disable_events (perfmon_main_t * pm)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 my_thread_index = vm->thread_index;
  int i;

  /* Stop main loop collection */
  vm->vlib_node_runtime_perf_counter_cb = 0;

  for (i = 0; i < pm->n_active; i++)
    {
      if (pm->pm_fds[i][my_thread_index] == 0)
        continue;

      if (ioctl (pm->pm_fds[i][my_thread_index], PERF_EVENT_IOC_DISABLE, 0) < 0)
        clib_unix_warning ("disable ioctl");

      if (pm->perf_event_pages[i][my_thread_index])
        if (munmap (pm->perf_event_pages[i][my_thread_index],
                    pm->page_size) < 0)
          clib_unix_warning ("munmap");

      (void) close (pm->pm_fds[i][my_thread_index]);
      pm->pm_fds[i][my_thread_index] = 0;
    }
}
static int
flowprobe_template_add_del (u32 domain_id, u16 src_port,
                            flowprobe_record_t flags,
                            vnet_flow_data_callback_t * flow_data_callback,
                            vnet_flow_rewrite_callback_t * rewrite_callback,
                            bool is_add, u16 * template_id)
{
  flow_report_main_t *frm = &flow_report_main;
  vnet_flow_report_add_del_args_t a = {
    .rewrite_callback = rewrite_callback,
    .flow_data_callback = flow_data_callback,
    .is_add = is_add,
    .domain_id = domain_id,
    .src_port = src_port,
    .opaque.as_uword = flags,
  };
  return vnet_flow_report_add_del (frm, &a, template_id);
}

static void
flowprobe_expired_timer_callback (u32 * expired_timers)
{
  vlib_main_t *vm = vlib_get_main ();
  flowprobe_main_t *fm = &flowprobe_main;
  u32 my_cpu_number = vm->thread_index;
  int i;
  u32 poolindex;

  for (i = 0; i < vec_len (expired_timers); i++)
    {
      poolindex = expired_timers[i] & 0x7FFFFFFF;
      vec_add1 (fm->expired_passive_per_worker[my_cpu_number], poolindex);
    }
}
static inline int
session_send_evt_to_thread (void *data, void *args, u32 thread_index,
                            session_evt_type_t evt_type)
{
  session_event_t *evt;
  svm_msg_q_msg_t msg;
  svm_msg_q_t *mq;
  u32 tries = 0, max_tries;

  mq = session_main_get_vpp_event_queue (thread_index);
  while (svm_msg_q_try_lock (mq))
    {
      max_tries = vlib_get_current_process (vlib_get_main ()) ? 1e6 : 3;
      if (tries++ == max_tries)
        {
          SESSION_DBG ("failed to enqueue evt");
          return -1;
        }
    }
  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      svm_msg_q_unlock (mq);
      return -2;
    }
  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
    {
      svm_msg_q_unlock (mq);
      return -2;
    }
  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->event_type = evt_type;
  switch (evt_type)
    {
    case SESSION_CTRL_EVT_RPC:
      evt->rpc_args.fp = data;
      evt->rpc_args.arg = args;
      break;
    case SESSION_IO_EVT_TX:
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_BUILTIN_RX:
      evt->session_index = *(u32 *) data;
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
    case SESSION_CTRL_EVT_CLOSE:
      evt->session_handle = session_handle ((session_t *) data);
      break;
    default:
      clib_warning ("evt unhandled!");
      svm_msg_q_unlock (mq);
      return -1;
    }

  svm_msg_q_add_and_unlock (mq, &msg);
  return 0;
}
static void
ct_enable_disable_main_pre_input_node (u8 is_add)
{
  u32 n_conns;

  if (!vlib_num_workers ())
    return;

  n_conns = pool_elts (connections);
  if (n_conns > 2)
    return;

  if (n_conns > 0 && is_add)
    vlib_node_set_state (vlib_get_main (),
                         session_queue_pre_input_node.index,
                         VLIB_NODE_STATE_POLLING);
  else if (n_conns == 0)
    vlib_node_set_state (vlib_get_main (),
                         session_queue_pre_input_node.index,
                         VLIB_NODE_STATE_DISABLED);
}
static clib_error_t *
netmap_fd_read_ready (unix_file_t * uf)
{
  vlib_main_t *vm = vlib_get_main ();
  netmap_main_t *nm = &netmap_main;
  u32 idx = uf->private_data;

  nm->pending_input_bitmap =
    clib_bitmap_set (nm->pending_input_bitmap, idx, 1);

  /* Schedule the rx node */
  vlib_node_set_interrupt_pending (vm, netmap_input_node.index);

  return 0;
}
static clib_error_t *
flowprobe_create_state_tables (u32 active_timer)
{
  flowprobe_main_t *fm = &flowprobe_main;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_main_t *vm = vlib_get_main ();
  clib_error_t *error = 0;
  u32 num_threads;
  int i;

  /* Decide how many worker threads we have */
  num_threads = 1 /* main thread */  + tm->n_threads;

  /* Hash table per worker */
  fm->ht_log2len = FLOWPROBE_LOG2_HASHSIZE;

  /* Init per worker flow state and timer wheels */
  if (active_timer)
    {
      vec_validate (fm->timers_per_worker, num_threads - 1);
      vec_validate (fm->expired_passive_per_worker, num_threads - 1);
      vec_validate (fm->hash_per_worker, num_threads - 1);
      vec_validate (fm->pool_per_worker, num_threads - 1);

      for (i = 0; i < num_threads; i++)
        {
          int j;
          pool_alloc (fm->pool_per_worker[i], 1 << fm->ht_log2len);
          vec_resize (fm->hash_per_worker[i], 1 << fm->ht_log2len);
          for (j = 0; j < (1 << fm->ht_log2len); j++)
            fm->hash_per_worker[i][j] = ~0;
          fm->timers_per_worker[i] =
            clib_mem_alloc (sizeof (TWT (tw_timer_wheel)));
          tw_timer_wheel_init_2t_1w_2048sl (fm->timers_per_worker[i],
                                            flowprobe_expired_timer_callback,
                                            1.0, 1024);
        }
      fm->disabled = true;
    }
  else
    {
      f64 now = vlib_time_now (vm);
      vec_validate (fm->stateless_entry, num_threads - 1);
      for (i = 0; i < num_threads; i++)
        fm->stateless_entry[i].last_exported = now;
      fm->disabled = false;
    }
  fm->initialized = true;
  return error;
}
static void
signal_report (prefix_report_t * r)
{
  vlib_main_t *vm = vlib_get_main ();
  dhcp6_pd_client_main_t *cm = &dhcp6_pd_client_main;
  uword ni = cm->publisher_node;
  uword et = cm->publisher_et;

  if (ni == (uword) ~0)
    return;

  prefix_report_t *q =
    vlib_process_signal_event_data (vm, ni, et, 1, sizeof *q);
  *q = *r;
}
static struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_main_t *bm = vm->buffer_main;
  struct rte_mbuf *first_mb = 0, *new_mb, *pkt_mb, **prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = ((struct rte_mbuf *) b) - 1;
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE (pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          if (first_mb)
            rte_pktmbuf_free (first_mb);
          return NULL;
        }
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
        {
          if (first_mb)
            rte_pktmbuf_free (first_mb);
          return NULL;
        }

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME                /* TX Offload support TBD */
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else
        {
void
  vl_api_dhcp6_pd_send_client_message_t_handler
  (vl_api_dhcp6_pd_send_client_message_t * mp)
{
  vl_api_dhcp6_pd_send_client_message_reply_t *rmp;
  dhcp6_pd_send_client_message_params_t params;
  vlib_main_t *vm = vlib_get_main ();
  u32 n_prefixes;
  u32 i;
  int rv = 0;

  VALIDATE_SW_IF_INDEX (mp);

  BAD_SW_IF_INDEX_LABEL;
  REPLY_MACRO (VL_API_DHCP6_PD_SEND_CLIENT_MESSAGE_REPLY);

  if (rv != 0)
    return;

  params.sw_if_index = ntohl (mp->sw_if_index);
  params.server_index = ntohl (mp->server_index);
  params.irt = ntohl (mp->irt);
  params.mrt = ntohl (mp->mrt);
  params.mrc = ntohl (mp->mrc);
  params.mrd = ntohl (mp->mrd);
  params.msg_type = mp->msg_type;
  params.T1 = ntohl (mp->T1);
  params.T2 = ntohl (mp->T2);
  n_prefixes = ntohl (mp->n_prefixes);
  params.prefixes = 0;
  if (n_prefixes > 0)
    vec_validate (params.prefixes, n_prefixes - 1);
  for (i = 0; i < n_prefixes; i++)
    {
      vl_api_dhcp6_pd_prefix_info_t *pi = &mp->prefixes[i];
      dhcp6_pd_send_client_message_params_prefix_t *pref =
        &params.prefixes[i];
      pref->preferred_lt = ntohl (pi->preferred_time);
      pref->valid_lt = ntohl (pi->valid_time);
      memcpy (pref->prefix.as_u8, pi->prefix, 16);
      pref->prefix_length = pi->prefix_length;
    }

  dhcp6_pd_send_client_message (vm, ntohl (mp->sw_if_index), mp->stop,
                                &params);
}
static void
vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
{
  int rv = 0;
  vl_api_create_vhost_user_if_reply_t *rmp;
  u32 sw_if_index = (u32) ~0;
  vnet_main_t *vnm = vnet_get_main ();
  vlib_main_t *vm = vlib_get_main ();
  u64 features = (u64) ~(0ULL);
  u64 disabled_features = (u64) (0ULL);

  if (mp->disable_mrg_rxbuf)
    disabled_features = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF);

  if (mp->disable_indirect_desc)
    disabled_features |= (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC);

  features &= ~disabled_features;

  rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename,
                             mp->is_server, &sw_if_index, features,
                             mp->renumber, ntohl (mp->custom_dev_instance),
                             (mp->use_custom_mac) ? mp->mac_address : NULL);

  /* Remember an interface tag for the new interface */
  if (rv == 0)
    {
      /* If a tag was supplied... */
      if (mp->tag[0])
        {
          /* Make sure it's a proper C-string */
          mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
          u8 *tag = format (0, "%s%c", mp->tag, 0);
          vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
        }
    }

  /* *INDENT-OFF* */
  REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY,
  ({
    rmp->sw_if_index = ntohl (sw_if_index);
  }));
/**
 * Enqueue buffer chain tail
 */
always_inline int
session_enqueue_chain_tail (session_t * s, vlib_buffer_t * b,
                            u32 offset, u8 is_in_order)
{
  vlib_buffer_t *chain_b;
  u32 chain_bi, len, diff;
  vlib_main_t *vm = vlib_get_main ();
  u8 *data;
  u32 written = 0;
  int rv = 0;

  if (is_in_order && offset)
    {
      diff = offset - b->current_length;
      if (diff > b->total_length_not_including_first_buffer)
        return 0;
      chain_b = b;
      session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff);
      chain_bi = vlib_get_buffer_index (vm, chain_b);
    }
  else
    chain_bi = b->next_buffer;

  do
    {
      chain_b = vlib_get_buffer (vm, chain_bi);
      data = vlib_buffer_get_current (chain_b);
      len = chain_b->current_length;
      if (!len)
        continue;
      if (is_in_order)
        {
          rv = svm_fifo_enqueue (s->rx_fifo, len, data);
          if (rv == len)
            {
              written += rv;
            }
          else if (rv < len)
            {
              return (rv > 0) ? (written + rv) : written;
            }
          else if (rv > len)
            {
              written += rv;

              /* written more than what was left in chain */
              if (written > b->total_length_not_including_first_buffer)
                return written;

              /* drop the bytes that have already been delivered */
              session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len);
            }
        }
      else
        {
          rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset, len, data);
          if (rv)
            {
              clib_warning ("failed to enqueue multi-buffer seg");
              return -1;
            }
          offset += len;
        }
    }
  while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT)
          ? chain_b->next_buffer : 0));

  if (is_in_order)
    return written;

  return 0;
}
static int
flowprobe_tx_interface_add_del_feature (flowprobe_main_t * fm,
                                        u32 sw_if_index, u8 which,
                                        int is_add)
{
  vlib_main_t *vm = vlib_get_main ();
  int rv = 0;
  u16 template_id = 0;
  flowprobe_record_t flags = fm->record;

  fm->flow_per_interface[sw_if_index] = (is_add) ? which : (u8) ~0;
  fm->template_per_flow[which] += (is_add) ? 1 : -1;
  if (is_add && fm->template_per_flow[which] > 1)
    template_id = fm->template_reports[flags];

  if ((is_add && fm->template_per_flow[which] == 1) ||
      (!is_add && fm->template_per_flow[which] == 0))
    {
      if (which == FLOW_VARIANT_L2)
        {
          if (fm->record & FLOW_RECORD_L2)
            {
              rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                               flowprobe_data_callback_l2,
                                               flowprobe_template_rewrite_l2,
                                               is_add, &template_id);
            }
          if (fm->record & FLOW_RECORD_L3 || fm->record & FLOW_RECORD_L4)
            {
              rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                               flowprobe_data_callback_l2,
                                               flowprobe_template_rewrite_l2_ip4,
                                               is_add, &template_id);
              fm->template_reports[flags | FLOW_RECORD_L2_IP4] =
                (is_add) ? template_id : 0;
              rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                               flowprobe_data_callback_l2,
                                               flowprobe_template_rewrite_l2_ip6,
                                               is_add, &template_id);
              fm->template_reports[flags | FLOW_RECORD_L2_IP6] =
                (is_add) ? template_id : 0;

              /* Special case L2 */
              fm->context[FLOW_VARIANT_L2_IP4].flags =
                flags | FLOW_RECORD_L2_IP4;
              fm->context[FLOW_VARIANT_L2_IP6].flags =
                flags | FLOW_RECORD_L2_IP6;

              fm->template_reports[flags] = template_id;
            }
        }
      else if (which == FLOW_VARIANT_IP4)
        rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                         flowprobe_data_callback_ip4,
                                         flowprobe_template_rewrite_ip4,
                                         is_add, &template_id);
      else if (which == FLOW_VARIANT_IP6)
        rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                         flowprobe_data_callback_ip6,
                                         flowprobe_template_rewrite_ip6,
                                         is_add, &template_id);
    }

  if (rv && rv != VNET_API_ERROR_VALUE_EXIST)
    {
      clib_warning ("vnet_flow_report_add_del returned %d", rv);
      return -1;
    }

  if (which != (u8) ~0)
    {
      fm->context[which].flags = fm->record;
      fm->template_reports[flags] = (is_add) ? template_id : 0;
    }

  if (which == FLOW_VARIANT_IP4)
    vnet_feature_enable_disable ("ip4-output", "flowprobe-ip4",
                                 sw_if_index, is_add, 0, 0);
  else if (which == FLOW_VARIANT_IP6)
    vnet_feature_enable_disable ("ip6-output", "flowprobe-ip6",
                                 sw_if_index, is_add, 0, 0);
  else if (which == FLOW_VARIANT_L2)
    vnet_feature_enable_disable ("interface-output", "flowprobe-l2",
                                 sw_if_index, is_add, 0, 0);

  /* Stateful flow collection */
  if (is_add && !fm->initialized)
    {
      flowprobe_create_state_tables (fm->active_timer);
      if (fm->active_timer)
        vlib_process_signal_event (vm, flowprobe_timer_node.index, 1, 0);
    }

  return 0;
}
static void
enable_current_events (perfmon_main_t * pm)
{
  struct perf_event_attr pe;
  int fd;
  struct perf_event_mmap_page *p = 0;
  perfmon_event_config_t *c;
  vlib_main_t *vm = vlib_get_main ();
  u32 my_thread_index = vm->thread_index;
  u32 index;
  int i, limit = 1;
  int cpu;

  if ((pm->current_event + 1) < vec_len (pm->single_events_to_collect))
    limit = 2;

  for (i = 0; i < limit; i++)
    {
      c = vec_elt_at_index (pm->single_events_to_collect,
                            pm->current_event + i);

      memset (&pe, 0, sizeof (struct perf_event_attr));
      pe.type = c->pe_type;
      pe.size = sizeof (struct perf_event_attr);
      pe.config = c->pe_config;
      pe.disabled = 1;
      pe.pinned = 1;
      /*
       * Note: excluding the kernel makes the
       * (software) context-switch counter read 0...
       */
      if (pe.type != PERF_TYPE_SOFTWARE)
        {
          /* Exclude kernel and hypervisor */
          pe.exclude_kernel = 1;
          pe.exclude_hv = 1;
        }

      cpu = vm->cpu_id;

      fd = perf_event_open (&pe, 0, cpu, -1, 0);
      if (fd == -1)
        {
          clib_unix_warning ("event open: type %d config %d", c->pe_type,
                             c->pe_config);
          return;
        }

      if (pe.type != PERF_TYPE_SOFTWARE)
        {
          p = mmap (0, pm->page_size, PROT_READ, MAP_SHARED, fd, 0);
          if (p == MAP_FAILED)
            {
              clib_unix_warning ("mmap");
              close (fd);
              return;
            }
        }
      else
        p = 0;

      if (ioctl (fd, PERF_EVENT_IOC_RESET, 0) < 0)
        clib_unix_warning ("reset ioctl");

      if (ioctl (fd, PERF_EVENT_IOC_ENABLE, 0) < 0)
        clib_unix_warning ("enable ioctl");

      /*
       * Software event counters - and others not capable of being
       * read via the "rdpmc" instruction - will be read
       * by system calls.
       */
      if (pe.type == PERF_TYPE_SOFTWARE || p->cap_user_rdpmc == 0)
        index = ~0;
      else
        index = p->index - 1;

      pm->rdpmc_indices[i][my_thread_index] = index;
      pm->perf_event_pages[i][my_thread_index] = (void *) p;
      pm->pm_fds[i][my_thread_index] = fd;
    }

  pm->n_active = i;
  /* Enable the main loop counter snapshot mechanism */
  vm->vlib_node_runtime_perf_counter_cb = read_current_perf_counters;
}
} pg_l2tp_header_l2_sublayer_t;

static inline void
pg_l2tp_header_init (pg_l2tp_header_t * e)
{
  pg_edit_init (&e->session_id, l2tpv3_header_t, session_id);
  pg_edit_init (&e->cookie, l2tpv3_header_t, cookie);
}

uword
unformat_pg_l2tp_header (unformat_input_t * input, va_list * args)
{
  pg_stream_t *s = va_arg (*args, pg_stream_t *);
  pg_l2tp_header_t *h;
  u32 group_index, error;
  vlib_main_t *vm = vlib_get_main ();

  h = pg_create_edit_group (s, sizeof (h[0]),
                            sizeof (l2tpv3_header_t) - sizeof (u32),
                            &group_index);
  pg_l2tp_header_init (h);

  error = 1;

  /* session id and cookie are required */
  if (!unformat (input, "L2TP: session_id %U cookie %U",
                 unformat_pg_edit, unformat_pg_number, &h->session_id,
                 unformat_pg_edit, unformat_pg_number, &h->cookie))
    {
      goto done;
    }
static ethernet_header_t stn_ip4_ethernet_header = { };
static ethernet_header_t stn_ip6_ethernet_header = { };

typedef struct
{
  clib_bihash_kv_16_8_t kv;
} stn_ip46_punt_trace_t;

static u8 *
format_stn_rule (u8 * s, va_list * args)
{
  stn_rule_t *r = va_arg (*args, stn_rule_t *);
  stn_main_t *stn = &stn_main;
  u32 indent = format_get_indent (s);
  u32 node_index = ip46_address_is_ip4 (&r->address) ?
    stn_ip4_punt.index : stn_ip6_punt.index;
  vlib_node_t *next_node =
    vlib_get_next_node (vlib_get_main (), node_index, r->next_node_index);

  s = format (s, "rule_index: %d\n", r - stn->rules);
  s = format (s, "%Uaddress: %U\n", format_white_space, indent,
              format_ip46_address, &r->address, IP46_TYPE_ANY);
  s = format (s, "%Uiface: %U (%d)\n", format_white_space, indent,
              format_vnet_sw_if_index_name, vnet_get_main (),
              r->sw_if_index, r->sw_if_index);
  s = format (s, "%Unext_node: %s (%d)", format_white_space, indent,
              next_node->name, next_node->index);
  return s;
}

static_always_inline u8 *
format_stn_ip46_punt_trace (u8 * s, va_list * args, u8 is_ipv4)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);