void
vl_register_mapped_shmem_region (svm_region_t * rp)
{
  api_main_t *am = &api_main;

  vec_add1 (am->mapped_shmem_regions, rp);
}
static void
elt_delete (heap_header_t * h, heap_elt_t * e)
{
  heap_elt_t *l = vec_end (h->elts) - 1;

  ASSERT (e >= h->elts && e <= l);

  /* Update doubly linked pointers. */
  {
    heap_elt_t *p = heap_prev (e);
    heap_elt_t *n = heap_next (e);

    if (p == e)
      {
        n->prev = 0;
        h->head = n - h->elts;
      }
    else if (n == e)
      {
        p->next = 0;
        h->tail = p - h->elts;
      }
    else
      {
        p->next = n - p;
        n->prev = p - n;
      }
  }

  /* Add to index free list or delete from end. */
  if (e < l)
    vec_add1 (h->free_elts, e - h->elts);
  else
    _vec_len (h->elts)--;
}
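/*
 * Illustrative sketch, not part of the original source: the free-list
 * idiom used by elt_delete () above, reduced to a plain u32 vector.
 * Deleting an interior element parks its index on a free list so that
 * indices held elsewhere stay stable; only a delete at the very end
 * actually shrinks the vector. Assumes the clib vector macros
 * (vec_len, vec_add1, _vec_len) are available.
 */
static inline void
sketch_delete_at (u32 * v, u32 ** free_indices, u32 index)
{
  if (index + 1 < vec_len (v))
    vec_add1 (*free_indices, index);  /* interior: park the index */
  else
    _vec_len (v) -= 1;                /* last element: really shrink */
}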
static int
flowprobe_template_add_del (u32 domain_id, u16 src_port,
                            flowprobe_record_t flags,
                            vnet_flow_data_callback_t * flow_data_callback,
                            vnet_flow_rewrite_callback_t * rewrite_callback,
                            bool is_add, u16 * template_id)
{
  flow_report_main_t *frm = &flow_report_main;
  vnet_flow_report_add_del_args_t a = {
    .rewrite_callback = rewrite_callback,
    .flow_data_callback = flow_data_callback,
    .is_add = is_add,
    .domain_id = domain_id,
    .src_port = src_port,
    .opaque.as_uword = flags,
  };
  return vnet_flow_report_add_del (frm, &a, template_id);
}

static void
flowprobe_expired_timer_callback (u32 * expired_timers)
{
  vlib_main_t *vm = vlib_get_main ();
  flowprobe_main_t *fm = &flowprobe_main;
  u32 my_cpu_number = vm->thread_index;
  int i;
  u32 poolindex;

  for (i = 0; i < vec_len (expired_timers); i++)
    {
      poolindex = expired_timers[i] & 0x7FFFFFFF;
      vec_add1 (fm->expired_passive_per_worker[my_cpu_number], poolindex);
    }
}
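/*
 * Sketch (assumption, not from the original source): the 0x7FFFFFFF mask
 * above suggests each expired-timer handle carries a pool index in its
 * low 31 bits, with the top bit reserved, e.g. for a timer id. A
 * hypothetical encode/decode pair for such a handle:
 */
static inline u32
sketch_timer_handle_encode (u32 pool_index, u32 timer_id)
{
  return ((timer_id & 1) << 31) | (pool_index & 0x7FFFFFFF);
}

static inline u32
sketch_timer_handle_pool_index (u32 handle)
{
  return handle & 0x7FFFFFFF;
}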
int
session_enqueue_dgram_connection (session_t * s, session_dgram_hdr_t * hdr,
                                  vlib_buffer_t * b, u8 proto,
                                  u8 queue_event)
{
  int enqueued = 0, rv, in_order_off;

  ASSERT (svm_fifo_max_enqueue_prod (s->rx_fifo)
          >= b->current_length + sizeof (*hdr));

  svm_fifo_enqueue (s->rx_fifo, sizeof (session_dgram_hdr_t), (u8 *) hdr);
  enqueued = svm_fifo_enqueue (s->rx_fifo, b->current_length,
                               vlib_buffer_get_current (b));
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
    {
      in_order_off = enqueued > b->current_length ? enqueued : 0;
      rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
      if (rv > 0)
        enqueued += rv;
    }
  if (queue_event)
    {
      /* Queue RX event on this fifo. Eventually these will need to be
       * flushed by calling stream_server_flush_enqueue_events () */
      session_worker_t *wrk;

      wrk = session_main_get_worker (s->thread_index);
      if (!(s->flags & SESSION_F_RX_EVT))
        {
          s->flags |= SESSION_F_RX_EVT;
          vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
        }
    }
  return enqueued;
}
static vnet_config_t *
find_config_with_features (vlib_main_t * vm,
                           vnet_config_main_t * cm,
                           vnet_config_feature_t * feature_vector)
{
  u32 last_node_index = ~0;
  vnet_config_feature_t *f;
  u32 *config_string;
  uword *p;
  vnet_config_t *c;

  config_string = cm->config_string_temp;
  cm->config_string_temp = 0;
  if (config_string)
    _vec_len (config_string) = 0;

  vec_foreach (f, feature_vector)
  {
    /* Connect node graph. */
    f->next_index = add_next (vm, cm, last_node_index, f->node_index);
    last_node_index = f->node_index;

    /* Store next index in config string. */
    vec_add1 (config_string, f->next_index);

    /* Store feature config. */
    vec_add (config_string, f->feature_config, vec_len (f->feature_config));
  }
static svmdb_client_t *
svmdb_map_internal (char *root_path, uword size)
{
  svmdb_client_t *client = 0;
  svm_map_region_args_t *a = 0;
  svm_region_t *db_rp;
  void *oldheap;
  svmdb_shm_hdr_t *hp = 0;

  vec_validate (client, 0);
  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  a->root_path = root_path;
  a->name = "/db";
  a->size = size ? size : SVMDB_DEFAULT_SIZE;
  a->flags = SVM_FLAGS_MHEAP;

  db_rp = client->db_rp = svm_region_find_or_create (a);
  ASSERT (db_rp);
  vec_free (a);

  region_lock (client->db_rp, 10);
  /* Has someone else set up the shared-memory variable table? */
  if (db_rp->user_ctx)
    {
      client->shm = (void *) db_rp->user_ctx;
      client->pid = getpid ();
      region_unlock (client->db_rp);
      ASSERT (client->shm->version == SVMDB_SHM_VERSION);
      return (client);
    }
  /* Nope, it's our problem... */

  /* Add a bogus client (pid=0) so the svm won't be deallocated */
  oldheap = svm_push_pvt_heap (db_rp);
  vec_add1 (client->db_rp->client_pids, 0);
  svm_pop_heap (oldheap);

  oldheap = svm_push_data_heap (db_rp);

  vec_validate (hp, 0);
  hp->version = SVMDB_SHM_VERSION;
  hp->namespaces[SVMDB_NAMESPACE_STRING] =
    hash_create_string (0, sizeof (uword));
  hp->namespaces[SVMDB_NAMESPACE_VEC] =
    hash_create_string (0, sizeof (uword));

  db_rp->user_ctx = hp;
  client->shm = hp;

  svm_pop_heap (oldheap);
  region_unlock (client->db_rp);
  client->pid = getpid ();

  return (client);
}
static clib_error_t *
abf_policy_cmd (vlib_main_t * vm,
                unformat_input_t * main_input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 acl_index, policy_id;
  fib_route_path_t *rpaths = NULL, rpath;
  u32 is_del;

  is_del = 0;
  acl_index = INDEX_INVALID;
  policy_id = INDEX_INVALID;

  /* Get a line of input. */
  if (!unformat_user (main_input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "acl %d", &acl_index))
        ;
      else if (unformat (line_input, "id %d", &policy_id))
        ;
      else if (unformat (line_input, "del"))
        is_del = 1;
      else if (unformat (line_input, "add"))
        is_del = 0;
      else if (unformat (line_input, "via %U",
                         unformat_fib_route_path, &rpath))
        vec_add1 (rpaths, rpath);
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  if (INDEX_INVALID == policy_id)
    {
      vlib_cli_output (vm, "Specify a Policy ID");
      return 0;
    }

  if (!is_del)
    {
      if (INDEX_INVALID == acl_index)
        {
          vlib_cli_output (vm, "ACL index must be set");
          return 0;
        }

      abf_policy_update (policy_id, acl_index, rpaths);
    }
  else
    {
      abf_policy_delete (policy_id, rpaths);
    }

  unformat_free (line_input);

  return (NULL);
}
always_inline void
free_overflow_bucket (vhash_overflow_buckets_t * ob,
                      vhash_overflow_search_bucket_t * b, u32 i)
{
  u32 o = (u32x4_union_t *) b - ob->search_buckets;

  ASSERT (o < vec_len (ob->search_buckets));
  vec_add1 (ob->free_indices, 4 * o + i);
}
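/*
 * Sketch, not part of the original source: the index arithmetic above in
 * isolation. The bucket index is recovered as a pointer difference from
 * the vector base, and a (bucket, slot) pair is flattened into a single
 * free-list entry as 4 * bucket + slot, assuming 4 slots per search
 * bucket. Names here are hypothetical.
 */
static inline u32
sketch_flatten_slot (u32 bucket_index, u32 slot)
{
  return 4 * bucket_index + slot;
}

static inline void
sketch_unflatten_slot (u32 free_index, u32 * bucket_index, u32 * slot)
{
  *bucket_index = free_index / 4;
  *slot = free_index & 3;
}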
static clib_error_t *
show_virtio_pci_fn (vlib_main_t * vm, unformat_input_t * input,
                    vlib_cli_command_t * cmd)
{
  virtio_main_t *vim = &virtio_main;
  vnet_main_t *vnm = &vnet_main;
  virtio_if_t *vif;
  clib_error_t *error = 0;
  u32 hw_if_index, *hw_if_indices = 0;
  vnet_hw_interface_t *hi;
  u8 show_descr = 0, show_device_config = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%U", unformat_vnet_hw_interface, vnm,
                    &hw_if_index))
        {
          hi = vnet_get_hw_interface (vnm, hw_if_index);
          if (virtio_device_class.index != hi->dev_class_index)
            {
              error = clib_error_return (0, "unknown input `%U'",
                                         format_unformat_error, input);
              goto done;
            }
          vec_add1 (hw_if_indices, hw_if_index);
        }
      else if (unformat (input, "descriptors") || unformat (input, "desc"))
        show_descr = 1;
      else if (unformat (input, "debug-device"))
        show_device_config = 1;
      else
        {
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, input);
          goto done;
        }
    }

  if (vec_len (hw_if_indices) == 0)
    {
      pool_foreach (vif, vim->interfaces,
                    vec_add1 (hw_if_indices, vif->hw_if_index););
/*
 * Enqueue data for delivery to the session peer. Does not notify the peer
 * of the enqueue event, but on request can queue notification events for
 * later delivery by calling stream_server_flush_enqueue_events().
 *
 * @param tc Transport connection for which the data is to be enqueued
 * @param b Buffer to be enqueued
 * @param offset Offset at which to start enqueueing if out-of-order
 * @param queue_event Flag to indicate if peer is to be notified or if event
 *                    is to be queued. The former is useful when more data is
 *                    enqueued and only one event is to be generated.
 * @param is_in_order Flag to indicate if data is in order
 * @return Number of bytes enqueued or a negative value if enqueueing failed.
 */
int
session_enqueue_stream_connection (transport_connection_t * tc,
                                   vlib_buffer_t * b, u32 offset,
                                   u8 queue_event, u8 is_in_order)
{
  session_t *s;
  int enqueued = 0, rv, in_order_off;

  s = session_get (tc->s_index, tc->thread_index);

  if (is_in_order)
    {
      enqueued = svm_fifo_enqueue (s->rx_fifo,
                                   b->current_length,
                                   vlib_buffer_get_current (b));
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT)
                         && enqueued >= 0))
        {
          in_order_off = enqueued > b->current_length ? enqueued : 0;
          rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
          if (rv > 0)
            enqueued += rv;
        }
    }
  else
    {
      rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset,
                                         b->current_length,
                                         vlib_buffer_get_current (b));
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv))
        session_enqueue_chain_tail (s, b, offset + b->current_length, 0);
      /* if something was enqueued, report even this as success for ooo
       * segment handling */
      return rv;
    }

  if (queue_event)
    {
      /* Queue RX event on this fifo. Eventually these will need to be
       * flushed by calling stream_server_flush_enqueue_events () */
      session_worker_t *wrk;

      wrk = session_main_get_worker (s->thread_index);
      if (!(s->flags & SESSION_F_RX_EVT))
        {
          s->flags |= SESSION_F_RX_EVT;
          vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
        }
    }

  return enqueued;
}
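/*
 * Sketch of the event-coalescing idiom used above, outside the session
 * layer (illustrative only; all names here are hypothetical). A pending-
 * event flag is set on the first enqueue so a session lands on the
 * per-worker flush list at most once; the flush path is expected to clear
 * the flag after generating the single notification.
 */
#define SKETCH_F_RX_EVT (1 << 0)

typedef struct
{
  u32 flags;
  u32 session_index;
} sketch_session_t;

static inline void
sketch_queue_rx_event (sketch_session_t * s, u32 ** pending_sessions)
{
  if (!(s->flags & SKETCH_F_RX_EVT))
    {
      s->flags |= SKETCH_F_RX_EVT;
      vec_add1 (*pending_sessions, s->session_index);
    }
}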
clib_error_t *
ssvm_config (vlib_main_t * vm, unformat_input_t * input)
{
  u8 *name;
  int is_master = 1;
  int i, rv;
  ssvm_eth_main_t *em = &ssvm_eth_main;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "base-va %llx", &em->next_base_va))
        ;
      else if (unformat (input, "segment-size %lld", &em->segment_size))
        em->segment_size = 1ULL << (max_log2 (em->segment_size));
      else if (unformat (input, "nbuffers %lld", &em->nbuffers))
        ;
      else if (unformat (input, "queue-elts %lld", &em->queue_elts))
        ;
      else if (unformat (input, "slave"))
        is_master = 0;
      else if (unformat (input, "%s", &name))
        {
          vec_add1 (em->names, name);
          printf ("AYXX: adding %s\n", name);
        }
      else
        break;
    }

  /* No configured instances, we're done... */
  if (vec_len (em->names) == 0)
    return 0;

  for (i = 0; i < vec_len (em->names); i++)
    {
      rv = ssvm_eth_create (em, em->names[i], is_master);
      if (rv < 0)
        return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
                                  em->names[i], rv);
    }

  return 0;
}
/*
 * vl_msg_api_trace
 */
void
vl_msg_api_trace (api_main_t * am, vl_api_trace_t * tp, void *msg)
{
  u8 **this_trace;
  u8 **old_trace;
  u8 *msg_copy;
  trace_cfg_t *cfgp;
  u16 msg_id = ntohs (*((u16 *) msg));

  cfgp = am->api_trace_cfg + msg_id;

  if (!cfgp || !cfgp->trace_enable)
    return;

  msg_copy = 0;

  if (tp->nitems == 0)
    {
      clib_warning ("tp->nitems is 0");
      return;
    }

  if (vec_len (tp->traces) < tp->nitems)
    {
      vec_add1 (tp->traces, 0);
      this_trace = tp->traces + vec_len (tp->traces) - 1;
    }
  else
    {
      tp->wrapped = 1;
      old_trace = tp->traces + tp->curindex++;
      if (tp->curindex == tp->nitems)
        tp->curindex = 0;
      vec_free (*old_trace);
      this_trace = old_trace;
    }

  vec_validate (msg_copy, cfgp->size - 1);
  clib_memcpy (msg_copy, msg, cfgp->size);
  *this_trace = msg_copy;
}
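/*
 * Sketch, not part of the original source: the wrap-around logic of
 * vl_msg_api_trace () above, reduced to its shape. The trace vector grows
 * until it holds nitems entries; after that curindex walks it circularly,
 * freeing and reusing the oldest slot. Names are hypothetical.
 */
static inline u8 **
sketch_next_trace_slot (u8 *** traces, u32 nitems, u32 * curindex,
                        u8 * wrapped)
{
  u8 **slot;

  if (vec_len (*traces) < nitems)
    {
      vec_add1 (*traces, 0);
      slot = *traces + vec_len (*traces) - 1;
    }
  else
    {
      *wrapped = 1;
      slot = *traces + (*curindex)++;
      if (*curindex == nitems)
        *curindex = 0;
      vec_free (*slot);         /* drop the oldest copy before reuse */
    }
  return slot;
}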
static void
notify_value (svmdb_value_t * v, svmdb_action_t a)
{
  int i;
  int rv;
  union sigval sv;
  u32 value;
  u32 *dead_registrations = 0;

  svmdb_notify_t *np;

  for (i = 0; i < vec_len (v->notifications); i++)
    {
      np = vec_elt_at_index (v->notifications, i);
      if (np->action == a)
        {
          value = (np->action << 28) | (np->opaque);
          sv.sival_ptr = (void *) (uword) value;
          do
            {
              rv = 0;
              if (sigqueue (np->pid, np->signum, sv) == 0)
                break;
              rv = errno;
            }
          while (rv == EAGAIN);
          if (rv == 0)
            continue;
          vec_add1 (dead_registrations, i);
        }
    }

  /* Delete highest index first, so the remaining collected indices stay
   * valid while vec_delete shifts elements down. */
  for (i = vec_len (dead_registrations) - 1; i >= 0; i--)
    {
      np = vec_elt_at_index (v->notifications, dead_registrations[i]);
      clib_warning ("dead reg pid %d sig %d action %d opaque %x",
                    np->pid, np->signum, np->action, np->opaque);
      vec_delete (v->notifications, 1, dead_registrations[i]);
    }

  vec_free (dead_registrations);
}
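/*
 * Sketch, not part of the original source: the collect-then-delete idiom
 * used by notify_value () above. Indices are gathered during iteration
 * and removed afterwards, highest first, because vec_delete () shifts the
 * elements behind the deletion point down by one.
 */
static inline void
sketch_delete_collected (u32 * v, u32 * dead_indices)
{
  int i;

  for (i = vec_len (dead_indices) - 1; i >= 0; i--)
    vec_delete (v, 1, dead_indices[i]);
}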
int
vl_map_shmem (const char *region_name, int is_vlib)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_region_t *vlib_rp, *root_rp;
  api_main_t *am = &api_main;
  int i;
  struct timespec ts, tsrem;
  char *vpe_api_region_suffix = "-vpe-api";

  clib_memset (a, 0, sizeof (*a));

  if (strstr (region_name, vpe_api_region_suffix))
    {
      u8 *root_path = format (0, "%s", region_name);
      _vec_len (root_path) = (vec_len (root_path)
                              - strlen (vpe_api_region_suffix));
      vec_terminate_c_string (root_path);
      a->root_path = (const char *) root_path;
      am->root_path = (const char *) root_path;
    }

  if (is_vlib == 0)
    {
      int tfd;
      u8 *api_name;
      /*
       * Clients wait for vpp to set up the root / API regions
       */
      if (am->root_path)
        api_name = format (0, "/dev/shm/%s-%s%c", am->root_path,
                           region_name + 1, 0);
      else
        api_name = format (0, "/dev/shm%s%c", region_name, 0);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
        {
          ts.tv_sec = 0;
          ts.tv_nsec = 10000 * 1000;    /* 10 ms */
          while (nanosleep (&ts, &tsrem) < 0)
            ts = tsrem;
          tfd = open ((char *) api_name, O_RDWR);
          if (tfd >= 0)
            break;
        }
      vec_free (api_name);
      if (tfd < 0)
        {
          clib_warning ("region init fail");
          return -2;
        }
      close (tfd);
      svm_region_init_chroot_uid_gid (am->root_path, getuid (), getgid ());
    }

  if (a->root_path != NULL)
    {
      a->name = "/vpe-api";
    }
  else
    a->name = region_name;
  a->size = am->api_size ? am->api_size : (16 << 20);
  a->flags = SVM_FLAGS_MHEAP;
  a->uid = am->api_uid;
  a->gid = am->api_gid;
  a->pvt_heap_size = am->api_pvt_heap_size;

  vlib_rp = svm_region_find_or_create (a);

  if (vlib_rp == 0)
    return (-2);

  pthread_mutex_lock (&vlib_rp->mutex);
  /* Has someone else set up the shared-memory variable table? */
  if (vlib_rp->user_ctx)
    {
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      if (is_vlib)
        {
          svm_queue_t *q;
          uword old_msg;
          /*
           * application restart. Reset cached pids, API message
           * rings, list of clients; otherwise, various things
           * fail. (e.g. queue non-empty notification)
           */

          /* ghosts keep the region from disappearing properly */
          svm_client_scan_this_region_nolock (vlib_rp);
          am->shmem_hdr->application_restarts++;
          q = am->shmem_hdr->vl_input_queue;
          am->shmem_hdr->vl_pid = getpid ();
          q->consumer_pid = am->shmem_hdr->vl_pid;
          /* Drain the input queue, freeing msgs */
          for (i = 0; i < 10; i++)
            {
              if (pthread_mutex_trylock (&q->mutex) == 0)
                {
                  pthread_mutex_unlock (&q->mutex);
                  goto mutex_ok;
                }
              ts.tv_sec = 0;
              ts.tv_nsec = 10000 * 1000;        /* 10 ms */
              while (nanosleep (&ts, &tsrem) < 0)
                ts = tsrem;
            }
          /* Mutex buggered, "fix" it */
          clib_memset (&q->mutex, 0, sizeof (q->mutex));
          clib_warning ("forcibly release main input queue mutex");

        mutex_ok:
          am->vlib_rp = vlib_rp;
          while (svm_queue_sub (q, (u8 *) & old_msg, SVM_Q_NOWAIT, 0)
                 != -2 /* queue underflow */ )
            {
              vl_msg_api_free_nolock ((void *) old_msg);
              am->shmem_hdr->restart_reclaims++;
            }
          pthread_mutex_unlock (&vlib_rp->mutex);
          root_rp = svm_get_root_rp ();
          ASSERT (root_rp);
          /* Clean up the root region client list */
          pthread_mutex_lock (&root_rp->mutex);
          svm_client_scan_this_region_nolock (root_rp);
          pthread_mutex_unlock (&root_rp->mutex);
        }
      else
        {
          pthread_mutex_unlock (&vlib_rp->mutex);
        }
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }
  /* Clients simply have to wait... */
  if (!is_vlib)
    {
      pthread_mutex_unlock (&vlib_rp->mutex);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
        {
          ts.tv_sec = 0;
          ts.tv_nsec = 10000 * 1000;    /* 10 ms */
          while (nanosleep (&ts, &tsrem) < 0)
            ts = tsrem;
          if (vlib_rp->user_ctx)
            goto ready;
        }
      /* Clean up and leave... */
      svm_region_unmap (vlib_rp);
      clib_warning ("region init fail");
      return (-2);

    ready:
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }

  /* Nope, it's our problem... */
  vl_init_shmem (vlib_rp, 0 /* default config */ ,
                 1 /* is vlib */ , 0 /* is_private_region */ );

  vec_add1 (am->mapped_shmem_regions, vlib_rp);
  return 0;
}
static void
unix_signal_handler (int signum, siginfo_t * si, ucontext_t * uc)
{
  uword fatal = 0;

  /* These come in handy when looking at core files from optimized images */
  last_signum = signum;
  last_faulting_address = (uword) si->si_addr;

  syslog_msg = format (syslog_msg, "received signal %U, PC %U",
                       format_signal, signum, format_ucontext_pc, uc);

  if (signum == SIGSEGV)
    syslog_msg = format (syslog_msg, ", faulting address %p", si->si_addr);

  switch (signum)
    {
      /* these (caught) signals cause the application to exit */
    case SIGTERM:
      /*
       * Ignore SIGTERM if it's sent before we're ready.
       */
      if (unix_main.vlib_main && unix_main.vlib_main->main_loop_exit_set)
        {
          syslog (LOG_ERR | LOG_DAEMON, "received SIGTERM, exiting...");
          unix_main.vlib_main->main_loop_exit_now = 1;
        }
      else
        syslog (LOG_ERR | LOG_DAEMON, "IGNORE early SIGTERM...");
      break;
    case SIGQUIT:
    case SIGINT:
    case SIGILL:
    case SIGBUS:
    case SIGSEGV:
    case SIGHUP:
    case SIGFPE:
    case SIGABRT:
      fatal = 1;
      break;

      /* by default, print a message and continue */
    default:
      fatal = 0;
      break;
    }

#ifdef CLIB_GCOV
  /*
   * Test framework sends SIGTERM, so we need to flush the
   * code coverage stats here.
   */
  {
    void __gcov_flush (void);
    __gcov_flush ();
  }
#endif

  /* Null terminate. */
  vec_add1 (syslog_msg, 0);

  if (fatal)
    {
      syslog (LOG_ERR | LOG_DAEMON, "%s", syslog_msg);

      /* Address of callers: outer first, inner last. */
      uword callers[15];
      uword n_callers = clib_backtrace (callers, ARRAY_LEN (callers), 0);
      int i;
      for (i = 0; i < n_callers; i++)
        {
          vec_reset_length (syslog_msg);
          syslog_msg = format (syslog_msg, "#%-2d 0x%016lx %U%c",
                               i, callers[i],
                               format_clib_elf_symbol_with_address,
                               callers[i], 0);
          syslog (LOG_ERR | LOG_DAEMON, "%s", syslog_msg);
        }

      /* have to remove SIGABRT to avoid recursion - os_exit calls abort() */
      unsetup_signal_handlers (SIGABRT);
      os_exit (1);
    }
  else
    clib_warning ("%s", syslog_msg);
}
clib_error_t *
pcap_read (pcap_main_t * pm)
{
  clib_error_t *error = 0;
  int fd, need_swap, n;
  pcap_file_header_t fh;
  pcap_packet_header_t ph;

  fd = open (pm->file_name, O_RDONLY);
  if (fd < 0)
    {
      error = clib_error_return_unix (0, "open `%s'", pm->file_name);
      goto done;
    }

  if (read (fd, &fh, sizeof (fh)) != sizeof (fh))
    {
      error = clib_error_return_unix (0, "read file header `%s'",
                                      pm->file_name);
      goto done;
    }

  need_swap = 0;
  if (fh.magic == 0xd4c3b2a1)
    {
      need_swap = 1;
#define _(t,f) fh.f = clib_byte_swap_##t (fh.f);
      foreach_pcap_file_header;
#undef _
    }

  if (fh.magic != 0xa1b2c3d4)
    {
      error = clib_error_return (0, "bad magic `%s'", pm->file_name);
      goto done;
    }

  pm->min_packet_bytes = 0;
  pm->max_packet_bytes = 0;
  while ((n = read (fd, &ph, sizeof (ph))) != 0)
    {
      u8 *data;

      if (need_swap)
        {
#define _(t,f) ph.f = clib_byte_swap_##t (ph.f);
          foreach_pcap_packet_header;
#undef _
        }

      data = vec_new (u8, ph.n_bytes_in_packet);
      if (read (fd, data, ph.n_packet_bytes_stored_in_file)
          != ph.n_packet_bytes_stored_in_file)
        {
          error = clib_error_return (0, "short read `%s'", pm->file_name);
          goto done;
        }

      if (vec_len (pm->packets_read) == 0)
        pm->min_packet_bytes = pm->max_packet_bytes = ph.n_bytes_in_packet;
      else
        {
          pm->min_packet_bytes = clib_min (pm->min_packet_bytes,
                                           ph.n_bytes_in_packet);
          pm->max_packet_bytes = clib_max (pm->max_packet_bytes,
                                           ph.n_bytes_in_packet);
        }

      vec_add1 (pm->packets_read, data);
    }

done:
  if (fd >= 0)
    close (fd);
  return error;
}
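/*
 * Sketch, not part of the original source: why the two magic numbers
 * above are enough to detect byte order. A writer stores 0xa1b2c3d4 in
 * its native order; a reader of the opposite endianness sees those bytes
 * reversed as 0xd4c3b2a1 and must byte-swap every header field. Any other
 * value means the file is not pcap.
 */
static inline int
sketch_pcap_magic_check (u32 magic, int *need_swap)
{
  *need_swap = (magic == 0xd4c3b2a1);
  return magic == 0xa1b2c3d4 || magic == 0xd4c3b2a1;    /* 1 if valid */
}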
int
test_ptclosure_main (unformat_input_t * input)
{
  test_main_t *tm = &test_main;
  u8 *item_name;
  int i, j;
  u8 **orig;
  u8 **closure;
  u8 *a_name, *b_name;
  int a_index, b_index;
  uword *p;
  u8 *this_constraint;
  int n;
  u32 *result = 0;

  tm->index_by_name = hash_create_string (0, sizeof (uword));

  n = ARRAY_LEN (items);

  for (i = 0; i < n; i++)
    {
      item_name = (u8 *) items[i];
      hash_set_mem (tm->index_by_name, item_name, i);
    }

  orig = clib_ptclosure_alloc (n);

  for (i = 0; i < ARRAY_LEN (constraints); i++)
    {
      this_constraint = format (0, "%s%c", constraints[i], 0);

      if (comma_split (this_constraint, &a_name, &b_name))
        {
          clib_warning ("couldn't split '%s'", constraints[i]);
          return 1;
        }

      p = hash_get_mem (tm->index_by_name, a_name);
      if (p == 0)
        {
          clib_warning ("couldn't find '%s'", a_name);
          return 1;
        }
      a_index = p[0];

      p = hash_get_mem (tm->index_by_name, b_name);
      if (p == 0)
        {
          clib_warning ("couldn't find '%s'", b_name);
          return 1;
        }
      b_index = p[0];

      orig[a_index][b_index] = 1;
      vec_free (this_constraint);
    }

  dump_closure (tm, "original relation", orig);

  closure = clib_ptclosure (orig);

  dump_closure (tm, "closure", closure);

  /*
   * Output partial order
   */

again:
  for (i = 0; i < n; i++)
    {
      for (j = 0; j < n; j++)
        {
          if (closure[i][j])
            goto item_constrained;
        }
      /* Item i can be output */
      vec_add1 (result, i);
      {
        int k;
        for (k = 0; k < n; k++)
          closure[k][i] = 0;
        /* "Magic" a before a, to keep from ever outputting it again */
        closure[i][i] = 1;
        goto again;
      }
    item_constrained:
      ;
    }

  if (vec_len (result) != n)
    {
      clib_warning ("no partial order exists");
      exit (1);
    }

  fformat (stdout, "Partial order:\n");

  for (i = vec_len (result) - 1; i >= 0; i--)
    {
      fformat (stdout, "%s\n", items[result[i]]);
    }

  vec_free (result);
  clib_ptclosure_free (orig);
  clib_ptclosure_free (closure);

  return 0;
}
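/*
 * Worked example (illustrative, with hypothetical data): with
 * items = { "a", "b", "c" } and constraints = { "a,b", "b,c" } (read
 * "a before b"), the closure also sets a-before-c. The output loop above
 * then repeatedly emits an item whose row is all zero: first "c", then
 * "b", then "a". Printing the result vector in reverse yields the
 * partial order a, b, c.
 */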
void
add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
{
  vec_add1 (vnet_buffer2 (b)->trajectory_trace, (u16) node_index);
}
int
test_elog_main (unformat_input_t * input)
{
  clib_error_t *error = 0;
  u32 i, n_iter, seed, max_events;
  elog_main_t _em, *em = &_em;
  u32 verbose;
  f64 min_sample_time;
  char *dump_file, *load_file, *merge_file, **merge_files;
  u8 *tag, **tags;

  n_iter = 100;
  max_events = 100000;
  seed = 1;
  verbose = 0;
  dump_file = 0;
  load_file = 0;
  merge_files = 0;
  tags = 0;
  min_sample_time = 2;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "iter %d", &n_iter))
        ;
      else if (unformat (input, "seed %d", &seed))
        ;
      else if (unformat (input, "dump %s", &dump_file))
        ;
      else if (unformat (input, "load %s", &load_file))
        ;
      else if (unformat (input, "tag %s", &tag))
        vec_add1 (tags, tag);
      else if (unformat (input, "merge %s", &merge_file))
        vec_add1 (merge_files, merge_file);
      else if (unformat (input, "verbose %=", &verbose, 1))
        ;
      else if (unformat (input, "max-events %d", &max_events))
        ;
      else if (unformat (input, "sample-time %f", &min_sample_time))
        ;
      else
        {
          error = clib_error_create ("unknown input `%U'\n",
                                     format_unformat_error, input);
          goto done;
        }
    }

#ifdef CLIB_UNIX
  if (load_file)
    {
      if ((error = elog_read_file (em, load_file)))
        goto done;
    }
  else if (merge_files)
    {
      uword i;
      elog_main_t *ems;

      vec_clone (ems, merge_files);

      elog_init (em, max_events);
      for (i = 0; i < vec_len (ems); i++)
        {
          if ((error =
               elog_read_file (i == 0 ? em : &ems[i], merge_files[i])))
            goto done;
          if (i > 0)
            {
              elog_merge (em, tags[0], &ems[i], tags[i]);
              tags[0] = 0;
            }
        }
    }
  else
#endif /* CLIB_UNIX */
    {
      f64 t[2];

      elog_init (em, max_events);
      elog_enable_disable (em, 1);
      t[0] = unix_time_now ();

      for (i = 0; i < n_iter; i++)
        {
          u32 j, n, sum;

          n = 1 + (random_u32 (&seed) % 128);
          sum = 0;
          for (j = 0; j < n; j++)
            sum += random_u32 (&seed);

          {
            ELOG_TYPE_XF (e);
            ELOG (em, e, sum);
          }

          {
            ELOG_TYPE_XF (e);
            ELOG (em, e, sum + 1);
          }

          {
            struct
            {
              u32 string_index;
              f32 f;
            } *d;
            ELOG_TYPE_DECLARE (e) = {
              .format = "fumble %s %.9f",
              .format_args = "t4f4",
              .n_enum_strings = 4,
              .enum_strings = {
                "string0", "string1", "string2", "string3",
              },
            };

            d = ELOG_DATA (em, e);
            d->string_index = sum & 3;
            d->f = (sum & 0xff) / 128.;
          }

          {
            ELOG_TYPE_DECLARE (e) = {
              .format = "bar %d.%d.%d.%d",
              .format_args = "i1i1i1i1",
            };
            ELOG_TRACK (my_track);
            u8 *d = ELOG_TRACK_DATA (em, e, my_track);
            d[0] = i + 0;
            d[1] = i + 1;
            d[2] = i + 2;
            d[3] = i + 3;
          }

          {
            ELOG_TYPE_DECLARE (e) = {
              .format = "bar `%s'",
              .format_args = "s20",
            };
            struct
            {
              char s[20];
            } *d;
            u8 *v;

            d = ELOG_DATA (em, e);
            v = format (0, "foo %d%c", i, 0);
            memcpy (d->s, v, clib_min (vec_len (v), sizeof (d->s)));
          }

          {
            ELOG_TYPE_DECLARE (e) = {
              .format = "bar `%s'",
              .format_args = "T4",
            };
            struct
            {
              u32 offset;
            } *d;

            d = ELOG_DATA (em, e);
            d->offset = elog_string (em, "string table %d", i);
          }
        }

      do
        {
          t[1] = unix_time_now ();
        }
      while (t[1] - t[0] < min_sample_time);
    }

#ifdef CLIB_UNIX
  if (dump_file)
    {
      if ((error = elog_write_file (em, dump_file)))
        goto done;
    }
#endif

  if (verbose)
    {
      elog_event_t *e, *es;
      es = elog_get_events (em);
      vec_foreach (e, es)
      {
        clib_warning ("%18.9f: %12U %U\n", e->time,
                      format_elog_track, em, e, format_elog_event, em, e);
      }
    }

done:
  if (error)
    clib_error_report (error);
  return 0;
}

#ifdef CLIB_UNIX
int
main (int argc, char *argv[])
{
  unformat_input_t i;
  int r;

  unformat_init_command_line (&i, argv);
  r = test_elog_main (&i);
  unformat_free (&i);
  return r;
}
void
scrape_and_clear_counters (perfmon_main_t * pm)
{
  int i, j, k;
  vlib_main_t *vm = pm->vlib_main;
  vlib_main_t *stat_vm;
  vlib_node_main_t *nm;
  vlib_node_t ***node_dups = 0;
  vlib_node_t **nodes;
  vlib_node_t *n;
  perfmon_capture_t *c;
  perfmon_event_config_t *current_event;
  uword *p;
  u8 *counter_name;
  u64 vectors_this_counter;

  /* snapshot the nodes, including pm counters */
  vlib_worker_thread_barrier_sync (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
        continue;

      nm = &stat_vm->node_main;

      for (i = 0; i < vec_len (nm->nodes); i++)
        {
          n = nm->nodes[i];
          vlib_node_sync_stats (stat_vm, n);
        }

      nodes = 0;
      vec_validate (nodes, vec_len (nm->nodes) - 1);
      vec_add1 (node_dups, nodes);

      /* Snapshot and clear the per-node perfmon counters */
      for (i = 0; i < vec_len (nm->nodes); i++)
        {
          n = nm->nodes[i];
          nodes[i] = clib_mem_alloc (sizeof (*n));
          clib_memcpy_fast (nodes[i], n, sizeof (*n));
          n->stats_total.perf_counter0_ticks = 0;
          n->stats_total.perf_counter1_ticks = 0;
          n->stats_total.perf_counter_vectors = 0;
          n->stats_last_clear.perf_counter0_ticks = 0;
          n->stats_last_clear.perf_counter1_ticks = 0;
          n->stats_last_clear.perf_counter_vectors = 0;
        }
    }

  vlib_worker_thread_barrier_release (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
        continue;

      nodes = node_dups[j];

      for (i = 0; i < vec_len (nodes); i++)
        {
          u8 *capture_name;

          n = nodes[i];

          if (n->stats_total.perf_counter0_ticks == 0 &&
              n->stats_total.perf_counter1_ticks == 0)
            goto skip_this_node;

          for (k = 0; k < 2; k++)
            {
              u64 counter_value, counter_last_clear;

              /*
               * We collect 2 counters at once, except for the
               * last counter when the user asks for an odd number of
               * counters
               */
              if ((pm->current_event + k)
                  >= vec_len (pm->single_events_to_collect))
                break;

              if (k == 0)
                {
                  counter_value = n->stats_total.perf_counter0_ticks;
                  counter_last_clear =
                    n->stats_last_clear.perf_counter0_ticks;
                }
              else
                {
                  counter_value = n->stats_total.perf_counter1_ticks;
                  counter_last_clear =
                    n->stats_last_clear.perf_counter1_ticks;
                }

              capture_name = format (0, "t%d-%v%c", j, n->name, 0);

              p = hash_get_mem (pm->capture_by_thread_and_node_name,
                                capture_name);

              if (p == 0)
                {
                  pool_get (pm->capture_pool, c);
                  memset (c, 0, sizeof (*c));
                  c->thread_and_node_name = capture_name;
                  hash_set_mem (pm->capture_by_thread_and_node_name,
                                capture_name, c - pm->capture_pool);
                }
              else
                {
                  c = pool_elt_at_index (pm->capture_pool, p[0]);
                  vec_free (capture_name);
                }

              /* Snapshot counters, etc. into the capture */
              current_event = pm->single_events_to_collect
                + pm->current_event + k;
              counter_name = (u8 *) current_event->name;
              vectors_this_counter = n->stats_total.perf_counter_vectors -
                n->stats_last_clear.perf_counter_vectors;

              vec_add1 (c->counter_names, counter_name);
              vec_add1 (c->counter_values,
                        counter_value - counter_last_clear);
              vec_add1 (c->vectors_this_counter, vectors_this_counter);
            }
        skip_this_node:
          clib_mem_free (n);
        }
      vec_free (nodes);
    }
  vec_free (node_dups);
}
clib_error_t *
vlib_call_all_config_functions (vlib_main_t * vm,
                                unformat_input_t * input, int is_early)
{
  clib_error_t *error = 0;
  vlib_config_function_runtime_t *c, **all;
  uword *hash = 0, *p;
  uword i;

  hash = hash_create_string (0, sizeof (uword));
  all = 0;

  c = vm->config_function_registrations;

  while (c)
    {
      hash_set_mem (hash, c->name, vec_len (all));
      vec_add1 (all, c);
      unformat_init (&c->input, 0, 0);
      c = c->next_registration;
    }

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      u8 *s, *v;

      if (!unformat (input, "%s %v", &s, &v)
          || !(p = hash_get_mem (hash, s)))
        {
          error = clib_error_create ("unknown input `%s %v'", s, v);
          goto done;
        }

      c = all[p[0]];
      if (vec_len (c->input.buffer) > 0)
        vec_add1 (c->input.buffer, ' ');
      vec_add (c->input.buffer, v, vec_len (v));
      vec_free (v);
      vec_free (s);
    }

  for (i = 0; i < vec_len (all); i++)
    {
      c = all[i];

      /* Is this an early config? Are we doing early configs? */
      if (is_early ^ c->is_early)
        continue;

      /* Already called? */
      if (hash_get (vm->init_functions_called, c->function))
        continue;
      hash_set1 (vm->init_functions_called, c->function);

      error = c->function (vm, &c->input);
      if (error)
        goto done;
    }

done:
  for (i = 0; i < vec_len (all); i++)
    {
      c = all[i];
      unformat_free (&c->input);
    }
  vec_free (all);
  hash_free (hash);
  return error;
}
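/*
 * Sketch, not part of the original source: the append-with-separator
 * idiom above, in isolation. When the same registration name appears more
 * than once in the input, its captured buffers are concatenated with a
 * single space, so the config function later sees one combined unformat
 * input. Names here are hypothetical.
 */
static inline void
sketch_append_config (u8 ** buffer, u8 * v)
{
  if (vec_len (*buffer) > 0)
    vec_add1 (*buffer, ' ');
  vec_add (*buffer, v, vec_len (v));
}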
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;

  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;

  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      /* clear the parked list, not the lookup pool itself */
      vec_reset_length (xd->parked_lookup_indexes);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      /* the stored private data must be a valid flow entry index */
      ASSERT (*private_data < vec_len (xd->flow_entries));
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
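/*
 * Sketch, not part of the original source: the spec/mask convention used
 * throughout dpdk_flow_add () above. Each match item is a two-element
 * array where element [0] holds the value and element [1] the mask, so
 * item->spec points at the array and item->mask at element 1. For example
 * (hypothetical values), matching any TCP source port but exactly
 * destination port 443:
 */
static inline void
sketch_tcp_dst_port_match (struct rte_flow_item *item,
                           struct rte_flow_item_tcp tcp[2])
{
  tcp[0].hdr.src_port = 0;                      /* value ignored... */
  tcp[1].hdr.src_port = 0;                      /* ...mask is zero */
  tcp[0].hdr.dst_port = clib_host_to_net_u16 (443);
  tcp[1].hdr.dst_port = 0xffff;                 /* exact match */
  item->type = RTE_FLOW_ITEM_TYPE_TCP;
  item->spec = tcp;
  item->mask = tcp + 1;
}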
static clib_error_t *
lisp_gpe_add_del_fwd_entry_command_fn (vlib_main_t * vm,
                                       unformat_input_t * input,
                                       vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 is_add = 1;
  ip_address_t lloc, rloc, *llocs = 0, *rlocs = 0;
  clib_error_t *error = 0;
  gid_address_t _reid, *reid = &_reid, _leid, *leid = &_leid;
  u8 reid_set = 0, leid_set = 0, is_negative = 0, vrf_set = 0, vni_set = 0;
  u32 vni, vrf, action = ~0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "del"))
        is_add = 0;
      else if (unformat (line_input, "add"))
        is_add = 1;
      else if (unformat (line_input, "leid %U", unformat_gid_address, leid))
        {
          leid_set = 1;
        }
      else if (unformat (line_input, "reid %U", unformat_gid_address, reid))
        {
          reid_set = 1;
        }
      else if (unformat (line_input, "vni %u", &vni))
        {
          gid_address_vni (leid) = vni;
          gid_address_vni (reid) = vni;
          vni_set = 1;
        }
      else if (unformat (line_input, "vrf %u", &vrf))
        {
          vrf_set = 1;
        }
      else if (unformat (line_input, "negative action %U",
                         unformat_negative_mapping_action, &action))
        {
          is_negative = 1;
        }
      else if (unformat (line_input, "lloc %U rloc %U",
                         unformat_ip_address, &lloc,
                         unformat_ip_address, &rloc))
        {
          /* TODO: support p and w */
          vec_add1 (llocs, lloc);
          vec_add1 (rlocs, rloc);
        }
      else
        {
          error = unformat_parse_error (line_input);
          goto done;
        }
    }
  unformat_free (line_input);

  if (!vni_set || !vrf_set)
    {
      error = clib_error_return (0, "vni and vrf must be set!");
      goto done;
    }

  if (!reid_set)
    {
      error = clib_error_return (0, "remote eid must be set!");
      goto done;
    }

  if (is_negative)
    {
      if (~0 == action)
        {
          error = clib_error_return (0,
                                     "no action set for negative tunnel!");
          goto done;
        }
    }
  else
    {
      if (vec_len (llocs) == 0)
        {
          error = clib_error_return (0, "expected ip4/ip6 locators.");
          goto done;
        }

      if (vec_len (llocs) != 1)
        {
          error = clib_error_return (0,
                                     "multihoming not supported for now!");
          goto done;
        }
    }

  if (!leid_set)
    {
      /* if leid is not set, make sure it has the same AFI as reid */
      gid_address_type (leid) = gid_address_type (reid);
      if (GID_ADDR_IP_PREFIX == gid_address_type (reid))
        gid_address_ip_version (leid) = gid_address_ip_version (reid);
    }

  /* add fwd entry */
  vnet_lisp_gpe_add_del_fwd_entry_args_t _a, *a = &_a;
  memset (a, 0, sizeof (a[0]));

  a->is_add = is_add;
  a->vni = vni;
  a->table_id = vrf;
  gid_address_copy (&a->seid, leid);
  gid_address_copy (&a->deid, reid);

  if (!is_negative)
    {
      a->slocator = llocs[0];
      a->dlocator = rlocs[0];
    }

  rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0);
  if (0 != rv)
    {
      error = clib_error_return (0, "failed to %s gpe tunnel!",
                                 is_add ? "add" : "delete");
    }

done:
  vec_free (llocs);
  vec_free (rlocs);
  return error;
}