int main (int argc, char **argv) { unformat_input_t input; char *chroot_path = 0; u8 *chroot_path_u8; int interval = 0; f64 *vector_ratep, *rx_ratep, *sig_error_ratep; pid_t *vpp_pidp; svmdb_map_args_t _ma, *ma = &_ma; int uid, gid, rv; struct passwd _pw, *pw; struct group _grp, *grp; char *s, buf[128]; unformat_init_command_line (&input, argv); uid = geteuid (); gid = getegid (); while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT) { if (unformat (&input, "chroot %s", &chroot_path_u8)) { chroot_path = (char *) chroot_path_u8; } else if (unformat (&input, "interval %d", &interval)) ; else if (unformat (&input, "uid %d", &uid)) ; else if (unformat (&input, "gid %d", &gid)) ; else if (unformat (&input, "uid %s", &s)) { /* lookup the username */ pw = NULL; rv = getpwnam_r (s, &_pw, buf, sizeof (buf), &pw); if (rv < 0) { fformat (stderr, "cannot fetch username %s", s); exit (1); } if (pw == NULL) { fformat (stderr, "username %s does not exist", s); exit (1); } vec_free (s); uid = pw->pw_uid; } else if (unformat (&input, "gid %s", &s)) { /* lookup the group name */ grp = NULL; rv = getgrnam_r (s, &_grp, buf, sizeof (buf), &grp); if (rv != 0) { fformat (stderr, "cannot fetch group %s", s); exit (1); } if (grp == NULL) { fformat (stderr, "group %s does not exist", s); exit (1); } vec_free (s); gid = grp->gr_gid; } else { fformat (stderr, "usage: vpp_get_metrics [chroot <path>] [interval <nn>]\n"); exit (1); } } setup_signal_handlers (); clib_memset (ma, 0, sizeof (*ma)); ma->root_path = chroot_path; ma->uid = uid; ma->gid = gid; c = svmdb_map (ma); vpp_pidp = svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_pid"); vector_ratep = svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_vector_rate"); rx_ratep = svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_input_rate"); sig_error_ratep = svmdb_local_get_variable_reference (c, SVMDB_NAMESPACE_VEC, "vpp_sig_error_rate"); /* * Make sure vpp is actually running. 
Otherwise, there's every * chance that the database region will be wiped out by the * process monitor script */ if (vpp_pidp == 0 || vector_ratep == 0 || rx_ratep == 0 || sig_error_ratep == 0) { fformat (stdout, "vpp not running\n"); exit (1); } do { /* * Once vpp exits, the svm db region will be recreated... * Can't use kill (*vpp_pidp, 0) if running as non-root / * accessing the shared-VM database via group perms. */ if (*vpp_pidp == 0) { fformat (stdout, "vpp not running\n"); exit (1); } fformat (stdout, "%d: vpp_vector_rate=%.2f, vpp_input_rate=%f, vpp_sig_error_rate=%f\n", *vpp_pidp, *vector_ratep, *rx_ratep, *sig_error_ratep); if (interval) sleep (interval); if (signal_received) break; } while (interval); svmdb_unmap (c); exit (0); }
/*
 * Randomized stress test for the clib mheap allocator: repeatedly
 * allocates and frees objects of random size (optionally randomly
 * aligned), optionally filling each object with a verifiable pattern.
 * Returns 0 on success, non-zero on bad CLI input.
 */
int
test_mheap_main (unformat_input_t * input)
{
  int i, j, k, n_iterations;
  void *h, *h_mem;
  uword *objects = 0;
  u32 objects_used, really_verbose, n_objects, max_object_size;
  u32 check_mask, seed, trace, use_vm;
  u32 print_every = 0;
  u32 *data;
  mheap_t *mh;

  /* Validation flags. */
  check_mask = 0;
#define CHECK_VALIDITY 1
#define CHECK_DATA 2
#define CHECK_ALIGN 4
#define TEST1 8

  n_iterations = 10;
  seed = 0;
  max_object_size = 100;
  n_objects = 1000;
  trace = 0;
  really_verbose = 0;
  use_vm = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      /* Each unformat returns non-zero on match; all-zero means the
         token matched nothing we understand. */
      if (0 == unformat (input, "iter %d", &n_iterations)
	  && 0 == unformat (input, "count %d", &n_objects)
	  && 0 == unformat (input, "size %d", &max_object_size)
	  && 0 == unformat (input, "seed %d", &seed)
	  && 0 == unformat (input, "print %d", &print_every)
	  && 0 == unformat (input, "validdata %|", &check_mask,
			    CHECK_DATA | CHECK_VALIDITY)
	  && 0 == unformat (input, "valid %|", &check_mask, CHECK_VALIDITY)
	  && 0 == unformat (input, "verbose %=", &really_verbose, 1)
	  && 0 == unformat (input, "trace %=", &trace, 1)
	  && 0 == unformat (input, "vm %=", &use_vm, 1)
	  && 0 == unformat (input, "align %|", &check_mask, CHECK_ALIGN)
	  && 0 == unformat (input, "test1 %|", &check_mask, TEST1))
	{
	  clib_warning ("unknown input `%U'", format_unformat_error, input);
	  return 1;
	}
    }

  /* Zero seed means use default. */
  if (!seed)
    seed = random_default_seed ();

  if (check_mask & TEST1)
    {
      return test1 ();
    }

  if_verbose
    ("testing %d iterations, %d %saligned objects, max. size %d, seed %d",
     n_iterations, n_objects,
     (check_mask & CHECK_ALIGN) ? "randomly " : "un", max_object_size, seed);

  vec_resize (objects, n_objects);
  if (vec_bytes (objects) > 0)	/* stupid warning be gone */
    clib_memset (objects, ~0, vec_bytes (objects));
  objects_used = 0;

  /* Allocate initial heap. */
  {
    uword size =
      max_pow2 (2 * n_objects * max_object_size * sizeof (data[0]));
    h_mem = clib_mem_alloc (size);
    if (!h_mem)
      return 0;
    h = mheap_alloc (h_mem, size);
  }

  if (trace)
    mheap_trace (h, trace);

  mh = mheap_header (h);

  if (use_vm)
    mh->flags &= ~MHEAP_FLAG_DISABLE_VM;
  else
    mh->flags |= MHEAP_FLAG_DISABLE_VM;

  if (check_mask & CHECK_VALIDITY)
    mh->flags |= MHEAP_FLAG_VALIDATE;

  for (i = 0; i < n_iterations; i++)
    {
      /* Pick a slot; near the end of the run, prefer occupied slots so
         everything eventually gets freed. */
      while (1)
	{
	  j = random_u32 (&seed) % vec_len (objects);
	  if (objects[j] != ~0 || i + objects_used < n_iterations)
	    break;
	}

      if (objects[j] != ~0)
	{
	  mheap_put (h, objects[j]);
	  objects_used--;
	  objects[j] = ~0;
	}
      else
	{
	  uword size, align, align_offset;

	  size = (random_u32 (&seed) % max_object_size) * sizeof (data[0]);
	  align = align_offset = 0;
	  if (check_mask & CHECK_ALIGN)
	    {
	      align = 1 << (random_u32 (&seed) % 10);
	      align_offset =
		round_pow2 (random_u32 (&seed) & (align - 1), sizeof (u32));
	    }

	  h = mheap_get_aligned (h, size, align, align_offset, &objects[j]);

	  if (align > 0)
	    ASSERT (0 == ((objects[j] + align_offset) & (align - 1)));

	  ASSERT (objects[j] != ~0);
	  objects_used++;

	  /* Set newly allocated object with test data. */
	  if (check_mask & CHECK_DATA)
	    {
	      uword len;

	      data = (void *) h + objects[j];
	      len = mheap_len (h, data);

	      ASSERT (size <= mheap_data_bytes (h, objects[j]));

	      data[0] = len;
	      for (k = 1; k < len; k++)
		data[k] = objects[j] + k;
	    }
	}

      /* Verify that all used objects have correct test data. */
      /* FIX: was the magic number 2; use the CHECK_DATA macro defined
         above so the flag and the test cannot drift apart. */
      if (check_mask & CHECK_DATA)
	{
	  for (j = 0; j < vec_len (objects); j++)
	    if (objects[j] != ~0)
	      {
		u32 *data = h + objects[j];
		uword len = data[0];
		for (k = 1; k < len; k++)
		  ASSERT (data[k] == objects[j] + k);
	      }
	}
      if (print_every != 0 && i > 0 && (i % print_every) == 0)
	fformat (stderr, "iteration %d: %U\n", i, format_mheap, h,
		 really_verbose);
    }

  /* NOTE(review): 'verbose' here is presumably a file-scope global or
     macro distinct from 'really_verbose' — confirm against the rest of
     the file. */
  if (verbose)
    fformat (stderr, "%U\n", format_mheap, h, really_verbose);
  mheap_free (h);
  clib_mem_free (h_mem);
  vec_free (objects);
  return 0;
}
/*
 * Destroy a loaded program: release the owned filename string, every
 * dynamic vector it carries, and finally the program object itself.
 */
void prog_delete(qc_program *prog)
{
    if (prog->filename)
        mem_d(prog->filename);

    /* loaded image sections */
    vec_free(prog->code);
    vec_free(prog->defs);
    vec_free(prog->fields);
    vec_free(prog->functions);
    vec_free(prog->strings);
    vec_free(prog->globals);

    /* runtime / VM state */
    vec_free(prog->entitydata);
    vec_free(prog->entitypool);
    vec_free(prog->localstack);
    vec_free(prog->stack);
    vec_free(prog->profile);

    mem_d(prog);
}
/*
 * Multi-pass assembler driver.  Repeatedly runs assembleSinglePass()
 * over 'source' into 'dest', carrying guessed (forward-referenced)
 * symbol values between passes, until a pass completes with every guess
 * correct, a pass fails outright, or the guess history starts looping.
 *
 * Returns 0 on success, non-zero on failure (-1 specifically on loop
 * detection).  NOTE(review): 's' is presumably a file-scope session
 * struct holding symbol tables and guess state — confirm against the
 * rest of the file.
 */
int assemble(struct membuf *source, struct membuf *dest)
{
    struct vec guesses_history[1];
    struct map guesses_storage[1];
    int dest_pos;
    int result;

    dump_sym_table(LOG_DEBUG, s->initial_symbols);
    vec_init(guesses_history, sizeof(struct map));
    s->guesses = NULL;
    /* remember where our output starts so failed passes can be rewound */
    dest_pos = membuf_memlen(dest);
    for(;;)
    {
        /* reset symbol table and named buffers to their pre-pass state */
        map_put_all(s->sym_table, s->initial_symbols);
        named_buffer_copy(s->named_buffer, s->initial_named_buffer);
        map_init(guesses_storage);
        if(s->guesses != NULL)
        {
            /* copy updated guesses from latest pass */
            map_put_all(guesses_storage, s->guesses);
        }
        s->guesses = guesses_storage;
        result = assembleSinglePass(source, dest);
        if(result != 0)
        {
            /* the assemble pass failed */
            break;
        }
        /* check if any guessed symbols was wrong and update them
         * to their actual value */
        if(wasFinalPass())
        {
            /* The assemble pass succeeded without any wrong guesses,
             * we're done */
            break;
        }
        if(loopDetect(guesses_history))
        {
            /* More passes would only get us into a loop */
            LOG(LOG_VERBOSE, ("Aborting due to loop.\n"));
            result = -1;
            break;
        }
        LOG(LOG_VERBOSE, ("Trying another pass.\n"));
        /* allocate storage for the guesses in the history vector */
        s->guesses = vec_push(guesses_history, s->guesses);
        parse_reset();
        /* discard this pass's output before retrying */
        membuf_truncate(dest, dest_pos);
    }
    /* free the working storage, then the history (each entry via map_free) */
    map_free(guesses_storage);
    vec_free(guesses_history, (cb_free*)map_free);
    s->guesses = NULL;
    return result;
}
static void vl_msg_api_process_file (vlib_main_t *vm, u8 *filename, u32 first_index, u32 last_index, vl_api_replay_t which) { vl_api_trace_file_header_t * hp; int i, fd; struct stat statb; size_t file_size; u8 *msg; u8 endian_swap_needed = 0; api_main_t * am = &api_main; static u8 *tmpbuf; u32 nitems; void **saved_print_handlers = 0; fd = open ((char *) filename, O_RDONLY); if (fd < 0) { vlib_cli_output (vm, "Couldn't open %s\n", filename); return; } if (fstat(fd, &statb) < 0) { vlib_cli_output (vm, "Couldn't stat %s\n", filename); return; } if (! (statb.st_mode & S_IFREG) || (statb.st_size < sizeof (*hp))) { vlib_cli_output (vm, "File not plausible: %s\n", filename); return; } file_size = statb.st_size; file_size = (file_size + 4095) & ~(4096); hp = mmap (0, file_size, PROT_READ, MAP_PRIVATE, fd, 0); if (hp == (vl_api_trace_file_header_t *)MAP_FAILED) { vlib_cli_output (vm, "mmap failed: %s\n", filename); close(fd); return; } close(fd); if ((clib_arch_is_little_endian && hp->endian == VL_API_BIG_ENDIAN) || (clib_arch_is_big_endian && hp->endian == VL_API_LITTLE_ENDIAN)) endian_swap_needed = 1; if (endian_swap_needed) nitems = ntohl(hp->nitems); else nitems = hp->nitems; if (last_index == (u32) ~0) { last_index = nitems - 1; } if (first_index >= nitems || last_index >= nitems) { vlib_cli_output (vm, "Range (%d, %d) outside file range (0, %d)\n", first_index, last_index, nitems-1); return; } if (hp->wrapped) vlib_cli_output (vm, "Note: wrapped/incomplete trace, results may vary\n"); if (which == CUSTOM_DUMP) { saved_print_handlers = (void **) vec_dup (am->msg_print_handlers); vl_msg_api_custom_dump_configure (am); } msg = (u8 *)(hp+1); for (i = 0; i < first_index; i++) { trace_cfg_t *cfgp; int size; u16 msg_id; if (clib_arch_is_little_endian) msg_id = ntohs(*((u16 *)msg)); else msg_id = *((u16 *)msg); cfgp = am->api_trace_cfg + msg_id; if (!cfgp) { vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id); return; } size = cfgp->size; msg += size; } for (; i 
<= last_index; i++) { trace_cfg_t *cfgp; u16 *msg_idp; u16 msg_id; int size; if (which == DUMP) vlib_cli_output (vm, "---------- trace %d -----------\n", i); if (clib_arch_is_little_endian) msg_id = ntohs(*((u16 *)msg)); else msg_id = *((u16 *)msg); cfgp = am->api_trace_cfg + msg_id; if (!cfgp) { vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id); return; } size = cfgp->size; /* Copy the buffer (from the read-only mmap'ed file) */ vec_validate (tmpbuf, size-1 + sizeof(uword)); memcpy (tmpbuf+sizeof(uword), msg, size); memset (tmpbuf, 0xf, sizeof(uword)); /* * Endian swap if needed. All msg data is supposed to be * in network byte order. All msg handlers are supposed to * know that. The generic message dumpers don't know that. * One could fix apigen, I suppose. */ if ((which == DUMP && clib_arch_is_little_endian) || endian_swap_needed) { void (*endian_fp)(void *); if (msg_id >= vec_len (am->msg_endian_handlers) || (am->msg_endian_handlers[msg_id] == 0)) { vlib_cli_output (vm, "Ugh: msg id %d no endian swap\n", msg_id); return; } endian_fp = am->msg_endian_handlers[msg_id]; (*endian_fp)(tmpbuf+sizeof(uword)); } /* msg_id always in network byte order */ if (clib_arch_is_little_endian) { msg_idp = (u16 *)(tmpbuf+sizeof(uword)); *msg_idp = msg_id; } switch (which) { case CUSTOM_DUMP: case DUMP: if (msg_id < vec_len(am->msg_print_handlers) && am->msg_print_handlers [msg_id]) { u8 *(*print_fp)(void *, void *); print_fp = (void *)am->msg_print_handlers[msg_id]; (*print_fp)(tmpbuf+sizeof(uword), vm); } else { vlib_cli_output (vm, "Skipping msg id %d: no print fcn\n", msg_id); break; } break; case INITIALIZERS: if (msg_id < vec_len(am->msg_print_handlers) && am->msg_print_handlers [msg_id]) { u8 * s; int j; u8 *(*print_fp)(void *, void *); print_fp = (void *)am->msg_print_handlers[msg_id]; vlib_cli_output (vm, "/*"); (*print_fp)(tmpbuf+sizeof(uword), vm); vlib_cli_output (vm, "*/\n"); s = format (0, "static u8 * vl_api_%s_%d[%d] = {", am->msg_names[msg_id], i, 
am->api_trace_cfg[msg_id].size); for (j = 0; j < am->api_trace_cfg[msg_id].size; j++) { if ((j & 7) == 0) s = format (s, "\n "); s = format (s, "0x%02x,", tmpbuf[sizeof(uword)+j]); } s = format (s, "\n};\n%c", 0); vlib_cli_output (vm, (char *)s); vec_free(s); } break; case REPLAY: if (msg_id < vec_len(am->msg_print_handlers) && am->msg_print_handlers [msg_id] && cfgp->replay_enable) { void (*handler)(void *); handler = (void *)am->msg_handlers[msg_id]; if (!am->is_mp_safe[msg_id]) vl_msg_api_barrier_sync(); (*handler)(tmpbuf+sizeof(uword)); if (!am->is_mp_safe[msg_id]) vl_msg_api_barrier_release(); } else { if (cfgp->replay_enable) vlib_cli_output (vm, "Skipping msg id %d: no handler\n", msg_id); break; } break; } _vec_len(tmpbuf) = 0; msg += size; } if (saved_print_handlers) { memcpy (am->msg_print_handlers, saved_print_handlers, vec_len(am->msg_print_handlers) * sizeof (void *)); vec_free (saved_print_handlers); } munmap (hp, file_size); }
/* Release a memstack; it is vector-backed, so vec_free releases all storage. */
inline void lmn_memstack_free(LmnMemStack memstack) { vec_free(memstack); }
/* Subtree version of score test.
 *
 * For each feature in 'gff', fits a null (whole-tree) scale by 1-d
 * Newton optimization, computes the subtree-scale derivative under the
 * null fit, and forms a score-test statistic against the precomputed
 * Fisher-information grid.  Optional output arrays (feat_pvals,
 * feat_null_scales, feat_derivs, feat_sub_derivs, feat_teststats) are
 * filled per-feature when non-NULL. */
void ff_score_tests_sub(TreeModel *mod, MSA *msa, GFF_Set *gff,
                        mode_type mode, double *feat_pvals,
                        double *feat_null_scales, double *feat_derivs,
                        double *feat_sub_derivs, double *feat_teststats,
                        FILE *logf) {
  int i;
  FeatFitData *d, *d2;
  Vector *grad = vec_new(2);
  /* FIX: 'fim' was initialized with mat_new(2, 2), but that matrix was
     never used or freed — fim is unconditionally reassigned from
     col_get_fim_sub() before its first use, leaking the initial
     allocation.  Initialize to NULL instead. */
  Matrix *fim = NULL;
  double lnl, teststat;
  FimGrid *grid;
  List *inside=NULL, *outside=NULL;
  TreeModel *modcpy = tm_create_copy(mod);  /* need separate copy of tree
                                               model with different
                                               internal scaling data for
                                               supertree/subtree case */

  /* init FeatFitData -- one for null model, one for alt */
  d = ff_init_fit_data(modcpy, msa, ALL, NNEUT, FALSE);
  d2 = ff_init_fit_data(mod, msa, SUBTREE, NNEUT, FALSE);
  /* mod has the subtree info, modcpy does not */

  /* precompute Fisher information matrices for a grid of scale values */
  grid = col_fim_grid_sub(mod);

  /* prepare lists of leaves inside and outside root, for use in
     checking for informative substitutions */
  if (mod->subtree_root != NULL) {
    inside = lst_new_ptr(mod->tree->nnodes);
    outside = lst_new_ptr(mod->tree->nnodes);
    tr_partition_leaves(mod->tree, mod->subtree_root, inside, outside);
  }

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    checkInterrupt();
    d->feat = lst_get_ptr(gff->features, i);

    /* first check for informative substitution data in feature; if
       none, don't waste time computing likelihoods */
    if (!ff_has_data_sub(mod, msa, d->feat, inside, outside)) {
      teststat = 0;
      vec_zero(grad);
    }
    else {
      vec_set(d->cdata->params, 0, d->cdata->init_scale);
      opt_newton_1d(ff_likelihood_wrapper_1d, &d->cdata->params->data[0], d,
                    &lnl, SIGFIGS, d->cdata->lb->data[0],
                    d->cdata->ub->data[0], logf, NULL, NULL);
      /* turns out to be faster to use numerical rather than exact
         derivatives (judging by col case) */

      d2->feat = d->feat;
      d2->cdata->mod->scale = d->cdata->params->data[0];
      d2->cdata->mod->scale_sub = 1;
      tm_set_subst_matrices(d2->cdata->mod);
      ff_scale_derivs_subtree(d2, grad, NULL, d2->cdata->fels_scratch);

      fim = col_get_fim_sub(grid, d2->cdata->mod->scale);
      mat_scale(fim, d->feat->end - d->feat->start + 1);
      /* scale column-by-column FIM by length of feature (expected
         values are additive) */

      teststat = grad->data[1]*grad->data[1] /
        (fim->data[1][1] - fim->data[0][1]*fim->data[1][0]/fim->data[0][0]);

      if (teststat < 0) {
        fprintf(stderr, "WARNING: teststat < 0 (%f)\n", teststat);
        teststat = 0;
      }

      if ((mode == ACC && grad->data[1] < 0) ||
          (mode == CON && grad->data[1] > 0))
        teststat = 0;           /* derivative points toward boundary;
                                   truncate at 0 */

      mat_free(fim);
    }

    if (feat_pvals != NULL) {
      if (mode == NNEUT || mode == CONACC)
        feat_pvals[i] = chisq_cdf(teststat, 1, FALSE);
      else
        feat_pvals[i] = half_chisq_cdf(teststat, 1, FALSE);
      /* assumes 50:50 mix of chisq and point mass at zero */

      if (feat_pvals[i] < 1e-20)
        feat_pvals[i] = 1e-20;
      /* approx limit of eval of tail prob; pvals of 0 cause problems */

      if (mode == CONACC && grad->data[1] > 0)
        feat_pvals[i] *= -1;    /* mark as acceleration */
    }

    /* store scales and log likelihood ratios if necessary */
    if (feat_null_scales != NULL)
      feat_null_scales[i] = d->cdata->params->data[0];
    if (feat_derivs != NULL)
      feat_derivs[i] = grad->data[0];
    if (feat_sub_derivs != NULL)
      feat_sub_derivs[i] = grad->data[1];
    if (feat_teststats != NULL)
      feat_teststats[i] = teststat;
  }

  ff_free_fit_data(d);
  ff_free_fit_data(d2);
  vec_free(grad);
  modcpy->estimate_branchlens = TM_BRANCHLENS_ALL;
  /* have to revert for tm_free to work correctly */
  tm_free(modcpy);
  col_free_fim_grid(grid);
  if (inside != NULL) lst_free(inside);
  if (outside != NULL) lst_free(outside);
}
void stat_mem_deallocate(void *ptr, size_t line, const char *file) { stat_mem_block_t *info = NULL; char *ident = (char *)ptr - IDENT_SIZE; if (GMQCC_UNLIKELY(!ptr)) return; /* Validate usage */ VALGRIND_MAKE_MEM_DEFINED(ident, IDENT_SIZE); if (!strcmp(ident, IDENT_VEC)) { vector_t *vec = (vector_t*)((char *)ptr - IDENT_VEC_TOP); stat_mem_block_t *block = (stat_mem_block_t*)((char *)vec - IDENT_MEM_TOP); VALGRIND_MAKE_MEM_DEFINED(block, sizeof(stat_mem_block_t)); con_err("internal warning: invalid use of mem_d:\n"); con_err("internal warning: vector (used elements: %u, allocated elements: %u)\n", (unsigned)vec->used, (unsigned)vec->allocated ); con_err("internal warning: vector was last (re)allocated with (size: %u (bytes), at location: %s:%u)\n", (unsigned)block->size, block->file, (unsigned)block->line ); con_err("internal warning: released with wrong routine at %s:%u\n", file, (unsigned)line); con_err("internal warning: forwarding to vec_free, please fix it\n"); VALGRIND_MAKE_MEM_NOACCESS(block, sizeof(stat_mem_block_t)); VALGRIND_MAKE_MEM_NOACCESS(ident, IDENT_SIZE); vec_free(ptr); return; } VALGRIND_MAKE_MEM_NOACCESS(ident, IDENT_SIZE); info = (stat_mem_block_t*)((char *)ptr - IDENT_MEM_TOP); /* * we need access to the redzone that represents the info block * so lets do that. 
*/ VALGRIND_MAKE_MEM_DEFINED(info, IDENT_MEM_TOP); stat_mem_deallocated += info->size; stat_mem_high -= info->size; stat_mem_deallocated_total ++; if (info->prev) { /* just need access for a short period */ VALGRIND_MAKE_MEM_DEFINED(info->prev, IDENT_MEM_TOP); info->prev->next = info->next; /* don't need access anymore */ VALGRIND_MAKE_MEM_NOACCESS(info->prev, IDENT_MEM_TOP); } if (info->next) { /* just need access for a short period */ VALGRIND_MAKE_MEM_DEFINED(info->next, IDENT_MEM_TOP); info->next->prev = info->prev; /* don't need access anymore */ VALGRIND_MAKE_MEM_NOACCESS(info->next, IDENT_MEM_TOP); } /* move ahead */ if (info == stat_mem_block_root) stat_mem_block_root = info->next; free(info); VALGRIND_MAKE_MEM_NOACCESS(info, IDENT_MEM_TOP); VALGRIND_FREELIKE_BLOCK(ptr, IDENT_MEM_TOP); }
/*
 * Build one spoiler table section into 'doc': collect standard
 * artifacts, random artifacts, and/or egos (per 'options'), filtered by
 * 'pred', score each entry, then emit a sorted listing plus per-category
 * count/average/best summary lines.
 */
static void _spoil_table_aux(doc_ptr doc, cptr title, _obj_p pred, int options)
{
    int i;
    vec_ptr entries = vec_alloc(free);  /* owns entries; freed via free() */
    int ct_std = 0, ct_rnd = 0, ct_ego = 0;
    int score_std = 0, score_rnd = 0, score_ego = 0;
    int max_score_std = 0, max_score_rnd = 0, max_score_ego = 0;

    /* Standard (fixed) artifacts */
    if ((options & _SPOIL_ARTS) && !random_artifacts)
    {
        for (i = 1; i < max_a_idx; ++i)
        {
            object_type forge = {0};
            _art_info_ptr entry;
            if (!p_ptr->wizard && (a_info[i].gen_flags & OFG_QUESTITEM)) continue;
            if (!create_named_art_aux(i, &forge)) continue;
            if ((options & _SPOIL_EGOS) && !a_info[i].found) continue; /* Hack */
            if (pred && !pred(&forge)) continue;
            obj_identify_fully(&forge);
            entry = malloc(sizeof(_art_info_t));
            entry->id = i;
            /* Androids value gear by experience granted, falling back to
               real value when that is zero */
            if (p_ptr->prace == RACE_ANDROID)
            {
                entry->score = android_obj_exp(&forge);
                if (!entry->score) entry->score = obj_value_real(&forge);
            }
            else
                entry->score = obj_value_real(&forge);
            object_desc(entry->name, &forge, OD_COLOR_CODED);
            entry->k_idx = forge.k_idx;
            vec_add(entries, entry);
            /* only found artifacts contribute to the summary stats */
            if (a_info[entry->id].found)
            {
                ct_std++;
                score_std += entry->score;
                if (entry->score > max_score_std) max_score_std = entry->score;
            }
        }
    }
    /* Random artifacts gathered this game.
       NOTE(review): the vec_ptr returned by stats_rand_arts()/stats_egos()
       is not freed here — presumably ownership stays with the stats
       module; confirm. */
    if (options & _SPOIL_RAND_ARTS)
    {
        vec_ptr v = stats_rand_arts();
        for (i = 0; i < vec_length(v); i++)
        {
            object_type *o_ptr = vec_get(v, i);
            _art_info_ptr entry;
            if (pred && !pred(o_ptr)) continue;
            entry = malloc(sizeof(_art_info_t));
            entry->id = ART_RANDOM;
            if (p_ptr->prace == RACE_ANDROID)
            {
                entry->score = android_obj_exp(o_ptr);
                if (!entry->score) entry->score = obj_value_real(o_ptr);
            }
            else
                entry->score = obj_value_real(o_ptr);
            object_desc(entry->name, o_ptr, OD_COLOR_CODED);
            entry->k_idx = o_ptr->k_idx;
            vec_add(entries, entry);
            ct_rnd++;
            score_rnd += entry->score;
            if (entry->score > max_score_rnd) max_score_rnd = entry->score;
        }
    }
    /* Ego items */
    if (options & _SPOIL_EGOS)
    {
        vec_ptr v = stats_egos();
        for (i = 0; i < vec_length(v); i++)
        {
            object_type *o_ptr = vec_get(v, i);
            _art_info_ptr entry;
            if (pred && !pred(o_ptr)) continue;
            entry = malloc(sizeof(_art_info_t));
            entry->id = ART_EGO;
            if (p_ptr->prace == RACE_ANDROID)
            {
                entry->score = android_obj_exp(o_ptr);
                if (!entry->score) entry->score = obj_value_real(o_ptr);
            }
            else
                entry->score = obj_value_real(o_ptr);
            object_desc(entry->name, o_ptr, OD_COLOR_CODED);
            entry->k_idx = o_ptr->k_idx;
            vec_add(entries, entry);
            ct_ego++;
            score_ego += entry->score;
            if (entry->score > max_score_ego) max_score_ego = entry->score;
        }
    }
    /* Emit the sorted table and summary */
    if (vec_length(entries))
    {
        vec_sort(entries, (vec_cmp_f)_art_score_cmp);
        doc_printf(doc, "<topic:%s><style:heading>%s</style>\n\n", title, title);
        doc_insert(doc, "<style:wide> <color:G> Score Lvl Rty Cts Object Description</color>\n");
        for (i = 0; i < vec_length(entries); i++)
        {
            _art_info_ptr entry = vec_get(entries, i);
            if (entry->id == ART_RANDOM)
            {
                /* violet rows: random artifacts */
                doc_printf(doc, "<color:v>%3d) %7d</color> %3d ", i+1, entry->score, k_info[entry->k_idx].counts.found);
                doc_printf(doc, "<indent><style:indent>%s</style></indent>\n", entry->name);
            }
            else if (entry->id == ART_EGO)
            {
                /* blue rows: egos */
                doc_printf(doc, "<color:B>%3d) %7d</color> %3d ", i+1, entry->score, k_info[entry->k_idx].counts.found);
                doc_printf(doc, "<indent><style:indent>%s</style></indent>\n", entry->name);
            }
            else
            {
                /* standard artifacts: yellow if found, white otherwise */
                artifact_type *a_ptr = &a_info[entry->id];
                doc_printf(doc, "<color:%c>%3d) %7d</color> %3d %3d ",
                    (a_ptr->found) ? 'y' : 'w',
                    i+1, entry->score, a_ptr->level, a_ptr->rarity);
                if (a_ptr->gen_flags & OFG_INSTA_ART)
                    doc_insert(doc, " ");
                else
                    doc_printf(doc, "%3d ", k_info[entry->k_idx].counts.found);
                doc_printf(doc, "<indent><style:indent>%s <color:D>#%d</color></style></indent>\n", entry->name, entry->id);
            }
        }
        if (ct_std || ct_rnd || ct_ego)
        {
            doc_printf(doc, "\n<color:G>%20.20s Ct Average Best</color>\n", "");
            if (ct_std)
            {
                doc_printf(doc, "<color:B>%20.20s</color> %4d %7d %7d\n", "Stand Arts",
                    ct_std, score_std/ct_std, max_score_std);
            }
            if (ct_rnd)
            {
                doc_printf(doc, "<color:B>%20.20s</color> %4d %7d %7d\n", "Rand Arts",
                    ct_rnd, score_rnd/ct_rnd, max_score_rnd);
            }
            if (ct_ego)
            {
                doc_printf(doc, "<color:B>%20.20s</color> %4d %7d %7d\n", "Egos",
                    ct_ego, score_ego/ct_ego, max_score_ego);
            }
        }
        doc_insert(doc, "</style>\n\n");
    }
    vec_free(entries);
}
/*
 * Tear down a MotoEnv: frames, globals, caches, tracked pointers,
 * errors, scopes, cells, class defs, and finally the env struct itself.
 * The release order below matters — pools and tables are freed only
 * after everything that references them.
 */
void moto_freeEnv(MotoEnv *env) {
   Enumeration *e;

   /* Free any outstanding frames */
   while(vec_size(env->frames) > 0) {
      moto_freeFrame(env);
   }

   /* Free all the globals */
   e = stab_getKeys(env->globals);
   while (enum_hasNext(e)) {
      char* n = (char*)enum_next(e);
      MotoVar* var = stab_get(env->globals,n);
      /* in compiler mode values live in the pool; otherwise free directly */
      if(env->mode != COMPILER_MODE) {
         moto_freeVal(env,var->vs);
      } else {
         opool_release(env->valpool,var->vs);
      }
      free(var);
   }
   enum_free(e);
   stab_free(env->globals);

   /* free all cached regular expressions */
   e = stab_getKeys(env->rxcache);
   while (enum_hasNext(e)) {
      char *rx = (char *)enum_next(e);
      MDFA *mdfa = (MDFA *)stab_get(env->rxcache, rx);
      if (mdfa != NULL) {
         mdfa_free(mdfa);
      }
   }
   enum_free(e);

   /* free all remaining pointers (only those not shared elsewhere) */
   e = hset_elements(env->ptrs);
   while (enum_hasNext(e)) {
      void *ptr = enum_next(e);
      if (shared_check(ptr)) {
         free(ptr);
      }
   }
   enum_free(e);

   /* free all errors */
   e = sset_elements(env->errs);
   while (enum_hasNext(e)) {
      void *ptr = enum_next(e);
      if (shared_check(ptr)) {
         free(ptr);
      }
   }
   enum_free(e);

   /* free all scopes */
   e = stack_elements(env->scope);
   while (enum_hasNext(e)) {
      free(enum_next(e));
   }
   enum_free(e);

   /* free all cells */
   moto_freeTreeCells(env);

   /* free all class defs */
   stab_free(env->cdefs);

   /* free remainder of env struct */
   hset_free(env->ptrs);
   buf_free(env->out);
   buf_free(env->err);
   stab_free(env->types);
   vec_free(env->frames);
   ftab_free(env->ftable);

   /* Free all the stuff that got put in the mpool ...
      this includes MotoFunctions and MotoClassDefinitions */
   mpool_free(env->mpool);

   stack_free(env->scope);
   stab_free(env->rxcache);
   //stack_free(env->callstack);
   sset_free(env->errs);
   sset_free(env->uses);
   sset_free(env->includes);
   buf_free(env->fcodebuffer);
   istack_free(env->scopeIDStack);
   htab_free(env->fdefs);
   htab_free(env->adefs);
   buf_free(env->constantPool);
   moto_freeTree(env->tree);
   opool_free(env->valpool);
   opool_free(env->bufpool);
   opool_free(env->stkpool);

   /* fcache keys are owned strings; free each before the table */
   e = stab_getKeys(env->fcache);
   while (enum_hasNext(e))
      free((char *)enum_next(e));
   enum_free(e);
   stab_free(env->fcache);

   free(env);
}
/*
 * Test driver for clib_ptclosure: builds a relation from the global
 * 'constraints' strings ("a,b" meaning a before b), computes its
 * transitive closure, then repeatedly peels off unconstrained items to
 * emit a partial (topological) order.  Returns 0 on success, 1 on bad
 * input; exits if no partial order exists (a cycle).
 */
int
test_ptclosure_main (unformat_input_t * input)
{
  test_main_t *tm = &test_main;
  u8 *item_name;
  int i, j;
  u8 **orig;
  u8 **closure;
  u8 *a_name, *b_name;
  int a_index, b_index;
  uword *p;
  u8 *this_constraint;
  int n;
  u32 *result = 0;

  /* map each item name to its index */
  tm->index_by_name = hash_create_string (0, sizeof (uword));

  n = ARRAY_LEN (items);

  for (i = 0; i < n; i++)
    {
      item_name = (u8 *) items[i];
      hash_set_mem (tm->index_by_name, item_name, i);
    }

  orig = clib_ptclosure_alloc (n);

  /* orig[a][b] = 1 means "a must precede b" */
  for (i = 0; i < ARRAY_LEN (constraints); i++)
    {
      this_constraint = format (0, "%s%c", constraints[i], 0);

      /* NOTE(review): a_name/b_name presumably point into
         this_constraint after the split — confirm comma_split's
         contract before reordering the vec_free below */
      if (comma_split (this_constraint, &a_name, &b_name))
	{
	  clib_warning ("couldn't split '%s'", constraints[i]);
	  return 1;
	}

      p = hash_get_mem (tm->index_by_name, a_name);
      if (p == 0)
	{
	  clib_warning ("couldn't find '%s'", a_name);
	  return 1;
	}
      a_index = p[0];

      p = hash_get_mem (tm->index_by_name, b_name);
      if (p == 0)
	{
	  clib_warning ("couldn't find '%s'", b_name);
	  return 1;
	}
      b_index = p[0];

      orig[a_index][b_index] = 1;
      vec_free (this_constraint);
    }

  dump_closure (tm, "original relation", orig);

  closure = clib_ptclosure (orig);

  dump_closure (tm, "closure", closure);

  /*
   * Output partial order
   */

again:
  for (i = 0; i < n; i++)
    {
      /* an item with any remaining predecessor cannot be output yet */
      for (j = 0; j < n; j++)
	{
	  if (closure[i][j])
	    goto item_constrained;
	}
      /* Item i can be output */
      vec_add1 (result, i);
      {
	int k;
	/* remove i as a predecessor of everything else */
	for (k = 0; k < n; k++)
	  closure[k][i] = 0;
	/* "Magic" a before a, to keep from ever outputting it again */
	closure[i][i] = 1;
	goto again;
      }
    item_constrained:
      ;
    }

  /* fewer than n outputs means a cycle blocked the rest */
  if (vec_len (result) != n)
    {
      clib_warning ("no partial order exists");
      exit (1);
    }

  fformat (stdout, "Partial order:\n");

  /* results were collected most-constrained-last; print in reverse */
  for (i = vec_len (result) - 1; i >= 0; i--)
    {
      fformat (stdout, "%s\n", items[result[i]]);
    }

  vec_free (result);
  clib_ptclosure_free (orig);
  clib_ptclosure_free (closure);

  return 0;
}
/*
 * Map the binary-API shared-memory region.
 *
 * When is_vlib is non-zero the caller is vpp itself: it creates/owns the
 * region, and on restart recovers the existing region (scans stale
 * clients, drains the input queue, repairs a possibly-held mutex).
 * Otherwise the caller is a client, which polls (up to ~100 s) for vpp
 * to create and initialize the region.  Returns 0 on success, -2 on
 * failure.
 */
int
vl_map_shmem (const char *region_name, int is_vlib)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_region_t *vlib_rp, *root_rp;
  api_main_t *am = &api_main;
  int i;
  struct timespec ts, tsrem;
  char *vpe_api_region_suffix = "-vpe-api";

  clib_memset (a, 0, sizeof (*a));

  /* "<root>-vpe-api" style names carry an embedded chroot path; strip
     the suffix to recover it */
  if (strstr (region_name, vpe_api_region_suffix))
    {
      u8 *root_path = format (0, "%s", region_name);
      _vec_len (root_path) = (vec_len (root_path) -
			      strlen (vpe_api_region_suffix));
      vec_terminate_c_string (root_path);
      a->root_path = (const char *) root_path;
      am->root_path = (const char *) root_path;
    }

  if (is_vlib == 0)
    {
      int tfd;
      u8 *api_name;
      /*
       * Clients wait for vpp to set up the root / API regioins
       */
      if (am->root_path)
	api_name = format (0, "/dev/shm/%s-%s%c", am->root_path,
			   region_name + 1, 0);
      else
	api_name = format (0, "/dev/shm%s%c", region_name, 0);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
	{
	  ts.tv_sec = 0;
	  ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	  /* retry the remainder if the sleep is interrupted */
	  while (nanosleep (&ts, &tsrem) < 0)
	    ts = tsrem;
	  tfd = open ((char *) api_name, O_RDWR);
	  if (tfd >= 0)
	    break;
	}
      vec_free (api_name);
      if (tfd < 0)
	{
	  clib_warning ("region init fail");
	  return -2;
	}
      close (tfd);
      svm_region_init_chroot_uid_gid (am->root_path, getuid (), getgid ());
    }

  if (a->root_path != NULL)
    {
      a->name = "/vpe-api";
    }
  else
    a->name = region_name;
  a->size = am->api_size ? am->api_size : (16 << 20);	/* default 16 MB */
  a->flags = SVM_FLAGS_MHEAP;
  a->uid = am->api_uid;
  a->gid = am->api_gid;
  a->pvt_heap_size = am->api_pvt_heap_size;

  vlib_rp = svm_region_find_or_create (a);

  if (vlib_rp == 0)
    return (-2);

  pthread_mutex_lock (&vlib_rp->mutex);
  /* Has someone else set up the shared-memory variable table? */
  if (vlib_rp->user_ctx)
    {
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      if (is_vlib)
	{
	  svm_queue_t *q;
	  uword old_msg;
	  /*
	   * application restart. Reset cached pids, API message
	   * rings, list of clients; otherwise, various things
	   * fail. (e.g. queue non-empty notification)
	   */

	  /* ghosts keep the region from disappearing properly */
	  svm_client_scan_this_region_nolock (vlib_rp);
	  am->shmem_hdr->application_restarts++;
	  q = am->shmem_hdr->vl_input_queue;
	  am->shmem_hdr->vl_pid = getpid ();
	  q->consumer_pid = am->shmem_hdr->vl_pid;
	  /* Drain the input queue, freeing msgs */
	  for (i = 0; i < 10; i++)
	    {
	      if (pthread_mutex_trylock (&q->mutex) == 0)
		{
		  pthread_mutex_unlock (&q->mutex);
		  goto mutex_ok;
		}
	      ts.tv_sec = 0;
	      ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	      while (nanosleep (&ts, &tsrem) < 0)
		ts = tsrem;
	    }
	  /* Mutex buggered, "fix" it */
	  clib_memset (&q->mutex, 0, sizeof (q->mutex));
	  clib_warning ("forcibly release main input queue mutex");

	mutex_ok:
	  am->vlib_rp = vlib_rp;
	  /* free every message still queued from before the restart */
	  while (svm_queue_sub (q, (u8 *) & old_msg, SVM_Q_NOWAIT, 0)
		 != -2 /* queue underflow */ )
	    {
	      vl_msg_api_free_nolock ((void *) old_msg);
	      am->shmem_hdr->restart_reclaims++;
	    }
	  pthread_mutex_unlock (&vlib_rp->mutex);
	  root_rp = svm_get_root_rp ();
	  ASSERT (root_rp);
	  /* Clean up the root region client list */
	  pthread_mutex_lock (&root_rp->mutex);
	  svm_client_scan_this_region_nolock (root_rp);
	  pthread_mutex_unlock (&root_rp->mutex);
	}
      else
	{
	  pthread_mutex_unlock (&vlib_rp->mutex);
	}
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }
  /* Clients simply have to wait... */
  if (!is_vlib)
    {
      pthread_mutex_unlock (&vlib_rp->mutex);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
	{
	  ts.tv_sec = 0;
	  ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	  while (nanosleep (&ts, &tsrem) < 0)
	    ts = tsrem;
	  if (vlib_rp->user_ctx)
	    goto ready;
	}
      /* Clean up and leave... */
      svm_region_unmap (vlib_rp);
      clib_warning ("region init fail");
      return (-2);

    ready:
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }

  /* Nope, it's our problem... */
  vl_init_shmem (vlib_rp, 0 /* default config */ ,
		 1 /* is vlib */ , 0 /* is_private_region */ );
  vec_add1 (am->mapped_shmem_regions, vlib_rp);

  return 0;
}
/* Dispose of a Stack.  A Stack is vector-backed, so vec_free releases it. */
void stack_free(Stack *p)
{
    vec_free(p);
}
qc_program* prog_load(const char *filename) { qc_program *prog; prog_header header; FILE *file; file = util_fopen(filename, "rb"); if (!file) return NULL; if (fread(&header, sizeof(header), 1, file) != 1) { loaderror("failed to read header from '%s'", filename); fclose(file); return NULL; } if (header.version != 6) { loaderror("header says this is a version %i progs, we need version 6\n", header.version); fclose(file); return NULL; } prog = (qc_program*)mem_a(sizeof(qc_program)); if (!prog) { fclose(file); printf("failed to allocate program data\n"); return NULL; } memset(prog, 0, sizeof(*prog)); prog->entityfields = header.entfield; prog->crc16 = header.crc16; prog->filename = util_strdup(filename); if (!prog->filename) { loaderror("failed to store program name"); goto error; } #define read_data(hdrvar, progvar, reserved) \ if (fseek(file, header.hdrvar.offset, SEEK_SET) != 0) { \ loaderror("seek failed"); \ goto error; \ } \ if (fread(vec_add(prog->progvar, header.hdrvar.length + reserved), \ sizeof(*prog->progvar), \ header.hdrvar.length, file) \ != header.hdrvar.length) \ { \ loaderror("read failed"); \ goto error; \ } #define read_data1(x) read_data(x, x, 0) #define read_data2(x, y) read_data(x, x, y) read_data (statements, code, 0); read_data1(defs); read_data1(fields); read_data1(functions); read_data1(strings); read_data2(globals, 2); /* reserve more in case a RETURN using with the global at "the end" exists */ fclose(file); /* profile counters */ memset(vec_add(prog->profile, vec_size(prog->code)), 0, sizeof(prog->profile[0]) * vec_size(prog->code)); /* Add tempstring area */ prog->tempstring_start = vec_size(prog->strings); prog->tempstring_at = vec_size(prog->strings); memset(vec_add(prog->strings, 16*1024), 0, 16*1024); /* spawn the world entity */ vec_push(prog->entitypool, true); memset(vec_add(prog->entitydata, prog->entityfields), 0, prog->entityfields * sizeof(prog->entitydata[0])); prog->entities = 1; return prog; error: if (prog->filename) 
mem_d(prog->filename); vec_free(prog->code); vec_free(prog->defs); vec_free(prog->fields); vec_free(prog->functions); vec_free(prog->strings); vec_free(prog->globals); vec_free(prog->entitydata); vec_free(prog->entitypool); mem_d(prog); return NULL; }
/*
 * Render DOC to FP as a standalone HTML page.
 *
 * Each document cell becomes one character inside a <pre> block; color
 * attribute changes open/close <font> tags, bookmarks become named
 * anchors, and links become <a href> spans (with ".txt"-style file
 * extensions rewritten to ".html").
 *
 * Fix: the character-escaping switch previously emitted '&', '<' and '>'
 * literally, which corrupts the generated HTML; they are now written as
 * the proper entities &amp; &lt; &gt;.
 */
static void _doc_write_html_file(doc_ptr doc, FILE *fp)
{
    doc_pos_t pos;
    doc_char_ptr cell;
    byte old_a = _INVALID_COLOR;
    int bookmark_idx = 0;
    doc_bookmark_ptr next_bookmark = NULL;
    vec_ptr links = doc_get_links(doc);
    int link_idx = 0;
    doc_link_ptr next_link = NULL;

    if (bookmark_idx < vec_length(doc->bookmarks))
        next_bookmark = vec_get(doc->bookmarks, bookmark_idx);
    if (link_idx < vec_length(links))
        next_link = vec_get(links, link_idx);

    fprintf(fp, "<!DOCTYPE html>\n<html>\n");
    if (string_length(doc->html_header))
        fprintf(fp, "%s\n", string_buffer(doc->html_header));
    fprintf(fp, "<body text=\"#ffffff\" bgcolor=\"#000000\"><pre>\n");

    for (pos.y = 0; pos.y <= doc->cursor.y; pos.y++)
    {
        int cx = doc->width;
        pos.x = 0;
        if (pos.y == doc->cursor.y)
            cx = doc->cursor.x; /* last row: stop at the cursor column */
        cell = doc_char(doc, pos);

        /* Emit a named anchor for any bookmark that starts on this row. */
        if (next_bookmark && pos.y == next_bookmark->pos.y)
        {
            fprintf(fp, "<a name=\"%s\"></a>", string_buffer(next_bookmark->name));
            bookmark_idx++;
            if (bookmark_idx < vec_length(doc->bookmarks))
                next_bookmark = vec_get(doc->bookmarks, bookmark_idx);
            else
                next_bookmark = NULL;
        }

        for (; pos.x < cx; pos.x++)
        {
            char c = cell->c;
            byte a = cell->a & 0x0F;

            if (next_link)
            {
                /* Open an <a href> when the link's start position is reached.
                 * The target file's extension is swapped for "html" so the
                 * cross-references point at the HTML versions. */
                if (doc_pos_compare(next_link->location.start, pos) == 0)
                {
                    string_ptr s;
                    int dot = string_last_chr(next_link->file, '.'); /* renamed from `pos` to avoid shadowing */
                    if (dot >= 0)
                    {
                        s = string_copy_sn(string_buffer(next_link->file), dot + 1);
                        string_append_s(s, "html");
                    }
                    else
                        s = string_copy(next_link->file);
                    fprintf(fp, "<a href=\"%s", string_buffer(s));
                    if (next_link->topic)
                        fprintf(fp, "#%s", string_buffer(next_link->topic));
                    fprintf(fp, "\">");
                    string_free(s);
                }
                /* Close it at the stop position and advance to the next link. */
                if (doc_pos_compare(next_link->location.stop, pos) == 0)
                {
                    fprintf(fp, "</a>");
                    link_idx++;
                    if (link_idx < vec_length(links))
                        next_link = vec_get(links, link_idx);
                    else
                        next_link = NULL;
                }
            }

            if (!c) break; /* NUL terminates the row early */

            /* Switch <font> color when the attribute changes (spaces are
             * color-neutral, so they never force a tag change). */
            if (a != old_a && c != ' ')
            {
                if (old_a != _INVALID_COLOR)
                    fprintf(fp, "</font>");
                fprintf(fp, "<font color=\"#%02x%02x%02x\">",
                    angband_color_table[a][1],
                    angband_color_table[a][2],
                    angband_color_table[a][3]
                );
                old_a = a;
            }

            /* Escape HTML metacharacters; everything else passes through. */
            switch (c)
            {
            case '&': fprintf(fp, "&amp;"); break;
            case '<': fprintf(fp, "&lt;"); break;
            case '>': fprintf(fp, "&gt;"); break;
            default:  fprintf(fp, "%c", c); break;
            }
            cell++;
        }
        fputc('\n', fp);
    }
    fprintf(fp, "</font>");
    fprintf(fp, "</pre></body></html>\n");
    vec_free(links);
}
/*
 * Snapshot the per-node perf counters from every worker thread into the
 * perfmon capture pool, then clear the live counters.
 *
 * Phase 1 (under the worker barrier): sync node stats, duplicate every
 * node structure into heap copies, and zero the live perf counters.
 * Phase 2 (barrier released): walk the copies and fold their counter
 * deltas into per-(thread,node) capture records.
 */
void scrape_and_clear_counters (perfmon_main_t * pm)
{
  int i, j, k;
  vlib_main_t *vm = pm->vlib_main;
  vlib_main_t *stat_vm;
  vlib_node_main_t *nm;
  vlib_node_t ***node_dups = 0;   /* per-thread vector of node-copy vectors */
  vlib_node_t **nodes;
  vlib_node_t *n;
  perfmon_capture_t *c;
  perfmon_event_config_t *current_event;
  uword *p;
  u8 *counter_name;
  u64 vectors_this_counter;

  /* snapshot the nodes, including pm counters, with workers stopped */
  vlib_worker_thread_barrier_sync (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
	continue;

      nm = &stat_vm->node_main;

      /* Pull each node's runtime stats into its vlib_node_t. */
      for (i = 0; i < vec_len (nm->nodes); i++)
	{
	  n = nm->nodes[i];
	  vlib_node_sync_stats (stat_vm, n);
	}

      nodes = 0;
      vec_validate (nodes, vec_len (nm->nodes) - 1);
      vec_add1 (node_dups, nodes);

      /* Snapshot and clear the per-node perfmon counters: copy each node
       * to a private heap allocation, then zero the live counters so the
       * next collection interval starts fresh. */
      for (i = 0; i < vec_len (nm->nodes); i++)
	{
	  n = nm->nodes[i];
	  nodes[i] = clib_mem_alloc (sizeof (*n));
	  clib_memcpy_fast (nodes[i], n, sizeof (*n));
	  n->stats_total.perf_counter0_ticks = 0;
	  n->stats_total.perf_counter1_ticks = 0;
	  n->stats_total.perf_counter_vectors = 0;
	  n->stats_last_clear.perf_counter0_ticks = 0;
	  n->stats_last_clear.perf_counter1_ticks = 0;
	  n->stats_last_clear.perf_counter_vectors = 0;
	}
    }

  vlib_worker_thread_barrier_release (vm);

  /* Phase 2: process the copies outside the barrier. */
  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
	continue;

      nodes = node_dups[j];

      for (i = 0; i < vec_len (nodes); i++)
	{
	  u8 *capture_name;

	  n = nodes[i];
	  /* Nodes that never ran during the interval contribute nothing. */
	  if (n->stats_total.perf_counter0_ticks == 0 &&
	      n->stats_total.perf_counter1_ticks == 0)
	    goto skip_this_node;

	  for (k = 0; k < 2; k++)
	    {
	      u64 counter_value, counter_last_clear;

	      /*
	       * We collect 2 counters at once, except for the
	       * last counter when the user asks for an odd number of
	       * counters
	       */
	      if ((pm->current_event + k) >= vec_len (pm->single_events_to_collect))
		break;

	      if (k == 0)
		{
		  counter_value = n->stats_total.perf_counter0_ticks;
		  counter_last_clear = n->stats_last_clear.perf_counter0_ticks;
		}
	      else
		{
		  counter_value = n->stats_total.perf_counter1_ticks;
		  counter_last_clear = n->stats_last_clear.perf_counter1_ticks;
		}

	      /* Capture records are keyed by "t<thread>-<node name>". */
	      capture_name = format (0, "t%d-%v%c", j, n->name, 0);
	      p = hash_get_mem (pm->capture_by_thread_and_node_name, capture_name);

	      if (p == 0)
		{
		  /* First sighting: create the capture record; the hash
		   * takes ownership of capture_name. */
		  pool_get (pm->capture_pool, c);
		  memset (c, 0, sizeof (*c));
		  c->thread_and_node_name = capture_name;
		  hash_set_mem (pm->capture_by_thread_and_node_name, capture_name, c - pm->capture_pool);
		}
	      else
		{
		  c = pool_elt_at_index (pm->capture_pool, p[0]);
		  vec_free (capture_name); /* existing record: key not needed */
		}

	      /* Snapshot counters, etc. into the capture */
	      current_event = pm->single_events_to_collect + pm->current_event + k;
	      counter_name = (u8 *) current_event->name;
	      vectors_this_counter = n->stats_total.perf_counter_vectors -
		n->stats_last_clear.perf_counter_vectors;

	      vec_add1 (c->counter_names, counter_name);
	      vec_add1 (c->counter_values, counter_value - counter_last_clear);
	      vec_add1 (c->vectors_this_counter, vectors_this_counter);
	    }

	skip_this_node:
	  clib_mem_free (n); /* every copy is freed, captured or not */
	}
      vec_free (nodes);
    }
  vec_free (node_dups);
}
/*
 * Heap allocator stress test.
 *
 * Usage: test_heap [iterations] [seed] [check_mask] [log2_align]
 *   check_mask bit 0: validate the heap after every iteration
 *   check_mask bit 1: fill each object with a pattern and verify it
 *   check_mask bit 2: duplicate and replace the heap every iteration
 *
 * Randomly allocates/frees objects for `iterations` rounds, then frees
 * everything and checks that no objects remain.
 */
int main (int argc, char * argv[])
{
  word i, j, k, n, check_mask;
  u32 seed;
  u32 * h = 0;              /* the heap under test (created lazily) */
  uword * objects = 0;      /* object offsets, ~0 == slot free */
  uword * handles = 0;      /* heap handles, parallel to objects */
  uword objects_used;
  uword align, fixed_size;

  n = 10;
  seed = (u32) getpid ();
  check_mask = 0;
  fixed_size = 0;           /* NOTE(review): never set to 1 — the
                               fixed-size branches below are dead code */

  if (argc > 1)
    {
      n = atoi (argv[1]);
      verbose = 1;
    }
  if (argc > 2)
    {
      /* NOTE: this inner `i` deliberately shadows the loop counter above */
      word i = atoi (argv[2]);
      if (i)
	seed = i;
    }
  if (argc > 3)
    check_mask = atoi (argv[3]);
  align = 0;
  if (argc > 4)
    align = 1 << atoi (argv[4]);

  if_verbose ("testing %wd iterations seed %wd\n", n, seed);

  if (verbose)
    fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);

  vec_resize (objects, 1000);
  if (vec_bytes(objects)) /* stupid warning be gone */
    memset (objects, ~0, vec_bytes (objects)); /* mark all slots free */
  vec_resize (handles, vec_len (objects));
  objects_used = 0;

  if (fixed_size)
    {
      uword max_len = 1024 * 1024;
      void * memory = clib_mem_alloc (max_len * sizeof (h[0]));
      h = heap_create_from_memory (memory, max_len, sizeof (h[0]));
    }

  for (i = 0; i < n; i++)
    {
      /* Pick a slot; bias toward freeing once we can no longer finish
       * enough allocations in the remaining iterations. */
      while (1)
	{
	  j = random_u32 (&seed) % vec_len (objects);
	  if (objects[j] != ~0 || i + objects_used < n)
	    break;
	}

      if (objects[j] != ~0)
	{
	  /* occupied slot: free it */
	  heap_dealloc (h, handles[j]);
	  objects_used--;
	  objects[j] = ~0;
	}
      else
	{
	  /* free slot: allocate a random-size object into it */
	  u32 * data;
	  uword size;

	  size = 1 + (random_u32 (&seed) % 100);
	  objects[j] = heap_alloc_aligned (h, size, align, handles[j]);
	  objects_used++;

	  if (align)
	    ASSERT (0 == (objects[j] & (align - 1)));
	  ASSERT (objects[j] < vec_len (h));
	  ASSERT (size <= heap_len (h, handles[j]));

	  /* Set newly allocated object with test data. */
	  if (check_mask & 2)
	    {
	      data = h + objects[j];
	      for (k = 0; k < size; k++)
		data[k] = objects[j] + k;
	    }
	}

      if (check_mask & 1)
	heap_validate (h);

      if (check_mask & 4)
	{
	  /* Duplicate heap at each iteration. */
	  u32 * h1 = heap_dup (h);
	  heap_free (h);
	  h = h1;
	}

      /* Verify that all used objects have correct test data. */
      if (check_mask & 2)
	{
	  for (j = 0; j < vec_len (objects); j++)
	    if (objects[j] != ~0)
	      {
		u32 * data = h + objects[j];
		for (k = 0; k < heap_len (h, handles[j]); k++)
		  ASSERT(data[k] == objects[j] + k);
	      }
	}
    }

  if (verbose)
    fformat (stderr, "%U\n", format_heap, h, 1);

  /* Exercise heap_dup/heap_free once more on the final heap. */
  {
    u32 * h1 = heap_dup (h);
    if (verbose)
      fformat (stderr, "%U\n", format_heap, h1, 1);
    heap_free (h1);
  }

  heap_free (h);
  if (verbose)
    fformat (stderr, "%U\n", format_heap, h, 1);

  ASSERT (objects_used == 0);

  vec_free (objects);
  vec_free (handles);
  if (fixed_size)
    vec_free_h (h, sizeof (heap_header_t));

  if (verbose)
    fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);

  return 0;
}
void correct_free(correction_t *c) { vec_free(c->edits); vec_free(c->lens); correct_pool_delete(); }
void fini_ssanames (void) { vec_free (SSANAMES (cfun)); vec_free (FREE_SSANAMES (cfun)); }
clib_error_t * vlib_call_all_config_functions (vlib_main_t * vm, unformat_input_t * input, int is_early) { clib_error_t *error = 0; vlib_config_function_runtime_t *c, **all; uword *hash = 0, *p; uword i; hash = hash_create_string (0, sizeof (uword)); all = 0; c = vm->config_function_registrations; while (c) { hash_set_mem (hash, c->name, vec_len (all)); vec_add1 (all, c); unformat_init (&c->input, 0, 0); c = c->next_registration; } while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { u8 *s, *v; if (!unformat (input, "%s %v", &s, &v) || !(p = hash_get_mem (hash, s))) { error = clib_error_create ("unknown input `%s %v'", s, v); goto done; } c = all[p[0]]; if (vec_len (c->input.buffer) > 0) vec_add1 (c->input.buffer, ' '); vec_add (c->input.buffer, v, vec_len (v)); vec_free (v); vec_free (s); } for (i = 0; i < vec_len (all); i++) { c = all[i]; /* Is this an early config? Are we doing early configs? */ if (is_early ^ c->is_early) continue; /* Already called? */ if (hash_get (vm->init_functions_called, c->function)) continue; hash_set1 (vm->init_functions_called, c->function); error = c->function (vm, &c->input); if (error) goto done; } done: for (i = 0; i < vec_len (all); i++) { c = all[i]; unformat_free (&c->input); } vec_free (all); hash_free (hash); return error; }
static void free_vec_pool(struct vec *v) { vec_free(v, NULL); }
static int dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe) { struct rte_flow_item_ipv4 ip4[2] = { }; struct rte_flow_item_ipv6 ip6[2] = { }; struct rte_flow_item_udp udp[2] = { }; struct rte_flow_item_tcp tcp[2] = { }; struct rte_flow_action_mark mark = { 0 }; struct rte_flow_item *item, *items = 0; struct rte_flow_action *action, *actions = 0; enum { vxlan_hdr_sz = sizeof (vxlan_header_t), raw_sz = sizeof (struct rte_flow_item_raw) }; union { struct rte_flow_item_raw item; u8 val[raw_sz + vxlan_hdr_sz]; } raw[2]; u16 src_port, dst_port, src_port_mask, dst_port_mask; u8 protocol; int rv = 0; if (f->actions & (~xd->supported_flow_actions)) return VNET_FLOW_ERROR_NOT_SUPPORTED; /* Match items */ /* Ethernet */ vec_add2 (items, item, 1); item->type = RTE_FLOW_ITEM_TYPE_ETH; item->spec = any_eth; item->mask = any_eth + 1; /* VLAN */ if (f->type != VNET_FLOW_TYPE_IP4_VXLAN) { vec_add2 (items, item, 1); item->type = RTE_FLOW_ITEM_TYPE_VLAN; item->spec = any_vlan; item->mask = any_vlan + 1; } /* IP */ vec_add2 (items, item, 1); if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) { vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple; clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16); clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16); clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16); clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16); item->type = RTE_FLOW_ITEM_TYPE_IPV6; item->spec = ip6; item->mask = ip6 + 1; src_port = t6->src_port.port; dst_port = t6->dst_port.port; src_port_mask = t6->src_port.mask; dst_port_mask = t6->dst_port.mask; protocol = t6->protocol; } else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) { vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple; ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32; ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32; ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32; ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32; item->type = RTE_FLOW_ITEM_TYPE_IPV4; item->spec = ip4; 
item->mask = ip4 + 1; src_port = t4->src_port.port; dst_port = t4->dst_port.port; src_port_mask = t4->src_port.mask; dst_port_mask = t4->dst_port.mask; protocol = t4->protocol; } else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN) { vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan; ip4[0].hdr.src_addr = v4->src_addr.as_u32; ip4[1].hdr.src_addr = -1; ip4[0].hdr.dst_addr = v4->dst_addr.as_u32; ip4[1].hdr.dst_addr = -1; item->type = RTE_FLOW_ITEM_TYPE_IPV4; item->spec = ip4; item->mask = ip4 + 1; dst_port = v4->dst_port; dst_port_mask = -1; src_port = 0; src_port_mask = 0; protocol = IP_PROTOCOL_UDP; } else { rv = VNET_FLOW_ERROR_NOT_SUPPORTED; goto done; } /* Layer 4 */ vec_add2 (items, item, 1); if (protocol == IP_PROTOCOL_UDP) { udp[0].hdr.src_port = clib_host_to_net_u16 (src_port); udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask); udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port); udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask); item->type = RTE_FLOW_ITEM_TYPE_UDP; item->spec = udp; item->mask = udp + 1; } else if (protocol == IP_PROTOCOL_TCP) { tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port); tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask); tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port); tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask); item->type = RTE_FLOW_ITEM_TYPE_TCP; item->spec = tcp; item->mask = tcp + 1; } else { rv = VNET_FLOW_ERROR_NOT_SUPPORTED; goto done; } /* Tunnel header match */ if (f->type == VNET_FLOW_TYPE_IP4_VXLAN) { u32 vni = f->ip4_vxlan.vni; vxlan_header_t spec_hdr = { .flags = VXLAN_FLAGS_I, .vni_reserved = clib_host_to_net_u32 (vni << 8) }; vxlan_header_t mask_hdr = { .flags = 0xff, .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8) }; clib_memset (raw, 0, sizeof raw); raw[0].item.relative = 1; raw[0].item.length = vxlan_hdr_sz; clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz); raw[0].item.pattern = raw[0].val + raw_sz; clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, 
vxlan_hdr_sz); raw[1].item.pattern = raw[1].val + raw_sz; vec_add2 (items, item, 1); item->type = RTE_FLOW_ITEM_TYPE_RAW; item->spec = raw; item->mask = raw + 1; } vec_add2 (items, item, 1); item->type = RTE_FLOW_ITEM_TYPE_END; /* Actions */ vec_add2 (actions, action, 1); action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU; vec_add2 (actions, action, 1); mark.id = fe->mark; action->type = RTE_FLOW_ACTION_TYPE_MARK; action->conf = &mark; vec_add2 (actions, action, 1); action->type = RTE_FLOW_ACTION_TYPE_END; fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions, &xd->last_flow_error); if (!fe->handle) rv = VNET_FLOW_ERROR_NOT_SUPPORTED; done: vec_free (items); vec_free (actions); return rv; } int dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance, u32 flow_index, uword * private_data) { dpdk_main_t *dm = &dpdk_main; vnet_flow_t *flow = vnet_get_flow (flow_index); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance); dpdk_flow_entry_t *fe; dpdk_flow_lookup_entry_t *fle = 0; int rv; /* recycle old flow lookup entries only after the main loop counter increases - i.e. 
previously DMA'ed packets were handled */ if (vec_len (xd->parked_lookup_indexes) > 0 && xd->parked_loop_count != dm->vlib_main->main_loop_count) { u32 *fl_index; vec_foreach (fl_index, xd->parked_lookup_indexes) pool_put_index (xd->flow_lookup_entries, *fl_index); vec_reset_length (xd->flow_lookup_entries); } if (op == VNET_FLOW_DEV_OP_DEL_FLOW) { ASSERT (*private_data >= vec_len (xd->flow_entries)); fe = vec_elt_at_index (xd->flow_entries, *private_data); if ((rv = rte_flow_destroy (xd->device_index, fe->handle, &xd->last_flow_error))) return VNET_FLOW_ERROR_INTERNAL; if (fe->mark) { /* make sure no action is taken for in-flight (marked) packets */ fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark); clib_memset (fle, -1, sizeof (*fle)); vec_add1 (xd->parked_lookup_indexes, fe->mark); xd->parked_loop_count = dm->vlib_main->main_loop_count; } clib_memset (fe, 0, sizeof (*fe)); pool_put (xd->flow_entries, fe); goto disable_rx_offload; } if (op != VNET_FLOW_DEV_OP_ADD_FLOW) return VNET_FLOW_ERROR_NOT_SUPPORTED; pool_get (xd->flow_entries, fe); fe->flow_index = flow->index; if (flow->actions == 0) { rv = VNET_FLOW_ERROR_NOT_SUPPORTED; goto done; } /* if we need to mark packets, assign one mark */ if (flow->actions & (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_BUFFER_ADVANCE)) { /* reserve slot 0 */ if (xd->flow_lookup_entries == 0) pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES); pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES); fe->mark = fle - xd->flow_lookup_entries; /* install entry in the lookup table */ clib_memset (fle, -1, sizeof (*fle)); if (flow->actions & VNET_FLOW_ACTION_MARK) fle->flow_id = flow->mark_flow_id; if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE) fle->next_index = flow->redirect_device_input_next_index; if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE) fle->buffer_advance = flow->buffer_advance; } else fe->mark = 0; if ((xd->flags & 
DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0) { xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD; dpdk_device_setup (xd); } switch (flow->type) { case VNET_FLOW_TYPE_IP4_N_TUPLE: case VNET_FLOW_TYPE_IP6_N_TUPLE: case VNET_FLOW_TYPE_IP4_VXLAN: if ((rv = dpdk_flow_add (xd, flow, fe))) goto done; break; default: rv = VNET_FLOW_ERROR_NOT_SUPPORTED; goto done; } *private_data = fe - xd->flow_entries; done: if (rv) { clib_memset (fe, 0, sizeof (*fe)); pool_put (xd->flow_entries, fe); if (fle) { clib_memset (fle, -1, sizeof (*fle)); pool_put (xd->flow_lookup_entries, fle); } } disable_rx_offload: if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0 && pool_elts (xd->flow_entries) == 0) { xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD; dpdk_device_setup (xd); } return rv; }
int main(int argc, char **argv) { bool extract = true; char *redirout = (char*)stdout; char *redirerr = (char*)stderr; char *file = NULL; char **files = NULL; pak_file_t *pak = NULL; size_t iter = 0; con_init(); /* * Command line option parsing commences now We only need to support * a few things in the test suite. */ while (argc > 1) { ++argv; --argc; if (argv[0][0] == '-') { if (parsecmd("redirout", &argc, &argv, &redirout, 1, false)) continue; if (parsecmd("redirerr", &argc, &argv, &redirerr, 1, false)) continue; if (parsecmd("file", &argc, &argv, &file, 1, false)) continue; con_change(redirout, redirerr); switch (argv[0][1]) { case 'e': extract = true; continue; case 'c': extract = false; continue; } if (!strcmp(argv[0]+1, "debug")) { OPTS_OPTION_BOOL(OPTION_DEBUG) = true; continue; } if (!strcmp(argv[0]+1, "memchk")) { OPTS_OPTION_BOOL(OPTION_MEMCHK) = true; continue; } if (!strcmp(argv[0]+1, "nocolor")) { con_color(0); continue; } } vec_push(files, argv[0]); } con_change(redirout, redirerr); if (!file) { con_err("-file must be specified for output/input PAK file\n"); vec_free(files); return EXIT_FAILURE; } if (extract) { if (!(pak = pak_open(file, "r"))) { con_err("failed to open PAK file %s\n", file); vec_free(files); return EXIT_FAILURE; } if (!pak_extract_all(pak, "./")) { con_err("failed to extract PAK %s (files may be missing)\n", file); pak_close(pak); vec_free(files); return EXIT_FAILURE; } /* not possible */ pak_close(pak); vec_free(files); stat_info(); return EXIT_SUCCESS; } if (!(pak = pak_open(file, "w"))) { con_err("failed to open PAK %s for writing\n", file); vec_free(files); return EXIT_FAILURE; } for (iter = 0; iter < vec_size(files); iter++) { if (!(pak_insert_one(pak, files[iter]))) { con_err("failed inserting %s for PAK %s\n", files[iter], file); pak_close(pak); vec_free(files); return EXIT_FAILURE; } } /* not possible */ pak_close(pak); vec_free(files); stat_info(); return EXIT_SUCCESS; }
/*
 * CLI handler: add or delete a LISP-GPE forwarding entry.
 *
 * Parses add/del, local/remote EIDs, vni, vrf, an optional negative
 * action and one (lloc, rloc) locator pair, validates the combination,
 * then calls vnet_lisp_gpe_add_del_fwd_entry().
 *
 * Fix: the previous revision called unformat_free() only on the
 * successful-parse path; a parse error jumped straight to `done` and
 * leaked the line-input buffer.  The free now lives at `done` so every
 * exit path (after unformat_user succeeded) releases it exactly once.
 */
static clib_error_t *
lisp_gpe_add_del_fwd_entry_command_fn (vlib_main_t * vm,
                                       unformat_input_t * input,
                                       vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, * line_input = &_line_input;
  u8 is_add = 1;
  ip_address_t lloc, rloc, *llocs = 0, *rlocs = 0;
  clib_error_t * error = 0;
  gid_address_t _reid, * reid = &_reid, _leid, * leid = &_leid;
  u8 reid_set = 0, leid_set = 0, is_negative = 0, vrf_set = 0, vni_set = 0;
  u32 vni, vrf, action = ~0;
  int rv;

  /* Get a line of input. */
  if (! unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "del"))
        is_add = 0;
      else if (unformat (line_input, "add"))
        is_add = 1;
      else if (unformat (line_input, "leid %U", unformat_gid_address, leid))
        {
          leid_set = 1;
        }
      else if (unformat (line_input, "reid %U", unformat_gid_address, reid))
        {
          reid_set = 1;
        }
      else if (unformat (line_input, "vni %u", &vni))
        {
          /* same vni applies to both endpoints */
          gid_address_vni (leid) = vni;
          gid_address_vni (reid) = vni;
          vni_set = 1;
        }
      else if (unformat (line_input, "vrf %u", &vrf))
        {
          vrf_set = 1;
        }
      else if (unformat (line_input, "negative action %U",
                         unformat_negative_mapping_action, &action))
        {
          is_negative = 1;
        }
      else if (unformat (line_input, "lloc %U rloc %U",
                         unformat_ip_address, &lloc,
                         unformat_ip_address, &rloc))
        {
          /* TODO: support p and w */
          vec_add1 (llocs, lloc);
          vec_add1 (rlocs, rloc);
        }
      else
        {
          error = unformat_parse_error (line_input);
          goto done;
        }
    }

  if (!vni_set || !vrf_set)
    {
      error = clib_error_return(0, "vni and vrf must be set!");
      goto done;
    }

  if (!reid_set)
    {
      error = clib_error_return(0, "remote eid must be set!");
      goto done;
    }

  if (is_negative)
    {
      /* negative mappings carry an action instead of locators */
      if (~0 == action)
        {
          error = clib_error_return(0, "no action set for negative tunnel!");
          goto done;
        }
    }
  else
    {
      if (vec_len (llocs) == 0)
        {
          error = clib_error_return (0, "expected ip4/ip6 locators.");
          goto done;
        }

      if (vec_len (llocs) != 1)
        {
          error = clib_error_return (0, "multihoming not supported for now!");
          goto done;
        }
    }

  if (!leid_set)
    {
      /* if leid not set, make sure it's the same AFI like reid */
      gid_address_type(leid) = gid_address_type(reid);
      if (GID_ADDR_IP_PREFIX == gid_address_type (reid))
        gid_address_ip_version(leid) = gid_address_ip_version(reid);
    }

  /* add fwd entry */
  vnet_lisp_gpe_add_del_fwd_entry_args_t _a, * a = &_a;
  memset (a, 0, sizeof(a[0]));

  a->is_add = is_add;
  a->vni = vni;
  a->table_id = vrf;
  gid_address_copy(&a->seid, leid);
  gid_address_copy(&a->deid, reid);

  if (!is_negative)
    {
      a->slocator = llocs[0];
      a->dlocator = rlocs[0];
    }

  rv = vnet_lisp_gpe_add_del_fwd_entry (a, 0);
  if (0 != rv)
    {
      error = clib_error_return(0, "failed to %s gpe tunnel!",
                                is_add ? "add" : "delete");
    }

done:
  /* release the line input on every path, including parse errors */
  unformat_free (line_input);
  vec_free(llocs);
  vec_free(rlocs);
  return error;
}
void camera_free(camera* c){ vec_free(c->loc); free(c); }
static u32 add_del_ip_tunnel (vnet_lisp_gpe_add_del_fwd_entry_args_t *a, u32 * tun_index_res) { lisp_gpe_main_t * lgm = &lisp_gpe_main; lisp_gpe_tunnel_t *t = 0; uword * p; int rv; lisp_gpe_tunnel_key_t key; /* prepare tunnel key */ memset(&key, 0, sizeof(key)); ip_prefix_copy(&key.eid, &gid_address_ippref(&a->deid)); ip_address_copy(&key.dst_loc, &a->dlocator); key.iid = clib_host_to_net_u32 (a->vni); p = mhash_get (&lgm->lisp_gpe_tunnel_by_key, &key); if (a->is_add) { /* adding a tunnel: tunnel must not already exist */ if (p) return VNET_API_ERROR_INVALID_VALUE; if (a->decap_next_index >= LISP_GPE_INPUT_N_NEXT) return VNET_API_ERROR_INVALID_DECAP_NEXT; pool_get_aligned (lgm->tunnels, t, CLIB_CACHE_LINE_BYTES); memset (t, 0, sizeof (*t)); /* copy from arg structure */ #define _(x) t->x = a->x; foreach_copy_field; #undef _ ip_address_copy(&t->src, &a->slocator); ip_address_copy(&t->dst, &a->dlocator); /* if vni is non-default */ if (a->vni) { t->flags = LISP_GPE_FLAGS_I; t->vni = a->vni; } t->flags |= LISP_GPE_FLAGS_P; t->next_protocol = ip_prefix_version(&key.eid) == IP4 ? LISP_GPE_NEXT_PROTO_IP4 : LISP_GPE_NEXT_PROTO_IP6; rv = lisp_gpe_rewrite (t); if (rv) { pool_put(lgm->tunnels, t); return rv; } mhash_set(&lgm->lisp_gpe_tunnel_by_key, &key, t - lgm->tunnels, 0); /* return tunnel index */ if (tun_index_res) tun_index_res[0] = t - lgm->tunnels; } else { /* deleting a tunnel: tunnel must exist */ if (!p) { clib_warning("Tunnel for eid %U doesn't exist!", format_gid_address, &a->deid); return VNET_API_ERROR_NO_SUCH_ENTRY; } t = pool_elt_at_index(lgm->tunnels, p[0]); mhash_unset(&lgm->lisp_gpe_tunnel_by_key, &key, 0); vec_free(t->rewrite); pool_put(lgm->tunnels, t); } return 0; }
/*destructor for camera */ void camera_free(camera *c) { vec_free(c->loc); free(c); return; }
/*
 * Create and map a named POSIX shared-memory (shm) segment as an SSVM
 * master: allocate the backing file, map it (optionally at a randomized
 * requested VA), initialize the shared header and carve the remainder
 * into a heap.
 *
 * Returns 0 on success or an SSVM_API_ERROR_* code; the mapped segment
 * is recorded in ssvm->sh.
 */
int
ssvm_master_init_shm (ssvm_private_t * ssvm)
{
  int ssvm_fd;
#if USE_DLMALLOC == 0
  int mh_flags = MHEAP_FLAG_DISABLE_VM | MHEAP_FLAG_THREAD_SAFE;
#endif
  clib_mem_vm_map_t mapa = { 0 };
  u8 junk = 0, *ssvm_filename;
  ssvm_shared_header_t *sh;
  uword page_size, requested_va = 0;
  void *oldheap;

  if (ssvm->ssvm_size == 0)
    return SSVM_API_ERROR_NO_SIZE;

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] creating segment '%s'", getpid (), ssvm->name);

  /* Remove any stale segment file left by a previous run. */
  ASSERT (vec_c_string_is_terminated (ssvm->name));
  ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0);
  unlink ((char *) ssvm_filename);
  vec_free (ssvm_filename);

  /* O_EXCL: fail rather than reuse an existing segment. */
  ssvm_fd = shm_open ((char *) ssvm->name, O_RDWR | O_CREAT | O_EXCL, 0777);
  if (ssvm_fd < 0)
    {
      clib_unix_warning ("create segment '%s'", ssvm->name);
      return SSVM_API_ERROR_CREATE_FAILURE;
    }

  if (fchmod (ssvm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
    clib_unix_warning ("ssvm segment chmod");

  if (svm_get_root_rp ())
    {
      /* TODO: is this really needed? */
      svm_main_region_t *smr = svm_get_root_rp ()->data_base;
      if (fchown (ssvm_fd, smr->uid, smr->gid) < 0)
	clib_unix_warning ("ssvm segment chown");
    }

  /* Extend the file to the requested size by seeking past the end and
   * writing a single byte (sparse allocation). */
  if (lseek (ssvm_fd, ssvm->ssvm_size, SEEK_SET) < 0)
    {
      clib_unix_warning ("lseek");
      close (ssvm_fd);
      return SSVM_API_ERROR_SET_SIZE;
    }

  if (write (ssvm_fd, &junk, 1) != 1)
    {
      clib_unix_warning ("set ssvm size");
      close (ssvm_fd);
      return SSVM_API_ERROR_SET_SIZE;
    }

  page_size = clib_mem_get_fd_page_size (ssvm_fd);
  if (ssvm->requested_va)
    {
      /* ASLR-style jitter of the requested base address */
      requested_va = ssvm->requested_va;
      clib_mem_vm_randomize_va (&requested_va, min_log2 (page_size));
    }

  mapa.requested_va = requested_va;
  mapa.size = ssvm->ssvm_size;
  mapa.fd = ssvm_fd;
  if (clib_mem_vm_ext_map (&mapa))
    {
      clib_unix_warning ("mmap");
      close (ssvm_fd);
      return SSVM_API_ERROR_MMAP;
    }
  /* fd no longer needed once the mapping exists */
  close (ssvm_fd);

  /* The shared header lives at the base of the mapping; the heap starts
   * one page in. */
  sh = mapa.addr;
  sh->master_pid = ssvm->my_pid;
  sh->ssvm_size = ssvm->ssvm_size;
  sh->ssvm_va = pointer_to_uword (sh);
  sh->type = SSVM_SEGMENT_SHM;
#if USE_DLMALLOC == 0
  sh->heap = mheap_alloc_with_flags (((u8 *) sh) + page_size,
				     ssvm->ssvm_size - page_size, mh_flags);
#else
  sh->heap = create_mspace_with_base (((u8 *) sh) + page_size,
				      ssvm->ssvm_size - page_size,
				      1 /* locked */ );
  mspace_disable_expand (sh->heap);
#endif

  /* Copy the name into the shared heap so clients can see it.
   * NOTE(review): the trailing 0 argument is unused by "%s" — it looks
   * like this was meant to be "%s%c" to NUL-terminate; confirm against
   * how sh->name is consumed. */
  oldheap = ssvm_push_heap (sh);
  sh->name = format (0, "%s", ssvm->name, 0);
  ssvm_pop_heap (oldheap);

  ssvm->sh = sh;
  ssvm->my_pid = getpid ();
  ssvm->i_am_master = 1;

  /* The application has to set sh->ready... */
  return 0;
}