/**
 * Recursively apply matching function on each node, in depth-first order,
 * until it returns TRUE or the maximum depth is reached, at which time we
 * return the matching node.
 *
 * A depth of 0 limits searching to the root node, 1 to the root node plus
 * its immediate children, etc...
 *
 * @param tree		the tree descriptor
 * @param maxdepth	maximum search depth
 * @param match		the item matching function
 * @param data		user-defined argument passed to the matching callback
 *
 * @return the first matching node in the traversal path, NULL if none matched.
 */
void *
etree_find_depth(const etree_t *tree, unsigned maxdepth,
	match_fn_t match, void *data)
{
	etree_check(tree);
	g_assert(uint_is_non_negative(maxdepth) || ETREE_MAX_DEPTH == maxdepth);
	g_assert(match != NULL);

	return etree_find_depth_internal(tree, tree->root, 0, maxdepth,
		match, data);
}
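/*
 * Illustrative sketch (not part of the original sources): how a caller might
 * use etree_find_depth() to locate an item by key among the root and its
 * immediate children.  The example_item structure, its "key" field, the
 * node_t linkage type (inferred from the offsetof(..., node) pattern used in
 * xnode_tree_find_depth()) and the exact match_fn_t signature (item pointer
 * plus user data, returning TRUE on match) are assumptions for this example.
 */
struct example_item {
	node_t node;		/* Embedded tree linkage expected by etree */
	int key;			/* Hypothetical payload */
};

static bool
example_key_matches(const void *item, void *data)
{
	const struct example_item *xi = item;
	const int *key = data;

	return xi->key == *key;		/* TRUE stops the depth-first search */
}

static struct example_item *
example_lookup(const etree_t *tree, int key)
{
	/* Depth 1 = root node plus its immediate children */
	return etree_find_depth(tree, 1, example_key_matches, &key);
}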
/**
 * Same as xnode_tree_find() but limits the search to the specified depth:
 * 0 means the root node only, 1 corresponds to the immediate children of
 * the root, and so on.
 *
 * @return the first matching node in the traversal path, NULL if none matched.
 */
xnode_t *
xnode_tree_find_depth(xnode_t *root, unsigned depth,
	match_fn_t func, void *data)
{
	etree_t t;

	xnode_check(root);
	g_assert(uint_is_non_negative(depth));

	etree_init_root(&t, root, TRUE, offsetof(xnode_t, node));
	return etree_find_depth(&t, depth, func, data);
}
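/*
 * Illustrative sketch (not from the original sources): finding a direct child
 * element by name with xnode_tree_find_depth().  The xnode_is_element() and
 * xnode_element_name() accessors are assumed here for the sake of the
 * example; only the xnode_tree_find_depth() call itself comes from the code
 * above.
 */
static bool
example_element_named(const void *node, void *data)
{
	const xnode_t *xn = node;
	const char *name = data;

	return xnode_is_element(xn) &&
		0 == strcmp(xnode_element_name(xn), name);
}

static xnode_t *
example_find_child_element(xnode_t *root, const char *name)
{
	/* Depth 1 restricts the search to the root and its immediate children */
	return xnode_tree_find_depth(root, 1,
		example_element_named, (void *) name);
}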
/**
 * Maps a service type to a string.
 */
const char *
upnp_service_type_to_string(enum upnp_service_type type)
{
	g_assert(uint_is_non_negative(type) && type < UPNP_SVC_MAX);

	switch (type) {
	case UPNP_SVC_UNKNOWN:	return "Unknown";
	case UPNP_SVC_WAN_CIF:	return "WANCommonInterfaceConfig";
	case UPNP_SVC_WAN_IP:	return "WANIPConnection";
	case UPNP_SVC_WAN_PPP:	return "WANPPPConnection";
	case UPNP_SVC_MAX:		g_assert_not_reached();
	}

	return NULL;
}
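/*
 * Illustrative sketch (not part of the original sources): a typical use of
 * upnp_service_type_to_string() when logging a discovered service.  The
 * example routine and its "version" parameter are hypothetical; s_info() is
 * the logging call used elsewhere in this code base.
 */
static void
example_log_service(enum upnp_service_type type, unsigned version)
{
	s_info("discovered UPnP service %s (version %u)",
		upnp_service_type_to_string(type), version);
}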
/**
 * Recursively traverse tree, in depth-first mode.
 *
 * Traversal can be pruned with an optional "enter" callback, and/or by depth.
 *
 * The "action" callback can be invoked before or after processing children.
 * It can be triggered on non-leaf nodes, on leaves only, or on both.
 * It is always allowed to free up the node it is given.
 *
 * The function returns the number of visited nodes, regardless of whether
 * the action was run on them.  This makes it possible to know how many nodes
 * were selected by the "enter" callback and to see the effect of
 * depth-pruning.
 *
 * @param tree		the tree descriptor
 * @param flags		nodes to visit + when to invoke the action callback
 * @param maxdepth	0 = root node only, 1 = root + its children, etc...
 * @param enter		(optional) callback invoked when we enter a node
 * @param action	(optional) action run on the node
 * @param data		user-defined argument passed to callbacks
 *
 * @return number of nodes visited, regardless of whether action was run.
 */
size_t
etree_traverse(const etree_t *tree, unsigned flags, unsigned maxdepth,
	match_fn_t enter, data_fn_t action, void *data)
{
	etree_check(tree);
	g_assert(uint_is_non_negative(maxdepth) || ETREE_MAX_DEPTH == maxdepth);
	g_assert(NULL == action ||
		(flags & ETREE_CALL_BEFORE) ^ (flags & ETREE_CALL_AFTER));
	g_assert(NULL != action ||
		0 == (flags & (ETREE_CALL_BEFORE | ETREE_CALL_AFTER)));
	g_assert(0 != (flags & ETREE_TRAVERSE_ALL));

	if G_UNLIKELY(NULL == tree->root)
		return 0;

	return etree_traverse_internal(tree, tree->root, flags, 0, maxdepth,
		enter, action, data);
}
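/*
 * Illustrative sketch (not from the original sources): using etree_traverse()
 * first to count the nodes within the first two levels of the tree, then to
 * dispose of every node.  The data_fn_t signature (item pointer plus user
 * data), the assumption that items were individually halloc()'ed, and the
 * example routines themselves are made up for this sketch; the flags and
 * ETREE_MAX_DEPTH come from the code above.
 */
static void
example_node_free(void *item, void *data)
{
	(void) data;
	hfree(item);		/* Assumes items were individually halloc()'ed */
}

static void
example_count_and_free(const etree_t *tree)
{
	size_t seen;

	/* Count nodes down to depth 1 (root plus its immediate children) */
	seen = etree_traverse(tree, ETREE_TRAVERSE_ALL, 1, NULL, NULL, NULL);
	s_debug("tree has %zu node%s in its first two levels",
		seen, 1 == seen ? "" : "s");

	/*
	 * Free the whole tree: the action runs after the children have been
	 * processed, hence it is safe for it to dispose of each node.
	 */
	etree_traverse(tree, ETREE_TRAVERSE_ALL | ETREE_CALL_AFTER,
		ETREE_MAX_DEPTH, NULL, example_node_free, NULL);
}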
/**
 * Check which of qsort(), xqsort(), xsort(), tqsort() or smsort() is best for
 * sorting aligned arrays with a native item size of OPSIZ.  At identical
 * performance level, we prefer our own sorting algorithms instead of libc's
 * qsort() for memory allocation purposes.
 *
 * @param items		number of items to use in the sorted array
 * @param idx		index of the virtual routine to update
 * @param verbose	whether to be verbose
 * @param which		either "large" or "small", for logging
 */
static void
vsort_init_items(size_t items, unsigned idx, int verbose, const char *which)
{
	struct vsort_testing tests[] = {
		{ vsort_qsort,	qsort,	0.0, 0, "qsort" },
		{ vsort_xqsort,	xqsort,	0.0, 2, "xqsort" },
		{ vsort_xsort,	xsort,	0.0, 1, "xsort" },
		{ vsort_tqsort,	tqsort,	0.0, 1, "tqsort" },
		{ vsort_smsort,	smsort,	0.0, 1, "smsort" },	/* Only for almost sorted */
	};
	size_t len = items * OPSIZ;
	struct vsort_timing vt;
	size_t loops, highest_loops;
	unsigned i;

	g_assert(uint_is_non_negative(idx));
	g_assert(idx < N_ITEMS(vsort_table));

	vt.data = vmm_alloc(len);
	vt.copy = vmm_alloc(len);
	vt.items = items;
	vt.isize = OPSIZ;
	vt.len = len;
	random_bytes(vt.data, len);

	highest_loops = loops = vsort_loops(items);

	/* The -1 below is to avoid benchmarking smsort() for the general case */

retry_random:
	for (i = 0; i < N_ITEMS(tests) - 1; i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() took %.4f secs for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_random;
		}
	}

	/*
	 * When dealing with a large number of items, redo the tests twice with
	 * another set of random bytes to make sure we're not hitting a special
	 * ordering case.
	 */

	if (items >= VSORT_ITEMS) {
		unsigned j;

		for (j = 0; j < 2; j++) {
			random_bytes(vt.data, len);

			for (i = 0; i < N_ITEMS(tests) - 1; i++) {
				tests[i].v_elapsed +=
					vsort_timeit(tests[i].v_timer, &vt, &loops);

				if (verbose > 1) {
					s_debug("%s() spent %.6f secs total for %s array",
						tests[i].v_name, tests[i].v_elapsed, which);
				}

				if (loops != highest_loops) {
					highest_loops = loops;
					/* Redo all the tests if the number of loops changes */
					s_info("%s(): restarting %s array tests with %zu loops",
						G_STRFUNC, which, loops);
					goto retry_random;
				}
			}
		}
	}

	xqsort(tests, N_ITEMS(tests) - 1, sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	/*
	 * Now sort the data, then randomly perturb them by swapping a few items
	 * so that the array is almost sorted.
	 */

	xqsort(vt.data, vt.items, vt.isize, vsort_long_cmp);
	vsort_perturb_sorted_array(vt.data, vt.items, vt.isize);

retry_sorted:
	for (i = 0; i < N_ITEMS(tests); i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() on almost-sorted took %.4f secs "
				"for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_sorted;
		}
	}

	xqsort(tests, N_ITEMS(tests), sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort_almost = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort_almost() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	vmm_free(vt.data, len);
	vmm_free(vt.copy, len);
}
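/*
 * Illustrative sketch (not from the original sources): how an initialization
 * routine might call vsort_init_items() to calibrate the routines held in
 * vsort_table[].  The table indices, the small-array item count and the
 * calibration routine itself are assumptions made for this example; only
 * vsort_init_items() and VSORT_ITEMS come from the code above.
 */
static void
example_vsort_calibrate(int verbose)
{
	/* Slot 0 tuned with a large array, slot 1 with a small one */
	vsort_init_items(VSORT_ITEMS, 0, verbose, "large");
	vsort_init_items(128, 1, verbose, "small");	/* Small size hypothetical */
}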
/**
 * Load geographic IP data from the supplied FILE.
 *
 * @return the number of entries loaded.
 */
static G_GNUC_COLD uint
gip_load(FILE *f, unsigned idx)
{
	char line[1024];
	int linenum = 0;
	filestat_t buf;

	g_assert(f != NULL);
	g_assert(uint_is_non_negative(idx));
	g_assert(idx < G_N_ELEMENTS(gip_source));

	switch (idx) {
	case GIP_IPV4: iprange_reset_ipv4(geo_db); break;
	case GIP_IPV6: iprange_reset_ipv6(geo_db); break;
	default: g_assert_not_reached();
	}

	if (-1 == fstat(fileno(f), &buf)) {
		g_warning("cannot stat %s: %m", gip_source[idx].file);
	} else {
		gip_source[idx].mtime = buf.st_mtime;
	}

	while (fgets(line, sizeof line, f)) {
		linenum++;

		/*
		 * Remove all trailing spaces in the string.
		 * Otherwise, lines which contain only spaces would cause a warning.
		 */

		if (!file_line_chomp_tail(line, sizeof line, NULL)) {
			g_warning("%s: line %d too long, aborting",
				gip_source[idx].file, linenum);
			break;
		}

		if (file_line_is_skipable(line))
			continue;

		if (GIP_IPV4 == idx)
			gip_parse_ipv4(line, linenum);
		else
			gip_parse_ipv6(line, linenum);
	}

	iprange_sync(geo_db);

	if (GNET_PROPERTY(reload_debug)) {
		if (GIP_IPV4 == idx) {
			g_debug("loaded %u geographical IPv4 ranges (%u hosts)",
				iprange_get_item_count4(geo_db),
				iprange_get_host_count4(geo_db));
		} else {
			g_debug("loaded %u geographical IPv6 ranges",
				iprange_get_item_count6(geo_db));
		}
	}

	return GIP_IPV4 == idx ?
		iprange_get_item_count4(geo_db) : iprange_get_item_count6(geo_db);
}
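/*
 * Illustrative sketch (not from the original sources): how a reload routine
 * might invoke gip_load() on the IPv4 database.  The file-opening logic and
 * the "path" argument are hypothetical; in practice the path would come from
 * gip_source[GIP_IPV4].file, as used inside gip_load() itself.
 */
static void
example_gip_reload_ipv4(const char *path)
{
	FILE *f = fopen(path, "r");
	uint count;

	if (NULL == f) {
		g_warning("cannot open %s: %m", path);
		return;
	}

	count = gip_load(f, GIP_IPV4);
	fclose(f);

	g_debug("reloaded %u IPv4 geographic ranges from %s", count, path);
}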