/* Using the given list of services, stage them into our global state. Every
 * service version is handled. This function can remove entries in the given
 * service_list.
 *
 * Staging a service means that we take all services in service_list and we
 * put them in the staging list (global) which acts as a temporary list that
 * is used by the service key-loading process. In other words, staging a
 * service puts it in a list to be considered when loading the keys and then
 * moved to the main global list. */
static void
stage_services(smartlist_t *service_list)
{
  tor_assert(service_list);

  /* This is v2 specific. Trigger service pruning which will make sure the
   * just configured services end up in the main global list. It should only
   * be done in non-validation mode because the v2 subsystem handles service
   * objects differently. */
  rend_service_prune_list();

  /* Clean up v2 services from the list; we don't need those objects anymore
   * because we validated them all against the others and we want to stage
   * only >= v3 services. And remember, v2 has a different object type which
   * is shadow copied from an hs_service_t type. */
  SMARTLIST_FOREACH_BEGIN(service_list, hs_service_t *, s) {
    if (s->config.version == HS_VERSION_TWO) {
      SMARTLIST_DEL_CURRENT(service_list, s);
      hs_service_free(s);
    }
  } SMARTLIST_FOREACH_END(s);

  /* This is >= v3 specific. Using the newly configured service list, stage
   * them into our global state. Ownership of every object is transferred
   * after this call. */
  hs_service_stage_services(service_list);
}
/** Append to <b>out</b> all circuits in state OR_WAIT waiting for
 * the given connection. */
void
circuit_get_all_pending_on_or_conn(smartlist_t *out,
                                   or_connection_t *or_conn)
{
  tor_assert(out);
  tor_assert(or_conn);
  if (!circuits_pending_or_conns)
    return;

  SMARTLIST_FOREACH_BEGIN(circuits_pending_or_conns, circuit_t *, circ) {
    if (circ->marked_for_close)
      continue;
    if (!circ->n_hop)
      continue;
    tor_assert(circ->state == CIRCUIT_STATE_OR_WAIT);
    if (tor_digest_is_zero(circ->n_hop->identity_digest)) {
      /* Look at addr/port. This is an unkeyed connection. */
      if (!tor_addr_eq(&circ->n_hop->addr, &or_conn->_base.addr) ||
          circ->n_hop->port != or_conn->_base.port)
        continue;
    } else {
      /* We expected a key. See if it's the right one. */
      if (memcmp(or_conn->identity_digest,
                 circ->n_hop->identity_digest, DIGEST_LEN))
        continue;
    }
    smartlist_add(out, circ);
  } SMARTLIST_FOREACH_END(circ);
}
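/* Hypothetical usage sketch (not part of the Tor tree): it illustrates the
 * ownership model implied above.  The caller owns the output smartlist, the
 * function only appends borrowed circuit_t pointers, so the caller frees the
 * list but never the circuits themselves.  Depending on the Tor version the
 * list constructor is smartlist_new() or smartlist_create(). */
static void
example_log_pending_circuit_count(or_connection_t *or_conn)
{
  smartlist_t *pending = smartlist_new();
  circuit_get_all_pending_on_or_conn(pending, or_conn);
  log_info(LD_CIRC, "%d circuit(s) are waiting on this OR connection.",
           smartlist_len(pending));
  smartlist_free(pending); /* Frees only the list, not the circuits. */
}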
/** Finish the configuration protocol version negotiation by printing
 *  whether we support any of the suggested configuration protocols to
 *  stdout, according to the 180 spec.
 *
 *  Return:
 *   0: if we actually found and selected a protocol.
 *  -1: if we couldn't find a common supported protocol or if we couldn't
 *      even parse tor's supported protocol list.
 *
 *  XXX: in the future we should return the protocol version we selected.
 *  Let's keep it simple for now since we have just one protocol version. */
static int
conf_proto_version_negotiation(const managed_proxy_t *proxy)
{
  int r = -1;
  smartlist_t *versions = smartlist_create();
  smartlist_split_string(versions, proxy->vars.conf_proto_version, ",",
                         SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);

  SMARTLIST_FOREACH_BEGIN(versions, char *, version) {
    if (is_supported_conf_protocol(version)) {
      print_protocol_line("%s %s\n", PROTO_NEG_SUCCESS, version);
      r = 0;
      goto done;
    }
  } SMARTLIST_FOREACH_END(version);

  /* we get here if we couldn't find a supported protocol */
  print_protocol_line("%s", PROTO_NEG_FAIL);

 done:
  SMARTLIST_FOREACH(versions, char *, cp, free(cp));
  smartlist_free(versions);
  return r;
}
/** Return 1 iff <b>smartlist</b> contains a tor_addr_t structure
 * that is an IPv4 or IPv6 multicast address. Otherwise, return 0. */
static int
smartlist_contains_multicast_tor_addr(smartlist_t *smartlist)
{
  SMARTLIST_FOREACH_BEGIN(smartlist, tor_addr_t *, tor_addr) {
    if (tor_addr_is_multicast(tor_addr)) {
      return 1;
    }
  } SMARTLIST_FOREACH_END(tor_addr);

  return 0;
}
/** Return 1 iff <b>smartlist</b> contains a tor_addr_t structure
 * that is NULL or the null tor_addr_t. Otherwise, return 0. */
static int
smartlist_contains_null_tor_addr(smartlist_t *smartlist)
{
  SMARTLIST_FOREACH_BEGIN(smartlist, tor_addr_t *, tor_addr) {
    if (tor_addr == NULL || tor_addr_is_null(tor_addr)) {
      return 1;
    }
  } SMARTLIST_FOREACH_END(tor_addr);

  return 0;
}
/** Return 1 iff <b>smartlist</b> contains a tor_addr_t structure
 * that is an IPv6 address. Otherwise, return 0. */
static int
smartlist_contains_ipv6_tor_addr(smartlist_t *smartlist)
{
  SMARTLIST_FOREACH_BEGIN(smartlist, tor_addr_t *, tor_addr) {
    /* Since there's no tor_addr_is_v6, assume all non-v4s are v6 */
    if (!tor_addr_is_v4(tor_addr)) {
      return 1;
    }
  } SMARTLIST_FOREACH_END(tor_addr);

  return 0;
}
/**
 * Helper: clear all entries from <b>cache</b> (but do not delete
 * any that aren't marked for removal).
 */
static void
consensus_cache_clear(consensus_cache_t *cache)
{
  consensus_cache_delete_pending(cache, 0);

  SMARTLIST_FOREACH_BEGIN(cache->entries, consensus_cache_entry_t *, ent) {
    ent->in_cache = NULL;
    consensus_cache_entry_decref(ent);
  } SMARTLIST_FOREACH_END(ent);
  smartlist_free(cache->entries);
  cache->entries = NULL;
}
/** Remove every entry of the transport list that was marked with
 * mark_transport_list if it has not subsequently been un-marked. */
void
sweep_transport_list(void)
{
  if (!transport_list)
    transport_list = smartlist_new();
  SMARTLIST_FOREACH_BEGIN(transport_list, transport_t *, t) {
    if (t->marked_for_removal) {
      SMARTLIST_DEL_CURRENT(transport_list, t);
      transport_free(t);
    }
  } SMARTLIST_FOREACH_END(t);
}
/** Remove every entry of the bridge list that was marked with
 * mark_bridge_list if it has not subsequently been un-marked. */
void
sweep_bridge_list(void)
{
  if (!bridge_list)
    bridge_list = smartlist_new();
  SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, b) {
    if (b->marked_for_removal) {
      SMARTLIST_DEL_CURRENT(bridge_list, b);
      bridge_free(b);
    }
  } SMARTLIST_FOREACH_END(b);
}
/** Look through the routerlist and, using the measured bandwidth cache,
 * count how many measured bandwidths we know. This is used to decide
 * whether we ever trust advertised bandwidths for purposes of assigning
 * flags. */
void
dirserv_count_measured_bws(const smartlist_t *routers)
{
  /* Initialize this first */
  routers_with_measured_bw = 0;

  /* Iterate over the routerlist and count measured bandwidths */
  SMARTLIST_FOREACH_BEGIN(routers, const routerinfo_t *, ri) {
    /* Check if we know a measured bandwidth for this one */
    if (dirserv_has_measured_bw(ri->cache_info.identity_digest)) {
      ++routers_with_measured_bw;
    }
  } SMARTLIST_FOREACH_END(ri);
}
/** Returns the transport in our transport list that has the name <b>name</b>.
 *  Else returns NULL. */
transport_t *
transport_get_by_name(const char *name)
{
  tor_assert(name);

  if (!transport_list)
    return NULL;

  SMARTLIST_FOREACH_BEGIN(transport_list, transport_t *, transport) {
    if (!strcmp(transport->name, name))
      return transport;
  } SMARTLIST_FOREACH_END(transport);

  return NULL;
}
/**
 * Take a list of uint16_t *, and remove every port in the list from the
 * current list of predicted ports.
 */
void
rep_hist_remove_predicted_ports(const smartlist_t *rmv_ports)
{
  /* Let's do this in O(N), not O(N^2). */
  bitarray_t *remove_ports = bitarray_init_zero(UINT16_MAX);
  SMARTLIST_FOREACH(rmv_ports, const uint16_t *, p,
                    bitarray_set(remove_ports, *p));
  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (bitarray_is_set(remove_ports, pp->port)) {
      tor_free(pp);
      predicted_ports_total_alloc -= sizeof(*pp);
      SMARTLIST_DEL_CURRENT(predicted_ports_list, pp);
    }
  } SMARTLIST_FOREACH_END(pp);
  bitarray_free(remove_ports);
}
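/* Hypothetical caller sketch (not part of the Tor tree), showing the element
 * type the function above expects: each entry is a heap-allocated uint16_t,
 * and the caller keeps ownership of both the list and its elements. */
static void
example_remove_two_predicted_ports(void)
{
  smartlist_t *rmv = smartlist_new();
  uint16_t http = 80, https = 443;
  smartlist_add(rmv, tor_memdup(&http, sizeof(http)));
  smartlist_add(rmv, tor_memdup(&https, sizeof(https)));

  rep_hist_remove_predicted_ports(rmv);

  /* The function does not take ownership of rmv or its entries. */
  SMARTLIST_FOREACH(rmv, uint16_t *, p, tor_free(p));
  smartlist_free(rmv);
}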
/** We have a bug that I can't find. Sometimes, very rarely, cpuworkers get
 * stuck in the 'busy' state, even though the cpuworker process thinks of
 * itself as idle. I don't know why. But here's a workaround to kill any
 * cpuworker that's been busy for more than CPUWORKER_BUSY_TIMEOUT. */
static void
cull_wedged_cpuworkers(void)
{
  time_t now = time(NULL);
  smartlist_t *conns = get_connection_array();

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    if (!conn->marked_for_close &&
        conn->type == CONN_TYPE_CPUWORKER &&
        conn->state == CPUWORKER_STATE_BUSY_ONION &&
        conn->timestamp_lastwritten + CPUWORKER_BUSY_TIMEOUT < now) {
      log_notice(LD_BUG,
                 "closing wedged cpuworker. Can somebody find the bug?");
      num_cpuworkers_busy--;
      num_cpuworkers--;
      connection_mark_for_close(conn);
    }
  } SMARTLIST_FOREACH_END(conn);
}
/* Validate the given service against all services in the given list. If the
 * service is ephemeral, this function ignores it. Two services with the same
 * directory path aren't allowed. Return 1 if a duplicate is found, else 0. */
static int
service_is_duplicate_in_list(const smartlist_t *service_list,
                             const hs_service_t *service)
{
  int ret = 0;

  tor_assert(service_list);
  tor_assert(service);

  /* Ephemeral services don't have a directory configured, so there is no
   * need to check for another service in the list having the same path. */
  if (service->config.is_ephemeral) {
    goto end;
  }

  /* XXX: Validate if we have any service that has the given service dir path.
   * This has two problems:
   *
   * a) It's O(n^2), but the same comment from the bottom of
   *    rend_config_services() should apply.
   *
   * b) We only compare directory paths as strings, so we can't
   *    detect two distinct paths that specify the same directory
   *    (which can arise from symlinks, case-insensitivity, bind
   *    mounts, etc.).
   *
   *    It also can't detect that two separate Tor instances are trying
   *    to use the same HiddenServiceDir; for that, we would need a
   *    lock file. But this is enough to detect a simple mistake that
   *    at least one person has actually made. */
  SMARTLIST_FOREACH_BEGIN(service_list, const hs_service_t *, s) {
    if (!strcmp(s->config.directory_path, service->config.directory_path)) {
      log_warn(LD_REND, "Another hidden service is already configured "
                        "for directory %s",
               escaped(service->config.directory_path));
      ret = 1;
      goto end;
    }
  } SMARTLIST_FOREACH_END(s);

 end:
  return ret;
}
/** Helper to conduct tests for populate_live_entry_guards().

   This test adds some entry guards to our list, and then tests
   populate_live_entry_guards() to make sure it filters them correctly.

   <b>num_needed</b> is the number of guard nodes we support. It's
   configurable to make sure we function properly with 1 or 3 guard nodes
   configured. */
static void
populate_live_entry_guards_test_helper(int num_needed)
{
  smartlist_t *our_nodelist = NULL;
  smartlist_t *live_entry_guards = smartlist_new();
  const smartlist_t *all_entry_guards = get_entry_guards();
  or_options_t *options = get_options_mutable();
  int retval;

  /* Set NumEntryGuards to the provided number. */
  options->NumEntryGuards = num_needed;
  tt_int_op(num_needed, OP_EQ, decide_num_guards(options, 0));

  /* The global entry guards smartlist should be empty now. */
  tt_int_op(smartlist_len(all_entry_guards), OP_EQ, 0);

  /* Walk the nodelist and add all nodes as entry guards. */
  our_nodelist = nodelist_get_list();
  tt_int_op(smartlist_len(our_nodelist), OP_EQ, NUMBER_OF_DESCRIPTORS);

  SMARTLIST_FOREACH_BEGIN(our_nodelist, const node_t *, node) {
    const node_t *node_tmp;
    node_tmp = add_an_entry_guard(node, 0, 1, 0, 0);
    tt_assert(node_tmp);
  } SMARTLIST_FOREACH_END(node);

  /* Make sure the nodes were added as entry guards. */
  tt_int_op(smartlist_len(all_entry_guards), OP_EQ, NUMBER_OF_DESCRIPTORS);

  /* Ensure that all the possible entry guards are enough to satisfy us. */
  tt_int_op(smartlist_len(all_entry_guards), OP_GE, num_needed);

  /* Walk the entry guard list for some sanity checking */
  SMARTLIST_FOREACH_BEGIN(all_entry_guards, const entry_guard_t *, entry) {
    /* Since we called add_an_entry_guard() with 'for_discovery' being False,
       all guards should have made_contact enabled. */
    tt_int_op(entry->made_contact, OP_EQ, 1);

    /* Since we don't have a routerstatus, all of the entry guards are
       not directory servers. */
    tt_int_op(entry->is_dir_cache, OP_EQ, 0);
  } SMARTLIST_FOREACH_END(entry);
/** Given a smartlist of listener configs, launch them all and print method
 *  lines as appropriate.
 *  Return 0 if at least one listener was spawned, -1 otherwise. */
static int
open_listeners_managed(const managed_proxy_t *proxy)
{
  int ret = -1;
  const char *transport;

  /* Open listeners for each configuration. */
  SMARTLIST_FOREACH_BEGIN(proxy->configs, config_t *, cfg) {
    transport = get_transport_name_from_config(cfg);
    if (open_listeners(get_event_base(), cfg)) {
      ret = 0; /* success! launched at least one listener. */
      print_method_line(transport, proxy, cfg);
    } else { /* fail. print a method error line. */
      print_method_error_line(transport, proxy, ST_LAUNCH_FAIL_LSN);
    }
  } SMARTLIST_FOREACH_END(cfg);

  return ret;
}
static int
test_conn_download_status_teardown(const struct testcase_t *tc, void *arg)
{
  (void)arg;
  int rv = 0;

  /* Ignore arg, and just loop through the connection array */
  SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
    if (conn) {
      assert_connection_ok(conn, time(NULL));

      /* connection_free_() cleans up requested_resource */
      rv = test_conn_get_rsrc_teardown(tc, conn);
      tt_assert(rv == 1);
    }
  } SMARTLIST_FOREACH_END(conn);

 done:
  return rv;
}
/** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
 * pointers, adds copies of digests to fps_out, and doesn't use the
 * /tor/server/ prefix. For a /d/ request, adds descriptor digests; for other
 * requests, adds identity digests.
 */
int
dirserv_get_routerdesc_spool(smartlist_t *spool_out,
                             const char *key,
                             dir_spool_source_t source,
                             int conn_is_encrypted,
                             const char **msg_out)
{
  *msg_out = NULL;

  if (!strcmp(key, "all")) {
    const routerlist_t *rl = router_get_routerlist();
    SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) {
      spooled_resource_t *spooled;
      spooled = spooled_resource_new(source,
                         (const uint8_t *)r->cache_info.identity_digest,
                         DIGEST_LEN);
      /* Treat "all" requests as if they were unencrypted */
      conn_is_encrypted = 0;
      smartlist_add(spool_out, spooled);
    } SMARTLIST_FOREACH_END(r);
  } else if (!strcmp(key, "authority")) {
/** Append some EntryGuard lines to the Tor state at <b>state</b>.

   <b>entry_guard_lines</b> is a smartlist containing 2-tuple smartlists that
   carry the key and value of each statefile line. As an example:
   entry_guard_lines =
     (("EntryGuard", "name 67E72FF33D7D41BF11C569646A0A7B4B188340DF DirCache"),
      ("EntryGuardDownSince", "2014-06-07 16:02:46 2014-06-07 16:02:46"))
 */
static void
state_insert_entry_guard_helper(or_state_t *state,
                                smartlist_t *entry_guard_lines)
{
  config_line_t **next, *line;

  next = &state->EntryGuards;
  *next = NULL;

  /* Loop over all the state lines in the smartlist */
  SMARTLIST_FOREACH_BEGIN(entry_guard_lines, const smartlist_t *,
                          state_lines) {
    /* Get key and value for each line */
    const char *state_key = smartlist_get(state_lines, 0);
    const char *state_value = smartlist_get(state_lines, 1);

    *next = line = tor_malloc_zero(sizeof(config_line_t));
    line->key = tor_strdup(state_key);
    tor_asprintf(&line->value, "%s", state_value);

    next = &(line->next);
  } SMARTLIST_FOREACH_END(state_lines);
}
/* Remove a directory and all of its subdirectories */
static void
rm_rf(const char *dir)
{
  struct stat st;
  smartlist_t *elements;

  elements = tor_listdir(dir);
  if (elements) {
    SMARTLIST_FOREACH_BEGIN(elements, const char *, cp) {
      char *tmp = NULL;
      tor_asprintf(&tmp, "%s"PATH_SEPARATOR"%s", dir, cp);
      if (0 == stat(tmp,&st) && (st.st_mode & S_IFDIR)) {
        rm_rf(tmp);
      } else {
        if (unlink(tmp)) {
          fprintf(stderr, "Error removing %s: %s\n", tmp, strerror(errno));
        }
      }
      tor_free(tmp);
    } SMARTLIST_FOREACH_END(cp);
    SMARTLIST_FOREACH(elements, char *, cp, tor_free(cp));
    smartlist_free(elements);
  }
/** Return a newly allocated pointer to a list of uint16_t * for ports that
 * are likely to be asked for in the near future.
 */
smartlist_t *
rep_hist_get_predicted_ports(time_t now)
{
  int predicted_circs_relevance_time;
  smartlist_t *out = smartlist_new();
  tor_assert(predicted_ports_list);

  predicted_circs_relevance_time = (int)prediction_timeout;

  /* clean out obsolete entries */
  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (pp->time + predicted_circs_relevance_time < now) {
      log_debug(LD_CIRC, "Expiring predicted port %d", pp->port);

      predicted_ports_total_alloc -= sizeof(predicted_port_t);
      tor_free(pp);
      SMARTLIST_DEL_CURRENT(predicted_ports_list, pp);
    } else {
      smartlist_add(out, tor_memdup(&pp->port, sizeof(uint16_t)));
    }
  } SMARTLIST_FOREACH_END(pp);
  return out;
}
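/* Hypothetical caller sketch (not part of the Tor tree): since the list and
 * every uint16_t in it are freshly allocated by the function above, the
 * caller is responsible for freeing both. */
static void
example_log_predicted_ports(time_t now)
{
  smartlist_t *ports = rep_hist_get_predicted_ports(now);
  SMARTLIST_FOREACH(ports, const uint16_t *, p,
                    log_debug(LD_CIRC, "Predicted port: %d", (int)*p));
  SMARTLIST_FOREACH(ports, uint16_t *, p, tor_free(p));
  smartlist_free(ports);
}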
/** Remember that <b>port</b> has been asked for as of time <b>now</b>.
 * This is used for predicting what sorts of streams we'll make in the
 * future and making exit circuits to anticipate that. */
void
rep_hist_note_used_port(time_t now, uint16_t port)
{
  tor_assert(predicted_ports_list);

  if (!port) /* record nothing */
    return;

  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (pp->port == port) {
      pp->time = now;

      last_prediction_add_time = now;
      log_info(LD_CIRC,
               "New port prediction added. Will continue predictive circ "
               "building for %d more seconds.",
               predicted_ports_prediction_time_remaining(now));
      return;
    }
  } SMARTLIST_FOREACH_END(pp);

  /* it's not there yet; we need to add it */
  add_predicted_port(now, port);
}
/** Adjust the cell count of every active circuit tracked by <b>pol</b> so
 * that they are scaled with respect to <b>cur_tick</b> */
static void
scale_active_circuits(ewma_policy_data_t *pol, unsigned cur_tick)
{
  double factor;

  tor_assert(pol);
  tor_assert(pol->active_circuit_pqueue);

  factor =
    get_scale_factor(
      pol->active_circuit_pqueue_last_recalibrated,
      cur_tick);
  /** Ordinarily it isn't okay to change the value of an element in a heap,
   * but it's okay here, since we are preserving the order. */
  SMARTLIST_FOREACH_BEGIN(
      pol->active_circuit_pqueue, cell_ewma_t *, e) {
    tor_assert(e->last_adjusted_tick ==
               pol->active_circuit_pqueue_last_recalibrated);
    e->cell_count *= factor;
    e->last_adjusted_tick = cur_tick;
  } SMARTLIST_FOREACH_END(e);
  pol->active_circuit_pqueue_last_recalibrated = cur_tick;
}
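/* Why rescaling in place is safe (added note, not in the original source):
 * every key in the priority queue is multiplied by the same factor, and for
 * any positive factor f we have a <= b  <=>  a*f <= b*f, so the relative
 * order of all elements, and therefore the heap invariant, is unchanged and
 * no re-heapify is needed.  The scale factor here is a positive
 * exponential-decay term, so this condition holds. */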
/** Update the routerset's <b>countries</b> bitarray_t. Called whenever
 * the GeoIP IPv4 database is reloaded.
 */
void
routerset_refresh_countries(routerset_t *target)
{
  int cc;
  bitarray_free(target->countries);

  if (!geoip_is_loaded(AF_INET)) {
    target->countries = NULL;
    target->n_countries = 0;
    return;
  }
  target->n_countries = geoip_get_n_countries();
  target->countries = bitarray_init_zero(target->n_countries);
  SMARTLIST_FOREACH_BEGIN(target->country_names, const char *, country) {
    cc = geoip_get_country(country);
    if (cc >= 0) {
      tor_assert(cc < target->n_countries);
      bitarray_set(target->countries, cc);
    } else {
      log_warn(LD_CONFIG, "Country code '%s' is not recognized.",
               country);
    }
  } SMARTLIST_FOREACH_END(country);
}
/** Tell the nodelist that the current usable consensus is <b>ns</b>.
 * This makes the nodelist change all of the routerstatus entries for
 * the nodes, drop nodes that no longer have enough info to get used,
 * and grab microdescriptors into nodes as appropriate.
 */
void
nodelist_set_consensus(networkstatus_t *ns)
{
  const or_options_t *options = get_options();
  int authdir = authdir_mode_v3(options);
  int client = !server_mode(options);

  init_nodelist();
  if (ns->flavor == FLAV_MICRODESC)
    (void) get_microdesc_cache(); /* Make sure it exists first. */

  SMARTLIST_FOREACH(the_nodelist->nodes, node_t *, node,
                    node->rs = NULL);

  SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
    node_t *node = node_get_or_create(rs->identity_digest);
    node->rs = rs;
    if (ns->flavor == FLAV_MICRODESC) {
      if (node->md == NULL ||
          tor_memneq(node->md->digest,rs->descriptor_digest,DIGEST256_LEN)) {
        if (node->md)
          node->md->held_by_nodes--;
        node->md = microdesc_cache_lookup_by_digest256(NULL,
                                                       rs->descriptor_digest);
        if (node->md)
          node->md->held_by_nodes++;
      }
    }

    node_set_country(node);

    /* If we're not an authdir, believe others. */
    if (!authdir) {
      node->is_valid = rs->is_valid;
      node->is_running = rs->is_flagged_running;
      node->is_fast = rs->is_fast;
      node->is_stable = rs->is_stable;
      node->is_possible_guard = rs->is_possible_guard;
      node->is_exit = rs->is_exit;
      node->is_bad_directory = rs->is_bad_directory;
      node->is_bad_exit = rs->is_bad_exit;
      node->is_hs_dir = rs->is_hs_dir;
      node->ipv6_preferred = 0;
      if (client && options->ClientPreferIPv6ORPort == 1 &&
          (tor_addr_is_null(&rs->ipv6_addr) == 0 ||
           (node->md && tor_addr_is_null(&node->md->ipv6_addr) == 0)))
        node->ipv6_preferred = 1;
    }
  } SMARTLIST_FOREACH_END(rs);

  nodelist_purge();

  if (! authdir) {
    SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
      /* We have no routerstatus for this router. Clear flags so we can skip
       * it, maybe.*/
      if (!node->rs) {
        tor_assert(node->ri); /* if it had only an md, or nothing, purge
                               * would have removed it. */
        if (node->ri->purpose == ROUTER_PURPOSE_GENERAL) {
          /* Clear all flags. */
          node->is_valid = node->is_running = node->is_hs_dir =
            node->is_fast = node->is_stable =
            node->is_possible_guard = node->is_exit =
            node->is_bad_exit = node->is_bad_directory =
            node->ipv6_preferred = 0;
        }
      }
    } SMARTLIST_FOREACH_END(node);
  }
/** Iterate over each of the supported <b>backends</b> and attempt to add a
 * port forward for the port stored in <b>tor_fw_options</b>. */
static void
tor_fw_add_ports(tor_fw_options_t *tor_fw_options,
                 backends_t *backends)
{
  int i;
  int r = 0;
  int succeeded = 0;

  if (tor_fw_options->verbose)
    fprintf(stderr, "V: %s\n", __func__);

  /** Loop all ports that need to be forwarded, and try to use our
   *  backends for each port. If a backend succeeds, break the loop,
   *  report success and get to the next port. If all backends fail,
   *  report failure for that port. */
  SMARTLIST_FOREACH_BEGIN(tor_fw_options->ports_to_forward,
                          port_to_forward_t *, port_to_forward) {
    succeeded = 0;

    for (i=0; i<backends->n_backends; ++i) {
      if (tor_fw_options->verbose) {
        fprintf(stderr, "V: running backend_state now: %i\n", i);
        fprintf(stderr, "V: size of backend state: %u\n",
                (int)(backends->backend_ops)[i].state_len);
        fprintf(stderr, "V: backend state name: %s\n",
                (const char *) backends->backend_ops[i].name);
      }

      r = backends->backend_ops[i].add_tcp_mapping(
                                          port_to_forward->internal_port,
                                          port_to_forward->external_port,
                                          tor_fw_options->verbose,
                                          backends->backend_state[i]);
      if (r == 0) { /* backend success */
        tor_fw_helper_report_port_fw_success(port_to_forward->internal_port,
                                             port_to_forward->external_port,
                                             backends->backend_ops[i].name);
        succeeded = 1;
        break;
      }

      fprintf(stderr, "tor-fw-helper: tor_fw_add_port backend %s "
              "returned: %i\n",
              (const char *) backends->backend_ops[i].name, r);
    }

    if (!succeeded) { /* all backends failed */
      char *list_of_backends_str = get_list_of_backends_string(backends);
      char *fail_msg = NULL;
      tor_asprintf(&fail_msg,
                   "All port forwarding backends (%s) failed.",
                   list_of_backends_str);
      tor_fw_helper_report_port_fw_fail(port_to_forward->internal_port,
                                        port_to_forward->external_port,
                                        fail_msg);
      tor_free(list_of_backends_str);
      tor_free(fail_msg);
    }
  } SMARTLIST_FOREACH_END(port_to_forward);
}
/** Look through the routerlist, the Mean Time Between Failure history, and
 * the Weighted Fractional Uptime history, and use them to set thresholds for
 * the Stable, Fast, and Guard flags. Update the fields stable_uptime,
 * stable_mtbf, enough_mtbf_info, guard_wfu, guard_tk, fast_bandwidth,
 * guard_bandwidth_including_exits, and guard_bandwidth_excluding_exits.
 *
 * Also, set the is_exit flag of each router appropriately. */
void
dirserv_compute_performance_thresholds(digestmap_t *omit_as_sybil)
{
  int n_active, n_active_nonexit, n_familiar;
  uint32_t *uptimes, *bandwidths_kb, *bandwidths_excluding_exits_kb;
  long *tks;
  double *mtbfs, *wfus;
  smartlist_t *nodelist;
  time_t now = time(NULL);
  const or_options_t *options = get_options();

  /* Require mbw? */
  int require_mbw =
    (dirserv_get_last_n_measured_bws() >
     options->MinMeasuredBWsForAuthToIgnoreAdvertised) ? 1 : 0;

  /* initialize these all here, in case there are no routers */
  stable_uptime = 0;
  stable_mtbf = 0;
  fast_bandwidth_kb = 0;
  guard_bandwidth_including_exits_kb = 0;
  guard_bandwidth_excluding_exits_kb = 0;
  guard_tk = 0;
  guard_wfu = 0;

  nodelist_assert_ok();
  nodelist = nodelist_get_list();

  /* Initialize arrays that will hold values for each router. We'll
   * sort them and use that to compute thresholds. */
  n_active = n_active_nonexit = 0;
  /* Uptime for every active router. */
  uptimes = tor_calloc(smartlist_len(nodelist), sizeof(uint32_t));
  /* Bandwidth for every active router. */
  bandwidths_kb = tor_calloc(smartlist_len(nodelist), sizeof(uint32_t));
  /* Bandwidth for every active non-exit router. */
  bandwidths_excluding_exits_kb =
    tor_calloc(smartlist_len(nodelist), sizeof(uint32_t));
  /* Weighted mean time between failure for each active router. */
  mtbfs = tor_calloc(smartlist_len(nodelist), sizeof(double));
  /* Time-known for each active router. */
  tks = tor_calloc(smartlist_len(nodelist), sizeof(long));
  /* Weighted fractional uptime for each active router. */
  wfus = tor_calloc(smartlist_len(nodelist), sizeof(double));

  /* Now, fill in the arrays. */
  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    if (options->BridgeAuthoritativeDir &&
        node->ri &&
        node->ri->purpose != ROUTER_PURPOSE_BRIDGE)
      continue;

    routerinfo_t *ri = node->ri;
    if (ri) {
      node->is_exit = (!router_exit_policy_rejects_all(ri) &&
                       exit_policy_is_general_exit(ri->exit_policy));
    }

    if (router_counts_toward_thresholds(node, now, omit_as_sybil,
                                        require_mbw)) {
      const char *id = node->identity;
      uint32_t bw_kb;

      /* resolve spurious clang shallow analysis null pointer errors */
      tor_assert(ri);

      uptimes[n_active] = (uint32_t)real_uptime(ri, now);
      mtbfs[n_active] = rep_hist_get_stability(id, now);
      tks [n_active] = rep_hist_get_weighted_time_known(id, now);
      bandwidths_kb[n_active] = bw_kb = dirserv_get_credible_bandwidth_kb(ri);
      if (!node->is_exit || node->is_bad_exit) {
        bandwidths_excluding_exits_kb[n_active_nonexit] = bw_kb;
        ++n_active_nonexit;
      }
      ++n_active;
    }
  } SMARTLIST_FOREACH_END(node);

  /* Now, compute thresholds. */
  if (n_active) {
    /* The median uptime is stable. */
    stable_uptime = median_uint32(uptimes, n_active);
    /* The median mtbf is stable, if we have enough mtbf info */
    stable_mtbf = median_double(mtbfs, n_active);
    /* The 12.5th percentile bandwidth is fast. */
    fast_bandwidth_kb = find_nth_uint32(bandwidths_kb, n_active, n_active/8);
    /* (Now bandwidths is sorted.) */
    if (fast_bandwidth_kb < RELAY_REQUIRED_MIN_BANDWIDTH/(2 * 1000))
      fast_bandwidth_kb = bandwidths_kb[n_active/4];
    guard_bandwidth_including_exits_kb =
      third_quartile_uint32(bandwidths_kb, n_active);
    guard_tk = find_nth_long(tks, n_active, n_active/8);
  }

  if (guard_tk > TIME_KNOWN_TO_GUARANTEE_FAMILIAR)
    guard_tk = TIME_KNOWN_TO_GUARANTEE_FAMILIAR;

  {
    /* We can vote on a parameter for the minimum and maximum. */
#define ABSOLUTE_MIN_VALUE_FOR_FAST_FLAG 4
    int32_t min_fast_kb, max_fast_kb, min_fast, max_fast;
    min_fast = networkstatus_get_param(NULL, "FastFlagMinThreshold",
                                       ABSOLUTE_MIN_VALUE_FOR_FAST_FLAG,
                                       ABSOLUTE_MIN_VALUE_FOR_FAST_FLAG,
                                       INT32_MAX);
    if (options->TestingTorNetwork) {
      min_fast = (int32_t)options->TestingMinFastFlagThreshold;
    }
    max_fast = networkstatus_get_param(NULL, "FastFlagMaxThreshold",
                                       INT32_MAX, min_fast, INT32_MAX);
    min_fast_kb = min_fast / 1000;
    max_fast_kb = max_fast / 1000;
    if (fast_bandwidth_kb < (uint32_t)min_fast_kb)
      fast_bandwidth_kb = min_fast_kb;
    if (fast_bandwidth_kb > (uint32_t)max_fast_kb)
      fast_bandwidth_kb = max_fast_kb;
  }
  /* Protect sufficiently fast nodes from being pushed out of the set
   * of Fast nodes. */
  if (options->AuthDirFastGuarantee &&
      fast_bandwidth_kb > options->AuthDirFastGuarantee/1000)
    fast_bandwidth_kb = (uint32_t)options->AuthDirFastGuarantee/1000;

  /* Now that we have a time-known that 7/8 routers are known longer than,
   * fill wfus with the wfu of every such "familiar" router. */
  n_familiar = 0;

  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    if (router_counts_toward_thresholds(node, now,
                                        omit_as_sybil, require_mbw)) {
      routerinfo_t *ri = node->ri;
      const char *id = ri->cache_info.identity_digest;
      long tk = rep_hist_get_weighted_time_known(id, now);
      if (tk < guard_tk)
        continue;
      wfus[n_familiar++] = rep_hist_get_weighted_fractional_uptime(id, now);
    }
  } SMARTLIST_FOREACH_END(node);
  if (n_familiar)
    guard_wfu = median_double(wfus, n_familiar);
  if (guard_wfu > WFU_TO_GUARANTEE_GUARD)
    guard_wfu = WFU_TO_GUARANTEE_GUARD;

  enough_mtbf_info = rep_hist_have_measured_enough_stability();

  if (n_active_nonexit) {
    guard_bandwidth_excluding_exits_kb =
      find_nth_uint32(bandwidths_excluding_exits_kb,
                      n_active_nonexit, n_active_nonexit*3/4);
  }

  log_info(LD_DIRSERV,
           "Cutoffs: For Stable, %lu sec uptime, %lu sec MTBF. "
           "For Fast: %lu kilobytes/sec. "
           "For Guard: WFU %.03f%%, time-known %lu sec, "
           "and bandwidth %lu or %lu kilobytes/sec. "
           "We%s have enough stability data.",
           (unsigned long)stable_uptime,
           (unsigned long)stable_mtbf,
           (unsigned long)fast_bandwidth_kb,
           guard_wfu*100,
           (unsigned long)guard_tk,
           (unsigned long)guard_bandwidth_including_exits_kb,
           (unsigned long)guard_bandwidth_excluding_exits_kb,
           enough_mtbf_info ? "" : " don't");

  tor_free(uptimes);
  tor_free(mtbfs);
  tor_free(bandwidths_kb);
  tor_free(bandwidths_excluding_exits_kb);
  tor_free(tks);
  tor_free(wfus);
}
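/* Worked example for the selection arithmetic above (illustrative only,
 * with made-up numbers, and assuming find_nth_*() selects the k-th smallest
 * element): suppose n_active == 8 and the sorted bandwidths in kB/s are
 * {50, 80, 120, 200, 300, 450, 700, 900}.  Then
 *   fast_bandwidth_kb = find_nth_uint32(bw, 8, 8/8 = 1)  -> 80  (12.5th pct)
 *   guard_bandwidth_including_exits_kb
 *                     = third_quartile_uint32(bw, 8)     -> 700 (75th pct)
 * guard_tk is picked the same way (the n_active/8-th smallest time-known),
 * i.e. a value that roughly 7/8 of the active routers have exceeded, which
 * is what the "familiar" loop above relies on. */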