/** * Add GUID to the banned list or refresh the fact that we are still seeing * it as being worth banning. */ void guid_add_banned(const guid_t *guid) { struct guiddata *gd; struct guiddata new_gd; gd = get_guiddata(guid); if (NULL == gd) { gd = &new_gd; gd->create_time = gd->last_time = tm_time(); gnet_stats_inc_general(GNR_BANNED_GUID_HELD); if (GNET_PROPERTY(guid_debug)) { g_debug("GUID banning %s", guid_hex_str(guid)); } } else { gd->last_time = tm_time(); } dbmw_write(db_guid, guid, gd, sizeof *gd); }
/** * @return NULL on error, a newly allocated string via halloc() otherwise. */ static char * uhc_get_next(void) { struct uhc *uhc; char *host; time_t now; size_t n; g_return_val_if_fail(uhc_list, NULL); now = tm_time(); n = hash_list_count(uhc_list); if (0 == n) return NULL; /* * Wait UHC_RETRY_AFTER secs before contacting the UHC again. * Can't be too long because the UDP reply may get lost if the * requesting host already has a saturated b/w. * If we come here, it's because we're lacking hosts for establishing * a Gnutella connection, after we exhausted our caches. */ while (n-- != 0) { uhc = hash_list_head(uhc_list); g_assert(uhc != NULL); /* We computed count on entry */ if (delta_time(now, uhc->stamp) >= UHC_RETRY_AFTER) goto found; hash_list_moveto_tail(uhc_list, uhc); } return NULL; found: uhc->stamp = now; host = h_strdup(uhc->host); if (uhc->used < UHC_MAX_ATTEMPTS) { uhc->used++; hash_list_moveto_tail(uhc_list, uhc); } else { hash_list_remove(uhc_list, uhc); uhc_free(&uhc); } return host; }
/*
 * Avoid nodes being stuck helplessly due to completely stale caches.
 * @return TRUE if an UHC may be contacted, FALSE if it's not permissible.
 */
static gboolean
host_cache_allow_bypass(void)
{
	static time_t last_try;

	/* No bypass whilst we still have Gnutella connections */
	if (node_count() > 0)
		return FALSE;

	/* Wait at least 2 minutes after starting up */
	if (delta_time(tm_time(), GNET_PROPERTY(start_stamp)) < 2 * 60)
		return FALSE;

	/*
	 * Allow again after 12 hours, useful after unexpected network outage
	 * or downtime.
	 */

	if (last_try != 0 && delta_time(tm_time(), last_try) < 12 * 3600)
		return FALSE;

	last_try = tm_time();
	return TRUE;
}
/**
 * Given a node which was first seen at ``first_seen'' and last seen at
 * ``last_seen'', return probability that node still be alive now.
 *
 * @param first_seen	first time node was seen / created
 * @param last_seen		last time node was seen
 *
 * @return the probability that the node be still alive now.
 */
double
stable_still_alive_probability(time_t first_seen, time_t last_seen)
{
	time_delta_t lifetime = delta_time(last_seen, first_seen);
	time_delta_t idle;

	/* A non-positive lifetime means we cannot estimate anything */
	if (lifetime <= 0)
		return 0.0;

	idle = delta_time(tm_time(), last_seen);

	return stable_alive_probability(lifetime, idle);
}
/**
 * Allocate a new NAT-PMP gateway.
 *
 * @param gateway	address of the gateway
 * @param sssoe		seconds since start of epoch, as reported by gateway
 * @param wan_ip	external (WAN) address of the gateway
 *
 * @return a newly allocated NAT-PMP gateway descriptor.
 */
static natpmp_t *
natpmp_alloc(host_addr_t gateway, unsigned sssoe, host_addr_t wan_ip)
{
	natpmp_t *nat;

	WALLOC(nat);
	nat->magic = NATPMP_MAGIC;
	nat->gateway = gateway;
	nat->wan_ip = wan_ip;
	nat->sssoe = sssoe;
	nat->last_update = tm_time();

	return nat;
}
/**
 * Start the nagle timer.
 */
static void
deflate_nagle_start(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;

	g_assert(!(attr->flags & DF_NAGLE));
	g_assert(NULL == attr->tm_ev);

	if (!attr->nagle)
		return;			/* Nagle not allowed */

	/* Record when the timer was armed, so delays can be bounded later */
	attr->nagle_start = tm_time();
	attr->flags |= DF_NAGLE;
	attr->tm_ev = cq_insert(attr->cq, BUFFER_NAGLE, deflate_nagle_timeout, tx);
}
/** * Can the node which timed-out in the past be considered again as the * target of an RPC, and therefore returned in k-closest lookups? */ bool knode_can_recontact(const knode_t *kn) { time_t grace; time_delta_t elapsed; knode_check(kn); if (!kn->rpc_timeouts) return TRUE; /* Timeout condition was cleared */ grace = 1 << kn->rpc_timeouts; elapsed = delta_time(tm_time(), kn->last_sent); return elapsed > grace; }
/**
 * DBMW foreach iterator to remove expired DB keys.
 * @return TRUE if entry must be deleted.
 */
static bool
publisher_remove_expired(void *u_key, void *value, size_t u_len, void *u_data)
{
	const struct pubdata *data = value;

	(void) u_key;
	(void) u_len;
	(void) u_data;

	/*
	 * Entries for which we should re-enqueue a publish request now
	 * have expired and can be deleted.
	 */

	return 0 <= delta_time(tm_time(), data->next_enqueue);
}
/** * Return entry age in seconds, (time_delta_t) -1 if not found. */ time_delta_t aging_age(const aging_table_t *ag, const void *key) { struct aging_value *aval; time_delta_t age; aging_check(ag); aging_synchronize(ag); aval = hikset_lookup(ag->table, key); age = aval == NULL ? (time_delta_t) -1 : delta_time(tm_time(), aval->last_insert); aging_return(ag, age); }
/**
 * Lookup value in table, and if found, revitalize entry, restoring the
 * initial lifetime the key/value pair had at insertion time.
 */
void *
aging_lookup_revitalise(const aging_table_t *ag, gconstpointer key)
{
	struct aging_value *entry;

	aging_check(ag);

	entry = g_hash_table_lookup(ag->table, key);
	if (NULL == entry)
		return NULL;

	g_assert(entry->cq_ev != NULL);

	/* Restore the full TTL by rescheduling the expiration event */
	entry->last_insert = tm_time();
	cq_resched(entry->cq_ev, 1000 * entry->ttl);

	return entry->value;
}
/**
 * Parse gtk-gnutella's version number in User-Agent/Server string `str'
 * and extract timestamp into `ver'.
 */
static void
version_stamp(const char *str, version_t *ver)
{
	static char stamp[256];
	const char *start, *end;
	size_t len;

	ver->timestamp = 0;

	/*
	 * A typical vendor string with a timestamp would look like:
	 *
	 *    gtk-gnutella/0.85 (04/04/2002; X11; FreeBSD 4.6-STABLE i386)
	 *
	 * The date stamp is formatted as DD/MM/YYYY here, but the date2time()
	 * routine is also able to parse the ISO format YYYY-MM-DD which is
	 * being used starting 2004-03-02.
	 */

	start = strchr(str, '(');
	if (NULL == start)
		return;			/* No parenthesized section at all */

	start++;
	end = strchr(start, ';');
	if (NULL == end)
		end = strchr(start, ')');	/* Only date present: short version */

	if (NULL == end) {
		g_warning("no timestamp in \"%s\"", str);
		return;
	}

	len = end - start + 1;

	/*
	 * Using date2time() will allow us to possibly change the date
	 * format in the future, without impacting the ability of older
	 * servents to parse it.
	 */

	g_strlcpy(stamp, start, MIN(len, sizeof(stamp)));
	ver->timestamp = date2time(stamp, tm_time());

	if (-1 == ver->timestamp) {
		ver->timestamp = 0;
		g_warning("could not parse timestamp \"%s\" in \"%s\"", start, str);
	}
}
/**
 * Register a UDP ping so that a matching pong can be recognized later.
 *
 * @param muid		the ping's MUID (copied)
 * @param addr		address the ping is sent to
 * @param port		port the ping is sent to
 * @param cb		optional callback invoked upon reply / timeout
 * @param data		opaque callback argument
 * @param multiple	whether several replies are expected
 *
 * @return TRUE if the ping was registered, FALSE otherwise (duplicate MUID
 * or registration table too loaded).
 */
static bool
udp_ping_register(const struct guid *muid, host_addr_t addr, uint16 port,
	udp_ping_cb_t cb, void *data, bool multiple)
{
	struct udp_ping *ping;
	uint count;

	g_assert(muid);
	g_return_val_if_fail(udp_pings, FALSE);

	if (hash_list_contains(udp_pings, muid))
		return FALSE;		/* Probably a duplicate */

	/*
	 * Random early drop: refuse outright when the table is full, and drop
	 * with a probability growing with the fill level once above 3/4 capacity.
	 */

	count = hash_list_length(udp_pings);
	if (count >= UDP_PING_MAX)
		return FALSE;
	if (count > (UDP_PING_MAX / 4) * 3) {
		if (random_value(UDP_PING_MAX - 1) < count)
			return FALSE;
	}

	WALLOC(ping);
	ping->muid = *muid;
	ping->added = tm_time();

	{
		gnet_host_t host;
		gnet_host_set(&host, addr, port);
		ping->host = atom_host_get(&host);
	}

	if (NULL == cb) {
		ping->callback = NULL;
	} else {
		WALLOC0(ping->callback);
		ping->callback->cb = cb;
		ping->callback->data = data;
		ping->callback->multiple = booleanize(multiple);
	}

	hash_list_append(udp_pings, ping);
	return TRUE;
}
/** * Check the given IP against the entries in the bogus IP database. * * @returns TRUE if found, and FALSE if not. */ bool bogons_check(const host_addr_t ha) { if G_UNLIKELY(NULL == bogons_db) return FALSE; /* * If the bogons file is too ancient, there is a risk it may flag an * IP as bogus whereas it is no longer reserved. IPv4 address shortage * makes that likely. * --RAM, 2010-11-07 */ if (delta_time(tm_time(), bogons_mtime) > 15552000) /* ~6 months */ return !host_addr_is_routable(ha); return 0 != iprange_get_addr(bogons_db, ha); }
/**
 * Add value to the table.
 *
 * If it was already present, its lifetime is reset to the aging delay.
 *
 * The key argument is freed immediately if there is a free routine for
 * keys and the key was present in the table.
 *
 * The previous value is freed and replaced by the new one if there is
 * an insertion conflict and the key pointers are different.
 *
 * @param ag		the aging table
 * @param key		the key (ownership taken by the table on new insertion)
 * @param value		the value to associate with the key
 */
void
aging_insert(aging_table_t *ag, const void *key, void *value)
{
	bool found;
	void *ovalue;
	time_t now = tm_time();		/* Single timestamp for this insertion */
	struct aging_value *aval;

	aging_check(ag);
	/* NOTE(review): aging_synchronize() / aging_return_void() presumably
	 * bracket a critical section -- confirm against their definitions. */
	aging_synchronize(ag);

	found = hikset_lookup_extended(ag->table, key, &ovalue);
	if (found) {
		aval = ovalue;
		if (ag->kvfree != NULL) {
			/*
			 * We discard the new and keep the old key instead.
			 * That way, we don't have to update the hash table.
			 */
			(*ag->kvfree)(deconstify_pointer(key), aval->value);
		}

		/*
		 * Value existed for this key, reset its lifetime by moving the
		 * entry to the tail of the list.
		 */

		aval->value = value;
		aval->last_insert = now;
		elist_moveto_tail(&ag->list, aval);
	} else {
		/* New key: allocate a fresh entry; the table now owns the key */
		WALLOC(aval);
		aval->value = value;
		aval->key = deconstify_pointer(key);
		aval->last_insert = now;
		hikset_insert(ag->table, aval);
		elist_append(&ag->list, aval);
	}

	aging_return_void(ag);
}
/**
 * Update the row with the given nodeinfo. If row is -1 the row number
 * is determined by the node_id contained in the gnet_node_info_t.
 *
 * @param n		the node information to display
 * @param row	the clist row to update, or -1 to locate it via node_id
 */
static void
nodes_gui_update_node_info(gnet_node_info_t *n, gint row)
{
	GtkCList *clist = GTK_CLIST(gui_main_window_lookup("clist_nodes"));

	g_assert(n != NULL);

	if (row == -1) {
		row = gtk_clist_find_row_from_data(clist,
			deconstify_gpointer(n->node_id));
	}

	if (row != -1) {
		gchar ver_buf[64];
		gnet_node_status_t status;
		time_t now = tm_time();

		if (guc_node_get_status(n->node_id, &status)) {
			gtk_clist_set_text(clist, row, c_gnet_user_agent,
				n->vendor ? lazy_utf8_to_locale(n->vendor) : "...");

			gtk_clist_set_text(clist, row, c_gnet_loc,
				deconstify_gchar(iso3166_country_cc(n->country)));

			str_bprintf(ver_buf, sizeof ver_buf, "%d.%d",
				n->proto_major, n->proto_minor);
			gtk_clist_set_text(clist, row, c_gnet_version, ver_buf);

			if (status.status == GTA_NODE_CONNECTED)
				gtk_clist_set_text(clist, row, c_gnet_connected,
					short_uptime(delta_time(now, status.connect_date)));

			/*
			 * Simplified: the original had a redundant inner ternary
			 * (status.up_date ? ... : "...") whose "..." branch was
			 * unreachable under this very same condition.
			 */
			if (status.up_date)
				gtk_clist_set_text(clist, row, c_gnet_uptime,
					short_uptime(delta_time(now, status.up_date)));

			gtk_clist_set_text(clist, row, c_gnet_info,
				nodes_gui_common_status_str(&status));
		}
	} else {
		g_warning("%s(): no matching row found", G_STRFUNC);
	}
}
/**
 * Add the servent update as a "UP" child to the root.
 */
static void
g2_build_add_uptime(g2_tree_t *t)
{
	char buf[8];
	int len;
	time_delta_t uptime;
	g2_tree_t *child;

	/*
	 * The uptime will typically be small, hence it is encoded as a variable
	 * length little-endian value, with trailing zeros removed.  Usually
	 * only 2 or 3 bytes will be necessary to encode the uptime (in seconds).
	 */

	uptime = delta_time(tm_time(), GNET_PROPERTY(start_stamp));
	len = vlint_encode(uptime, buf);

	child = g2_tree_alloc_copy("UP", buf, len);	/* No trailing 0s */
	g2_tree_add_child(t, child);
}
/**
 * Called when we get a reply from the ADNS process.
 *
 * @param addrs		resolved addresses (empty on failure)
 * @param n			amount of addresses in ``addrs''
 * @param udata		the struct whitelist_dns context (freed here)
 */
static void
whitelist_dns_cb(const host_addr_t *addrs, size_t n, void *udata)
{
	struct whitelist_dns *ctx = udata;
	struct whitelist *item = ctx->item;

	if (ctx->generation != whitelist_generation) {
		/* Reply pertains to an older whitelist generation: discard */
		if (GNET_PROPERTY(whitelist_debug))
			log_whitelist_item(item, "late DNS resolution");
		if (!ctx->revalidate)
			whitelist_free(item);	/* Pending item was ours to release */
		goto done;
	}

	item->host->last_resolved = tm_time();

	if (n < 1) {
		/* Resolution failed */
		if (GNET_PROPERTY(whitelist_debug))
			log_whitelist_item(item, "could not DNS-resolve");
		if (ctx->revalidate) {
			item->addr = ipv4_unspecified;
			item->bits = 0;
		} else {
			whitelist_free(item);
		}
		goto done;
	}

	item->addr = addrs[random_value(n - 1)];	/* Pick one randomly */
	item->bits = addr_default_mask(item->addr);

	if (GNET_PROPERTY(whitelist_debug) > 1) {
		g_debug("WLIST DNS-resolved %s as %s (out of %zu result%s)",
			item->host->name, host_addr_to_string(item->addr),
			n, plural(n));
	}

	if (!ctx->revalidate)
		whitelist_add(item);

done:
	WFREE(ctx);
}
/** * Callout queue periodic event for request load updates. * Also reclaims dead keys holding no values. */ static bool keys_periodic_load(void *unused_obj) { struct load_ctx ctx; (void) unused_obj; ctx.values = 0; ctx.now = tm_time(); hikset_foreach_remove(keys, keys_update_load, &ctx); g_assert(values_count() == ctx.values); if (GNET_PROPERTY(dht_storage_debug)) { size_t keys_count = hikset_count(keys); g_debug("DHT holding %zu value%s spread over %zu key%s", ctx.values, plural(ctx.values), keys_count, plural(keys_count)); } return TRUE; /* Keep calling */ }
/** * Remove expired messages (eslist iterator). * * @return TRUE if message has expired and was freed up. */ static bool udp_tx_desc_expired(void *data, void *udata) { struct udp_tx_desc *txd = data; udp_sched_t *us = udata; udp_sched_check(us); udp_tx_desc_check(txd); if (delta_time(tm_time(), txd->expire) > 0) { udp_sched_log(1, "%p: expiring mb=%p (%d bytes) prio=%u", us, txd->mb, pmsg_size(txd->mb), pmsg_prio(txd->mb)); if (txd->cb->add_tx_dropped != NULL) (*txd->cb->add_tx_dropped)(txd->tx->owner, 1); /* Dropped in TX */ return udp_tx_desc_drop(data, udata); /* Returns TRUE */ } return FALSE; }
/**
 * Lookup value in table, and if found, revitalize entry, restoring the
 * initial lifetime the key/value pair had at insertion time.
 */
void *
aging_lookup_revitalise(aging_table_t *ag, const void *key)
{
	struct aging_value *entry;
	void *result = NULL;

	aging_check(ag);
	aging_synchronize(ag);

	entry = hikset_lookup(ag->table, key);
	if (entry != NULL) {
		/* Entries age in list order: moving to the tail resets lifetime */
		entry->last_insert = tm_time();
		elist_moveto_tail(&ag->list, entry);
		result = entry->value;
	}

	aging_return(ag, result);
}
/**
 * Given a node which was first seen at ``first_seen'' and last seen at
 * ``last_seen'', return probability that node still be alive now.
 *
 * @param first_seen	first time node was seen / created
 * @param last_seen		last time node was seen
 *
 * @return the probability that the node be still alive now.
 */
double
stable_still_alive_probability(time_t first_seen, time_t last_seen)
{
	time_delta_t life = delta_time(last_seen, first_seen);
	time_delta_t elapsed;

	if (life <= 0)
		return 0.0;

	elapsed = delta_time(tm_time(), last_seen);

	/*
	 * Safety precaution: regardless of the past lifetime of the node, if
	 * we have not heard from it for more than STABLE_UPPER_THRESH, then
	 * consider it dead.
	 */

	if (elapsed >= STABLE_UPPER_THRESH)
		return 0.0;

	return stable_alive_probability(life, elapsed);
}
static void adns_reply_ready(const struct adns_response *ans) { time_t now = tm_time(); g_assert(ans != NULL); if (ans->common.reverse) { if (common_dbg > 1) { const struct adns_reverse_reply *reply = &ans->reply.reverse; g_debug("%s: resolved \"%s\" to \"%s\".", G_STRFUNC, host_addr_to_string(reply->addr), reply->hostname); } } else { const struct adns_reply *reply = &ans->reply.by_addr; size_t num; num = count_addrs(reply->addrs, G_N_ELEMENTS(reply->addrs)); num = MAX(1, num); /* For negative caching */ if (common_dbg > 1) { size_t i; for (i = 0; i < num; i++) { g_debug("%s: resolved \"%s\" to \"%s\".", G_STRFUNC, reply->hostname, host_addr_to_string(reply->addrs[i])); } } if (!adns_cache_lookup(adns_cache, now, reply->hostname, NULL, 0)) { adns_cache_add(adns_cache, now, reply->hostname, reply->addrs, num); } } g_assert(ans->common.user_callback); adns_invoke_user_callback(ans); }
/** * Periodic garbage collecting routine. */ static bool aging_gc(void *obj) { aging_table_t *ag = obj; time_t now = tm_time(); struct aging_value *aval; aging_check(ag); aging_synchronize(ag); g_assert(elist_count(&ag->list) == hikset_count(ag->table)); while (NULL != (aval = elist_head(&ag->list))) { if (delta_time(now, aval->last_insert) <= ag->delay) break; /* List is sorted, oldest items first */ hikset_remove(ag->table, aval->key); aging_free(aval, ag); } aging_return(ag, TRUE); /* Keep calling */ }
/**
 * Updates the global HSEP table when a connection is about
 * to be closed. The connection's HSEP data is restored to
 * zero and the CAN_HSEP attribute is cleared.
 *
 * @param n				the node whose connection is closing
 * @param in_shutdown	TRUE when the whole process is shutting down, in
 *						which case global table maintenance is skipped
 */
void
hsep_connection_close(struct gnutella_node *n, bool in_shutdown)
{
	unsigned int i, j;

	g_assert(n);
	g_assert(n->hsep);

	/*
	 * Use g_debug() for consistency with the debug logging done elsewhere
	 * (the original used a bare printf() to stdout).
	 */
	if (GNET_PROPERTY(hsep_debug) > 1)
		g_debug("HSEP: Deinitializing node %s",
			host_addr_port_to_string(n->addr, n->port));

	if (in_shutdown)
		goto cleanup;

	/* Subtract this connection's contribution from the global table */
	for (i = 1; i < G_N_ELEMENTS(hsep_global_table); i++) {
		for (j = 0; j < G_N_ELEMENTS(hsep_global_table[0]); j++) {
			hsep_global_table[i][j] -= n->hsep->table[i][j];
			n->hsep->table[i][j] = 0;
		}
	}

	if (GNET_PROPERTY(hsep_debug) > 1)
		hsep_dump_table();

	hsep_fire_global_table_changed(tm_time());

	/*
	 * Clear CAN_HSEP attribute so that the HSEP code
	 * will not use the node any longer.
	 */

cleanup:
	n->attrs &= ~NODE_A_CAN_HSEP;
	WFREE(n->hsep);
	n->hsep = NULL;
}
/**
 * Watchdog timer has expired.
 */
static void
wd_expired(cqueue_t *cq, void *arg)
{
	watchdog_t *wd = arg;

	watchdog_check(wd);

	wd->ev = NULL;

	/*
	 * If no kicks have happened, fire the registered callback. Otherwise,
	 * reset the callout queue event, so that the sliding window is starting
	 * when the last tick happened.
	 */

	if (0 == wd->last_kick) {
		wd_trigger(wd);
		return;
	}

	{
		time_delta_t elapsed = delta_time(tm_time(), wd->last_kick);

		/*
		 * If for some reason the callout queue heartbeat got delayed, more
		 * than ``period'' seconds may have elapsed since the last kick, in
		 * which case we also need to trigger the callback.
		 *
		 * Note that watchdog ``period'' is expressed in seconds.
		 */

		if (elapsed >= wd->period) {
			wd_trigger(wd);
		} else {
			/* Re-arm for the remainder of the sliding window */
			wd->ev = cq_insert(cq, (wd->period - elapsed) * 1000,
				wd_expired, wd);
		}
	}
}
/**
 * Upon reception of an UDP pong, check whether we had a matching registered
 * ping bearing the given MUID.
 *
 * If there was a callback attached to the reception of a reply, invoke it
 * before returning UDP_PONG_HANDLED.
 *
 * The ``host'' parameter MUST be a stack or static pointer to a gnet_host_t,
 * and NOT the address of a dynamically allocated host because gnet_host_copy()
 * is going to be used on it.
 *
 * @param n		the gnutella node replying
 * @param host	if non-NULL, filled with the host to whom we sent the ping
 *
 * @return TRUE if indeed this was a reply for a ping we sent.
 */
enum udp_pong_status
udp_ping_is_registered(const struct gnutella_node *n, gnet_host_t *host)
{
	const struct guid *muid = gnutella_header_get_muid(&n->header);
	struct udp_ping *ping;

	if (NULL == udp_pings)
		return UDP_PONG_UNSOLICITED;

	ping = hash_list_remove(udp_pings, muid);
	if (NULL == ping)
		return UDP_PONG_UNSOLICITED;

	if (host != NULL) {
		/*
		 * Let caller know the exact IP:port of the host we contacted,
		 * since the replying party can use a different port (which
		 * we may not be able to contact, whereas we know the targeted
		 * port did cause a reply).
		 */
		gnet_host_copy(host, ping->host);
	}

	if (NULL == ping->callback) {
		udp_ping_free(ping);
		return UDP_PONG_SOLICITED;
	}

	(*ping->callback->cb)(UDP_PING_REPLY, n, ping->callback->data);

	if (ping->callback->multiple) {
		ping->callback->got_reply = TRUE;
		ping->added = tm_time();	/* Delay expiration */
		hash_list_append(udp_pings, ping);
	} else {
		udp_ping_free(ping);
	}

	return UDP_PONG_HANDLED;
}
/**
 * Delay the nagle timer when more data is coming.
 */
static void
deflate_nagle_delay(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;

	g_assert(attr->flags & DF_NAGLE);
	g_assert(NULL != attr->tm_ev);
	g_assert(attr->nagle);		/* Nagle is allowed */

	/*
	 * We push back the initial delay a little while when more data comes,
	 * hoping that enough will be output so that we end up sending the TX
	 * buffer without having to trigger a flush too soon, since that would
	 * degrade compression performance.
	 *
	 * If too much time elapsed since the Nagle timer started, do not
	 * postpone the flush otherwise we might delay time-sensitive messages.
	 */

	if (delta_time(tm_time(), attr->nagle_start) >= BUFFER_DELAY)
		return;			/* Timer old enough: let it fire */

	{
		int remaining = cq_remaining(attr->tm_ev);

		cq_resched(attr->tm_ev, MAX(remaining, BUFFER_NAGLE / 2));
	}
}
void hsep_reset(void) { const GSList *sl; uint i; ZERO(&hsep_global_table); for (sl = node_all_nodes(); sl; sl = g_slist_next(sl)) { struct gnutella_node *n = sl->data; /* also consider unestablished connections here */ if (!(n->attrs & NODE_A_CAN_HSEP)) continue; g_assert(n->hsep); ZERO(&n->hsep->table); ZERO(&n->hsep->sent_table); /* this is what we know before receiving the first message */ for (i = 1; i < G_N_ELEMENTS(hsep_global_table); i++) { n->hsep->table[i][HSEP_IDX_NODES] = 1; hsep_global_table[i][HSEP_IDX_NODES]++; } /* * There's no need to reset the last_sent timestamp. * If we'd do this, hsep_timer() would send a message * to all HSEP connections the next time it is called. */ } hsep_fire_global_table_changed(tm_time()); }
/**
 * Callback for adns_resolve(), invoked when the resolution is complete.
 *
 * @param addrs		resolved addresses (may be empty on failure)
 * @param n			amount of addresses in ``addrs''
 * @param uu_udata	unused user data
 */
static void
uhc_host_resolved(const host_addr_t *addrs, size_t n, void *uu_udata)
{
	(void) uu_udata;
	g_assert(addrs);

	/*
	 * If resolution failed, try again if possible.
	 */

	if (0 == n) {
		if (GNET_PROPERTY(bootstrap_debug))
			g_warning("could not resolve UDP host cache \"%s\"",
				uhc_ctx.host);
		uhc_try_next();
		return;
	}

	if (n > 1) {
		size_t i;
		host_addr_t *hav;
		/* Current UHC was moved to tail by uhc_get_next() */
		struct uhc *uhc = hash_list_tail(uhc_list);

		/*
		 * UHC resolved to multiple endpoints.  Could be round-robin or
		 * IPv4 and IPv6 addresses.  Adding them as separate entries: if the
		 * IPv6 is unreachable we have an opportunity to skip it.
		 *		-- JA 24/7/2011
		 *
		 * Shuffle the address array before appending them to the UHC list.
		 *		--RAM, 2015-10-01
		 */

		hav = HCOPY_ARRAY(addrs, n);
		SHUFFLE_ARRAY_N(hav, n);

		/* Append each resolved endpoint as its own "host:port" UHC entry */
		for (i = 0; i < n; i++) {
			const char *host =
				host_addr_port_to_string(hav[i], uhc_ctx.port);
			g_debug("BOOT UDP host cache \"%s\" resolved to %s (#%zu)",
				uhc_ctx.host, host, i + 1);
			uhc_list_append(host);
		}

		hash_list_remove(uhc_list, uhc);	/* Replaced by IP address list */
		uhc_free(&uhc);

		/*
		 * We're going to continue and process the first address (in our
		 * shuffled array).  Make sure it is put at the end of the list
		 * and marked as being used, mimicking what uhc_get_next() would do.
		 *		--RAM, 2015-10-01
		 */

		{
			struct uhc key;

			/* Lookup by the same "host:port" string we appended above */
			key.host = host_addr_port_to_string(hav[0], uhc_ctx.port);
			uhc = hash_list_lookup(uhc_list, &key);

			g_assert(uhc != NULL);	/* We added the entry above! */

			uhc->stamp = tm_time();
			uhc->used++;
			hash_list_moveto_tail(uhc_list, uhc);
		}

		uhc_ctx.addr = hav[0];		/* Struct copy */
		HFREE_NULL(hav);
	} else {
		uhc_ctx.addr = addrs[0];
	}

	if (GNET_PROPERTY(bootstrap_debug))
		g_debug("BOOT UDP host cache \"%s\" resolved to %s",
			uhc_ctx.host, host_addr_to_string(uhc_ctx.addr));

	/*
	 * Now send the ping.
	 */

	uhc_send_ping();
}
/**
 * Periodic host heartbeat timer.
 *
 * Attempts to keep the amount of Gnutella connections at the configured
 * level: takes hosts from the caches (or the DHT, or UDP host caches as a
 * last resort) and initiates connections to them.
 */
void
host_timer(void)
{
	guint count;
	int missing;
	host_addr_t addr;
	guint16 port;
	host_type_t htype;
	guint max_nodes;
	gboolean empty_cache = FALSE;

	if (in_shutdown || !GNET_PROPERTY(online_mode))
		return;

	max_nodes = settings_is_leaf() ?
		GNET_PROPERTY(max_ultrapeers) : GNET_PROPERTY(max_connections);
	count = node_count();			/* Established + connecting */
	missing = node_keep_missing();

	/* NOTE(review): ``missing'' is an int printed with %u here -- format
	 * and argument type disagree; confirm and fix upstream. */
	if (GNET_PROPERTY(host_debug) > 1)
		g_debug("host_timer - count %u, missing %u", count, missing);

	/*
	 * If we are not connected to the Internet, apparently, make sure to
	 * connect to at most one host, to avoid using all our hostcache.
	 * Also, we don't connect each time we are called.
	 */

	if (!GNET_PROPERTY(is_inet_connected)) {
		static time_t last_try;

		if (last_try && delta_time(tm_time(), last_try) < 20)
			return;
		last_try = tm_time();

		if (GNET_PROPERTY(host_debug))
			g_debug("host_timer - not connected, trying to connect");
	}

	/*
	 * Allow more outgoing connections than the maximum amount of
	 * established Gnet connection we can maintain, but not more
	 * than quick_connect_pool_size   This is the "greedy mode".
	 */

	if (count >= GNET_PROPERTY(quick_connect_pool_size)) {
		if (GNET_PROPERTY(host_debug) > 1)
			g_debug("host_timer - count %u >= pool size %u",
				count, GNET_PROPERTY(quick_connect_pool_size));
		return;
	}

	if (count < max_nodes)
		missing -= whitelist_connect();

	/*
	 * If we are under the number of connections wanted, we add hosts
	 * to the connection list
	 */

	htype = HOST_ULTRA;

	if (
		settings_is_ultra() &&
		GNET_PROPERTY(node_normal_count) < GNET_PROPERTY(normal_connections) &&
		GNET_PROPERTY(node_ultra_count) >=
			(GNET_PROPERTY(up_connections) - GNET_PROPERTY(normal_connections))
	) {
		htype = HOST_ANY;
	}

	/* Fall back to any host type when the preferred cache is empty */
	if (hcache_size(htype) == 0)
		htype = HOST_ANY;
	if (hcache_size(htype) == 0)
		empty_cache = TRUE;

	if (GNET_PROPERTY(host_debug) && missing > 0)
		g_debug("host_timer - missing %d host%s%s",
			missing, missing == 1 ? "" : "s",
			empty_cache ? " [empty caches]" : "");

	if (!GNET_PROPERTY(stop_host_get)) {
		if (missing > 0) {
			static time_t last_try;
			unsigned fan, max_pool, to_add;

			/* Scale the amount of connection attempts to the pool size */
			max_pool = MAX(GNET_PROPERTY(quick_connect_pool_size), max_nodes);
			fan = (missing * GNET_PROPERTY(quick_connect_pool_size))/ max_pool;
			fan = MAX(1, fan);
			to_add = GNET_PROPERTY(is_inet_connected) ? fan : (guint) missing;

			/*
			 * Every so many calls, attempt to ping all our neighbours to
			 * get fresh pongs, in case our host cache is not containing
			 * sufficiently fresh hosts and we keep getting connection
			 * failures.
			 */

			if (
				0 == last_try ||
				delta_time(tm_time(), last_try) >= HOST_PINGING_PERIOD
			) {
				ping_all_neighbours();
				last_try = tm_time();
			}

			/*
			 * Make sure that we never use more connections then the
			 * quick pool or the maximum number of hosts allow.
			 */

			if (to_add + count > max_pool)
				to_add = max_pool - count;

			/* NOTE(review): unsigned values printed with %d below --
			 * format/argument mismatch; confirm and fix upstream. */
			if (GNET_PROPERTY(host_debug) > 2) {
				g_debug("host_timer - connecting - "
					"add: %d fan:%d miss:%d max_hosts:%d count:%d extra:%d",
					to_add, fan, missing, max_nodes, count,
					GNET_PROPERTY(quick_connect_pool_size));
			}

			missing = to_add;

			/*
			 * When poorly connected, also draw random hosts from the DHT
			 * and try to use them as UHCs before connecting to them.
			 */

			if (missing > 0 && (0 == connected_nodes() || host_low_on_pongs)) {
				gnet_host_t host[HOST_DHT_MAX];
				int hcount;
				int i;

				hcount = dht_fill_random(host,
					MIN(UNSIGNED(missing), G_N_ELEMENTS(host)));

				missing -= hcount;

				for (i = 0; i < hcount; i++) {
					addr = gnet_host_get_addr(&host[i]);
					port = gnet_host_get_port(&host[i]);
					if (!hcache_node_is_bad(addr)) {
						if (GNET_PROPERTY(host_debug) > 3) {
							g_debug("host_timer - UHC pinging and connecting "
								"to DHT node at %s",
								host_addr_port_to_string(addr, port));
						}

						/* Try to use the host as an UHC before connecting */
						udp_send_ping(NULL, addr, port, TRUE);

						if (!host_gnutella_connect(addr, port)) {
							missing++;	/* Did not use entry */
						}
					} else {
						missing++;		/* Did not use entry */
					}
				}
			}

			/* Drain the regular host cache for the remaining slots */

			while (hcache_size(htype) && missing-- > 0) {
				if (hcache_get_caught(htype, &addr, &port)) {
					if (!(hostiles_check(addr) || hcache_node_is_bad(addr))) {
						if (!host_gnutella_connect(addr, port)) {
							missing++;	/* Did not use entry */
						}
					} else {
						missing++;		/* Did not use entry */
					}
				}
			}

			/* Last resort: query the UDP host caches for fresh hosts */

			if (missing > 0 && (empty_cache || host_cache_allow_bypass())) {
				if (!uhc_is_waiting()) {
					if (GNET_PROPERTY(host_debug))
						g_debug("host_timer - querying UDP host cache");
					uhc_get_hosts();	/* Get new hosts from UHCs */
				}
			}
		}
	} else if (GNET_PROPERTY(use_netmasks)) {
		/* Try to find better hosts */
		if (hcache_find_nearby(htype, &addr, &port)) {
			if (node_remove_worst(TRUE))
				node_add(addr, port, 0);
			else
				hcache_add_caught(htype, addr, port, "nearby host");
		}
	}
}