/**
 * Print a one-line status summary for a Gnutella node to the shell.
 *
 * The line holds: address, listening port, node flags, country code,
 * connection time, node uptime and the (hex-escaped) vendor string.
 *
 * @param sh	the shell to which the line is written
 * @param n		the node whose information is printed
 */
static void
print_node_info(struct gnutella_shell *sh, const struct gnutella_node *n)
{
	gnet_node_flags_t flags;
	time_delta_t up, con;
	char buf[1024];
	char vendor_escaped[50];
	char uptime_buf[8];
	char contime_buf[8];

	g_return_if_fail(sh);
	g_return_if_fail(n);

	if (!node_fill_flags(NODE_ID(n), &flags))
		return;

	/* Elapsed times; 0 when the corresponding date is unknown */
	con = n->connect_date ? delta_time(tm_time(), n->connect_date) : 0;
	up = n->up_date ? delta_time(tm_time(), n->up_date) : 0;

	{
		const char *vendor;
		char *escaped;

		vendor = node_vendor(n);
		escaped = hex_escape(vendor, TRUE);
		clamp_strcpy(vendor_escaped, sizeof vendor_escaped, escaped);
		/*
		 * hex_escape() returns its argument unchanged when nothing needed
		 * escaping, in which case there is no new allocation to free.
		 */
		if (escaped != vendor) {
			HFREE_NULL(escaped);
		}
	}

	/* "?" when the elapsed time is unknown (zero or negative) */
	clamp_strcpy(uptime_buf, sizeof uptime_buf,
		up > 0 ? compact_time(up) : "?");
	clamp_strcpy(contime_buf, sizeof contime_buf,
		con > 0 ? compact_time(con) : "?");

	gm_snprintf(buf, sizeof buf,
		"%-21.45s %5.1u %s %2.2s %6.6s %6.6s %.50s",
		node_addr(n),
		(unsigned) n->gnet_port,
		node_flags_to_string(&flags),
		iso3166_country_cc(n->country),
		contime_buf,
		uptime_buf,
		vendor_escaped);

	shell_write(sh, buf);
	shell_write(sh, "\n");	/* Terminate line */
}
/**
 * DBMW foreach iterator to remove old entries.
 * @return TRUE if entry must be deleted.
 */
static gboolean
prune_old(gpointer key, gpointer value, size_t u_len, gpointer u_data)
{
	const kuid_t *id = key;
	const struct lifedata *ld = value;
	time_delta_t idle;
	double p = 1.0;
	gboolean expired = FALSE;

	(void) u_len;
	(void) u_data;

	idle = delta_time(tm_time(), ld->last_seen);

	/*
	 * Within the expiration window, keep the node unconditionally.
	 * Past it, keep the node only whilst its computed alive probability
	 * remains above the threshold.
	 */

	if (idle > STABLE_EXPIRE) {
		p = stable_still_alive_probability(ld->first_seen, ld->last_seen);
		expired = p < STABLE_PROBA;
	}

	if (GNET_PROPERTY(dht_stable_debug) > 4) {
		g_debug("DHT STABLE node %s life=%s last_seen=%s, p=%.2f%%%s",
			kuid_to_hex_string(id),
			compact_time(delta_time(ld->last_seen, ld->first_seen)),
			compact_time2(idle), p * 100.0,
			expired ? " [EXPIRED]" : "");
	}

	return expired;
}
/**
 * Stringify value of the general stats to buffer.
 *
 * Zero values are rendered as "-"; byte counters are shown as compact
 * sizes, duration counters as compact times, anything else as a plain
 * decimal number.
 *
 * @param dst	destination buffer
 * @param size	length of destination buffer
 * @param stats	the statistics array
 * @param idx	the index within the general statistics of value to format
 */
void
gnet_stats_gui_general_to_string_buf(char *dst, size_t size,
	const gnet_stats_t *stats, int idx)
{
	const uint64 value = stats->general[idx];

	if (0 == value) {
		g_strlcpy(dst, "-", size);
		return;
	}

	switch (idx) {
	case GNR_QUERY_COMPACT_SIZE:
	case GNR_IGNORED_DATA:
	case GNR_SUNK_DATA:
	case GNR_UDP_READ_AHEAD_BYTES_SUM:
	case GNR_UDP_READ_AHEAD_BYTES_MAX:
	case GNR_RUDP_TX_BYTES:
	case GNR_RUDP_RX_BYTES:
		/* Byte amounts */
		g_strlcpy(dst, compact_size(value, show_metric_units()), size);
		break;
	case GNR_UDP_READ_AHEAD_DELAY_MAX:
		/* Duration in seconds */
		g_strlcpy(dst, compact_time(value), size);
		break;
	default:
		uint64_to_string_buf(value, dst, size);
	}
}
/**
 * Retrieve cached security token for a given KUID.
 *
 * @param id		the KUID for which we'd like the security token
 * @param len_ptr	where the length of the security token is written
 * @param tok_ptr	where the address of the security token is written
 * @param time_ptr	where the last update time of token is written
 *
 * @return TRUE if we found a token, with len_ptr and tok_ptr filled with
 * the information about the length and the token pointer.  Information is
 * returned from a static memory buffer so it must be perused immediately.
 */
bool
tcache_get(const kuid_t *id,
	uint8 *len_ptr, const void **tok_ptr, time_t *time_ptr)
{
	struct tokdata *entry;

	g_assert(id != NULL);

	entry = get_tokdata(id);
	if (NULL == entry)
		return FALSE;

	/* Stale tokens are lazily evicted upon access */

	if (delta_time(tm_time(), entry->last_update) > token_life) {
		delete_tokdata(id);
		return FALSE;
	}

	/* Output parameters are all optional */

	if (len_ptr != NULL)
		*len_ptr = entry->length;
	if (tok_ptr != NULL)
		*tok_ptr = entry->token;
	if (time_ptr != NULL)
		*time_ptr = entry->last_update;

	if (GNET_PROPERTY(dht_tcache_debug) > 4) {
		char buf[80];

		bin_to_hex_buf(entry->token, entry->length, buf, sizeof buf);
		g_debug("DHT TCACHE security token for %s is %u-byte \"%s\" (%s)",
			kuid_to_hex_string(id), entry->length, buf,
			compact_time(delta_time(tm_time(), entry->last_update)));
	}

	gnet_stats_inc_general(GNR_DHT_CACHED_TOKENS_HITS);
	return TRUE;
}
/**
 * Retry publishing after some delay.
 *
 * @param pe		the entry to publish
 * @param delay		delay in seconds
 * @param msg		if non-NULL, logging message explaining the delay
 */
static void
publisher_retry(struct publisher_entry *pe, int delay, const char *msg)
{
	struct pubdata *pd;

	publisher_check(pe);
	g_assert(NULL == pe->publish_ev);	/* Not already scheduled */
	g_assert(delay > 0);

	/*
	 * Persist the next enqueuing time so the delay survives a restart.
	 */
	pd = get_pubdata(pe->sha1);
	if (pd != NULL) {
		pd->next_enqueue = time_advance(tm_time(), UNSIGNED(delay));
		dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
	}

	/* Callout queue works in milliseconds, hence the * 1000 */
	pe->publish_ev = cq_insert(publish_cq, delay * 1000, handle_entry, pe);
	pe->last_delayed = tm_time();

	if (GNET_PROPERTY(publisher_debug) > 3) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);

		g_debug("PUBLISHER will retry SHA-1 %s %s\"%s\" in %s: %s",
			sha1_to_string(pe->sha1),
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			compact_time(delay),
			msg != NULL ? msg : "<no reason>");
		shared_file_unref(&sf);
	}
}
/**
 * Pretty-printing of node information for logs into the supplied buffers.
 *
 * IP address is followed by '*' if the contact's address/port was patched.
 * IP address is followed by '?' if the UDP message came from another IP
 *
 * A "zombie" node is a node retrieved from the persisted routing table that
 * is not alive.  Normally, only alive hosts from which we get traffic are
 * added, but here we have an instance that is not alive -- a zombie.
 *
 * A "cached" node is a node coming from the k-closest root cache.
 *
 * A firewalled node is indicated by a trailing "fw" indication.
 *
 * @param kn	the node to stringify
 * @param buf	destination buffer
 * @param len	size of the destination buffer
 *
 * @return the buffer where printing was done.
 */
const char *
knode_to_string_buf(const knode_t *kn, char buf[], size_t len)
{
	char host_buf[HOST_ADDR_PORT_BUFLEN];
	char vc_buf[VENDOR_CODE_BUFLEN];
	char kuid_buf[KUID_HEX_BUFLEN];

	knode_check(kn);

	/* Pre-render the three variable-length components */
	bin_to_hex_buf(kn->id, KUID_RAW_SIZE, kuid_buf, sizeof kuid_buf);
	host_addr_port_to_string_buf(kn->addr, kn->port,
		host_buf, sizeof host_buf);
	vendor_code_to_string_buf(kn->vcode.u32, vc_buf, sizeof vc_buf);

	str_bprintf(buf, len,
		"%s%s%s (%s v%u.%u) [%s] \"%s\", ref=%d%s%s%s%s [%s]",
		host_buf,
		(kn->flags & KNODE_F_PCONTACT) ? "*" : "",
		(kn->flags & KNODE_F_FOREIGN_IP) ? "?" : "",
		vc_buf, kn->major, kn->minor, kuid_buf,
		knode_status_to_string(kn->status), kn->refcnt,
		/* Known status but not alive => zombie (see header comment) */
		(kn->status != KNODE_UNKNOWN && !(kn->flags & KNODE_F_ALIVE)) ?
			" zombie" : "",
		(kn->flags & KNODE_F_CACHED) ? " cached" : "",
		(kn->flags & KNODE_F_RPC) ? " RPC" : "",
		(kn->flags & KNODE_F_FIREWALLED) ? " fw" : "",
		compact_time(delta_time(tm_time(), kn->first_seen)));

	return buf;
}
/**
 * DBMW foreach iterator to remove old entries.
 * @return TRUE if entry must be deleted.
 */
static bool
guid_prune_old_entries(void *key, void *value, size_t u_len, void *u_data)
{
	const guid_t *guid = key;
	const struct guiddata *gd = value;
	time_delta_t idle;
	double p = 0.0;
	bool expired;

	(void) u_len;
	(void) u_data;

	/*
	 * We reuse the statistical probability model of DHT nodes to project
	 * whether it makes sense to keep an entry.
	 */

	idle = delta_time(tm_time(), gd->last_time);

	if (gd->create_time != gd->last_time) {
		p = stable_still_alive_probability(gd->create_time, gd->last_time);
		expired = p < GUID_STABLE_PROBA;
	} else {
		/* Seen only once: expire on plain elapsed time */
		expired = idle > GUID_STABLE_LIFETIME;
	}

	if (GNET_PROPERTY(guid_debug) > 5) {
		g_debug("GUID cached %s life=%s last_seen=%s, p=%.2f%%%s",
			guid_hex_str(guid),
			compact_time(delta_time(gd->last_time, gd->create_time)),
			compact_time2(idle), p * 100.0,
			expired ? " [EXPIRED]" : "");
	}

	return expired;
}
/*
 * Emit one formatted line per callout queue: thread ID, event counts,
 * last idle time, period, heartbeat/trigger counts and the queue name
 * (with its parent queue, if any).
 *
 * NOTE(review): the enclosing function is not visible here; `sh', `s',
 * `info' and `maxlen' are assumed to be set up by the surrounding code.
 */
PSLIST_FOREACH(info, sl) {
	cq_info_t *cqi = sl->data;

	cq_info_check(cqi);

	/* "-" when the queue is not bound to a valid thread */
	if (THREAD_INVALID_ID == cqi->stid)
		str_printf(s, "%-2s ", "-");	/* presumably resets `s' -- verify */
	else
		str_printf(s, "%-2d ", cqi->stid);
	str_catf(s, "%-6zu ", cqi->event_count);
	str_catf(s, "%-4zu ", cqi->periodic_count);
	str_catf(s, "%-4zu ", cqi->idle_count);
	/* "-" when the queue was never idle */
	str_catf(s, "%-5s ",
		0 == cqi->last_idle ?
			"-" : compact_time(delta_time(tm_time(), cqi->last_idle)));
	str_catf(s, "%'6d ", cqi->period);
	str_catf(s, "%10zu ", cqi->heartbeat_count);
	str_catf(s, "%10zu ", cqi->triggered_count);
	/* Pad the quoted name so parent annotations line up */
	str_catf(s, "\"%s\"%*s", cqi->name,
		(int) (maxlen - vstrlen(cqi->name)), "");
	if (cqi->parent != NULL)
		str_catf(s, " (%s)", cqi->parent);
	str_putc(s, '\n');
	shell_write(sh, str_2c(s));
}
/**
 * Record a SHA1 for publishing.
 *
 * If the SHA1 is already known, this is a silent no-op.  Otherwise the
 * entry gets persistent publishing data (created if missing) and is
 * handled immediately.
 *
 * @param sha1	the SHA1 of the file to publish
 */
void
publisher_add(const sha1_t *sha1)
{
	struct publisher_entry *pe;
	struct pubdata *pd;

	g_assert(sha1 != NULL);

	if (NULL == db_pubdata)
		return;		/* Shutdowning */

	/*
	 * If already known, ignore silently.
	 */

	if (hikset_lookup(publisher_sha1, sha1))
		return;

	/*
	 * Create persistent publishing data if none known already.
	 */

	pd = get_pubdata(sha1);
	if (NULL == pd) {
		struct pubdata new_pd;

		/* Zeroed times mean "never enqueued" / "never published" */
		new_pd.next_enqueue = 0;
		new_pd.expiration = 0;
		dbmw_write(db_pubdata, sha1, &new_pd, sizeof new_pd);

		if (GNET_PROPERTY(publisher_debug) > 2) {
			g_debug("PUBLISHER allocating new SHA-1 %s",
				sha1_to_string(sha1));
		}
	} else {
		if (GNET_PROPERTY(publisher_debug) > 2) {
			time_delta_t enqueue = delta_time(pd->next_enqueue, tm_time());
			time_delta_t expires = delta_time(pd->expiration, tm_time());

			g_debug("PUBLISHER existing SHA-1 %s, next enqueue %s%s, %s%s",
				sha1_to_string(sha1),
				enqueue > 0 ? "in " : "",
				enqueue > 0 ? compact_time(enqueue) : "now",
				pd->expiration ?
					(expires > 0 ? "expires in " : "expired") :
					"not published",
				expires > 0 ? compact_time2(expires) : "");
		}
	}

	/*
	 * New entry will be processed immediately.
	 */

	pe = publisher_entry_alloc(sha1);
	hikset_insert_key(publisher_sha1, &pe->sha1);

	publisher_handle(pe);
}
/**
 * Remove value from a key, discarding the association between the creator ID
 * and the 64-bit DB key.
 *
 * The key is known to hold the value already.
 *
 * @param id		the primary key
 * @param cid		the secondary key (creator's ID)
 * @param dbkey		the 64-bit DB key (informational, for assertions)
 */
void
keys_remove_value(const kuid_t *id, const kuid_t *cid, uint64 dbkey)
{
	struct keyinfo *ki;
	struct keydata *kd;
	int idx;

	ki = hikset_lookup(keys, id);
	g_assert(ki);

	kd = get_keydata(id);
	if (NULL == kd)
		return;

	g_assert(kd->values);
	g_assert(kd->values == ki->values);	/* In-core and disk views agree */
	g_assert(kd->values <= MAX_VALUES);

	idx = lookup_secondary_idx(kd, cid);
	g_assert(idx >= 0 && idx < kd->values);
	g_assert(dbkey == kd->dbkeys[idx]);

	/* Keep the three parallel arrays in sync when removing slot `idx' */
	ARRAY_REMOVE(kd->creators, idx, kd->values);
	ARRAY_REMOVE(kd->dbkeys, idx, kd->values);
	ARRAY_REMOVE(kd->expire, idx, kd->values);

	/*
	 * We do not synchronously delete empty keys.
	 *
	 * This lets us optimize the nominal case whereby a key loses all its
	 * values due to a STORE request causing a lifetime check.  But the
	 * STORE will precisely insert back another value.
	 *
	 * Hence lazy expiration also gives us the opportunity to further exploit
	 * caching in memory, the keyinfo being held there as a "cached" value.
	 *
	 * Reclaiming of dead keys happens during periodic key load computation.
	 */

	kd->values--;
	ki->values--;

	/*
	 * Recompute next expiration time.
	 */

	ki->next_expire = TIME_T_MAX;
	for (idx = 0; idx < ki->values; idx++) {
		ki->next_expire = MIN(ki->next_expire, kd->expire[idx]);
	}

	dbmw_write(db_keydata, id, kd, sizeof *kd);

	if (GNET_PROPERTY(dht_storage_debug) > 2) {
		g_debug("DHT STORE key %s now holds only %d/%d value%s, "
			"expire in %s",
			kuid_to_hex_string(id), ki->values, MAX_VALUES,
			plural(ki->values),
			compact_time(delta_time(ki->next_expire, tm_time())));
	}
}
/**
 * Get key status (full and loaded boolean attributes).
 *
 * As a side effect, values whose expiration time has been reached are
 * expired before the status is computed.
 *
 * @param id		the key being inspected
 * @param full		written with TRUE if the key holds MAX_VALUES values
 * @param loaded	written with TRUE if the key's GET request load is high
 */
void
keys_get_status(const kuid_t *id, bool *full, bool *loaded)
{
	struct keyinfo *ki;
	time_t now;

	g_assert(id);
	g_assert(full);
	g_assert(loaded);

	*full = FALSE;
	*loaded = FALSE;

	ki = hikset_lookup(keys, id);
	if (ki == NULL)
		return;		/* Unknown key: neither full nor loaded */

	keyinfo_check(ki);

	if (GNET_PROPERTY(dht_storage_debug) > 1) {
		/* (int)(x * 100) / 100.0 truncates the load to 2 decimals */
		g_debug("DHT STORE key %s holds %d/%d value%s, "
			"load avg: get = %g [%s], store = %g [%s], expire in %s",
			kuid_to_hex_string(id), ki->values, MAX_VALUES,
			plural(ki->values),
			(int) (ki->get_req_load * 100) / 100.0,
			ki->get_req_load >= LOAD_GET_THRESH ? "LOADED" : "OK",
			(int) (ki->store_req_load * 100) / 100.0,
			ki->store_req_load >= LOAD_STO_THRESH ? "LOADED" : "OK",
			compact_time(delta_time(ki->next_expire, tm_time())));
	}

	if (ki->get_req_load >= LOAD_GET_THRESH) {
		*loaded = TRUE;
	} else if (ki->get_requests) {
		/* Inverse of the EMA update: requests needed to cross the
		 * threshold at the next smoothing step */
		float limit = LOAD_GET_THRESH / LOAD_SMOOTH -
			(1.0 - LOAD_SMOOTH) / LOAD_SMOOTH * ki->get_req_load;

		/*
		 * Look whether the current amount of get requests is sufficient to
		 * bring the EMA above the threshold at the next update.
		 */

		if (1.0 * ki->get_requests > limit)
			*loaded = TRUE;
	}

	/*
	 * Check whether we reached the expiration time of one of the values held.
	 * Try to expire values before answering.
	 *
	 * NB: even if all the values are collected from the key, deletion of the
	 * `ki' structure will not happen immediately: this is done asynchronously
	 * to avoid disabling a `ki' within a call chain using it.
	 */

	now = tm_time();

	if (now >= ki->next_expire) {
		if (!keys_expire_values(ki, now))
			return;		/* Key info reclaimed */
	}

	if (ki->values >= MAX_VALUES)
		*full = TRUE;
}
/**
 * Handle a SHA-1 entry, publishing its alt-loc to the DHT if still shared.
 *
 * Depending on the entry's state this either publishes now, reschedules
 * via publisher_retry() (transient condition), holds via publisher_hold()
 * (long-lived condition) or frees the entry when the file is gone.
 */
static void
publisher_handle(struct publisher_entry *pe)
{
	shared_file_t *sf;
	bool is_partial = FALSE;
	int alt_locs;
	time_delta_t min_uptime;
	uint32 avg_uptime;

	publisher_check(pe);
	g_assert(NULL == pe->publish_ev);	/* Not scheduled concurrently */

	sf = shared_file_by_sha1(pe->sha1);

	/*
	 * Remove SHA1 if no longer shared.
	 */

	if (NULL == sf) {
		fileinfo_t *fi = file_info_by_sha1(pe->sha1);

		/*
		 * If a partial file has less than the minimum amount of data for
		 * PFSP, shared_file_by_sha1() will return NULL, hence we need to
		 * explicitly check for existence through file_info_by_sha1() and
		 * that the file still exists.
		 */

		if (fi != NULL && file_exists(fi->pathname)) {
			/* Waiting for more data to be able to share, or PFSP re-enabled */
			publisher_retry(pe, PUBLISH_BUSY, "partial file missing");
			return;
		}

		if (GNET_PROPERTY(publisher_debug)) {
			g_debug("PUBLISHER SHA-1 %s is no longer shared",
				sha1_to_string(pe->sha1));
		}
		publisher_entry_free(pe, TRUE);
		return;
	}

	/*
	 * Wait when rebuilding the library.
	 */

	if (SHARE_REBUILDING == sf) {
		publisher_retry(pe, PUBLISH_BUSY, "library being rebuilt");
		return;
	}

	is_partial = shared_file_is_partial(sf);

	/*
	 * If the SHA1 is not available, wait.
	 */

	if (
		!is_partial &&
		(!sha1_hash_available(sf) || !sha1_hash_is_uptodate(sf))
	) {
		publisher_retry(pe, PUBLISH_BUSY, "SHA-1 of file unknown yet");
		goto done;
	}

	/*
	 * Look whether this node has a sufficient average uptime.
	 *
	 * We're stricter to publish partial files because we want to favor
	 * publishing of full files in the DHT, and the benefits of publishing
	 * partial entries come only if we're up for a long enough time.
	 *
	 * Since publishing imposes lookup traffic in the DHT, it is not efficient
	 * to have transient nodes publish file sharing information because this
	 * will likely never be useful.
	 */

	min_uptime = PUBLISH_TRANSIENT;
	if (is_partial)
		min_uptime *= 2;	/* Stricter requirement for partial files */

	avg_uptime = get_average_servent_uptime(tm_time());

	if (avg_uptime < UNSIGNED(min_uptime)) {
		time_delta_t delay = min_uptime - avg_uptime;

		delay = MAX(delay, PUBLISH_BUSY);
		publisher_retry(pe, delay, "minimum average uptime not reached yet");
		goto done;
	}

	/*
	 * If we are dealing with a file for which we know enough alternate
	 * locations, assume it is popular and do not publish it yet.
	 *
	 * We do not publish the SHA-1 of a partial file for which we know
	 * of at least two alternate locations because the purpose of us publishing
	 * these partial SHA-1s is to attract other PFSP-aware hosts and
	 * recreate a mesh.
	 */

	alt_locs = dmesh_count(pe->sha1);
	is_partial = is_partial && !shared_file_is_finished(sf);

	if (alt_locs > (is_partial ? PUBLISH_PARTIAL_MAX : PUBLISH_DMESH_MAX)) {
		if (GNET_PROPERTY(publisher_debug)) {
			g_debug("PUBLISHER SHA-1 %s %s\"%s\" has %d download mesh "
				"entr%s, skipped", sha1_to_string(pe->sha1),
				is_partial ? "partial " : "", shared_file_name_nfc(sf),
				alt_locs, plural_y(alt_locs));
		}
		publisher_hold(pe, PUBLISH_POPULAR, "popular file");
		goto done;
	}

	/*
	 * If the DHT is not enabled, postpone processing.
	 */

	if (!dht_enabled()) {
		publisher_hold(pe, PUBLISH_BUSY, "DHT disabled");
		goto done;
	}

	/*
	 * If this is a partial file for which we have less than the minimum
	 * for PFSP sharing, or if PFSP has been disabled, skip it.
	 */

	if (shared_file_is_partial(sf)) {
		fileinfo_t *fi = shared_file_fileinfo(sf);

		if (
			!file_info_partial_shareable(fi) ||
			fi->done < GNET_PROPERTY(pfsp_minimum_filesize)
		) {
			publisher_hold(pe, PUBLISH_BUSY, "PFSP minima not reached");
			goto done;
		}
	}

	/*
	 * Check whether it is time to process the entry, in case we're
	 * restarting quickly after a shutdown.
	 */

	if (0 == pe->last_publish) {
		struct pubdata *pd = get_pubdata(pe->sha1);

		if (pd != NULL) {
			time_t now = tm_time();
			time_delta_t enqueue = delta_time(pd->next_enqueue, now);
			time_delta_t expire = delta_time(pd->expiration, now);

			/* Honour persisted delay, but never past the data's expiry */
			if (enqueue > 0 && (0 == pd->expiration || expire > 0)) {
				int delay = MIN(enqueue, PUBLISH_POPULAR);

				if (pd->expiration != 0)
					delay = MIN(delay, expire);

				if (GNET_PROPERTY(publisher_debug) > 1) {
					g_debug("PUBLISHER SHA-1 %s delayed by %s",
						sha1_to_string(pe->sha1), compact_time(enqueue));
				}

				publisher_retry(pe, delay, "first-time delay");
				goto done;
			}
		}
	}

	/*
	 * Cancel possible remaining backgrounded publishing.
	 */

	if (pe->backgrounded) {
		pdht_cancel_file(pe->sha1, FALSE);
		pe->backgrounded = FALSE;
	}

	/*
	 * OK, we can publish this alternate location.
	 */

	if (pe->last_publish) {
		if (GNET_PROPERTY(publisher_debug) > 2) {
			g_debug("PUBLISHER SHA-1 %s re-enqueued %d secs "
				"after last publish", sha1_to_string(pe->sha1),
				(int) delta_time(tm_time(), pe->last_publish));
		}
	}

	pe->last_enqueued = tm_time();
	pdht_publish_file(sf, publisher_done, pe);

	/* FALL THROUGH */

done:
	shared_file_unref(&sf);
}
/**
 * Publishing callback invoked when asynchronous publication is completed,
 * or ended with an error.
 *
 * @param arg	the publisher entry (struct publisher_entry)
 * @param code	the publishing status code
 * @param info	additional publishing statistics (roots, presence, ...)
 *
 * @return TRUE if we accept the publishing, FALSE otherwise to get the
 * publishing layer to continue attempts to failed STORE roots and report
 * on progress using the same callback.
 */
static bool
publisher_done(void *arg, pdht_error_t code, const pdht_info_t *info)
{
	struct publisher_entry *pe = arg;
	struct pubdata *pd;
	int delay = PUBLISH_BUSY;
	bool expired = FALSE;
	bool accepted = TRUE;

	publisher_check(pe);

	pd = get_pubdata(pe->sha1);

	/*
	 * Update stats on republishing before value expiration.
	 */

	if (PDHT_E_OK == code) {
		if (pe->last_publish && info->roots > 0) {
			if (pd != NULL) {
				if (pd->expiration && delta_time(tm_time(), pd->expiration) > 0)
					expired = TRUE;
			} else {
				/* No persisted data: fall back on elapsed wall time */
				time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);

				if (elapsed > DHT_VALUE_ALOC_EXPIRE)
					expired = TRUE;
			}

			if (expired)
				gnet_stats_inc_general(GNR_DHT_REPUBLISHED_LATE);
		}
	}

	/*
	 * Compute retry delay.
	 */

	switch (code) {
	case PDHT_E_OK:
		/*
		 * If we were not able to publish to KDA_K nodes, decrease the
		 * delay before republishing.  We use a non-linear decimation of
		 * the republish time, as a function of the number of nodes to which
		 * we could publish.
		 */

		delay = publisher_delay(info, DHT_VALUE_ALOC_EXPIRE);
		accepted = publisher_is_acceptable(info);
		break;
	case PDHT_E_POPULAR:
		/*
		 * Compute the suitable delay: the first time, we use PUBLISH_POPULAR,
		 * and then we double each time until we reach PUBLISH_POPULAR_MAX.
		 *
		 * If we already tried to publish the entry, pe->last_delayed will
		 * be non-zero.
		 */

		if (0 != pe->last_delayed) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);

			if (elapsed < PUBLISH_POPULAR) {
				delay = PUBLISH_POPULAR;
			} else if (elapsed >= PUBLISH_POPULAR_MAX / 2) {
				delay = PUBLISH_POPULAR_MAX;
			} else {
				delay = elapsed * 2;
			}
		} else {
			delay = PUBLISH_POPULAR;
		}
		break;
	case PDHT_E_NOT_SHARED:
	case PDHT_E_LOOKUP_EXPIRED:
	case PDHT_E_LOOKUP:
	case PDHT_E_UDP_CLOGGED:
	case PDHT_E_PUBLISH_EXPIRED:
	case PDHT_E_PUBLISH_ERROR:
	case PDHT_E_SHA1:
	case PDHT_E_PENDING:
	case PDHT_E_CANCELLED:
	case PDHT_E_GGEP:
	case PDHT_E_NONE:
		delay = PUBLISH_BUSY;
		break;
	case PDHT_E_MAX:
		g_assert_not_reached();
	}

	/*
	 * For a backgrounded entry publishing, we need to adjust the computed
	 * delay with the time that was elapsed
	 */

	g_assert(!pe->backgrounded == !(pe->publish_ev != NULL));

	if (pe->backgrounded) {
		time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);

		g_assert(pe->last_delayed > 0);
		cq_cancel(&pe->publish_ev);

		if (delay > elapsed) {
			delay -= elapsed;
		} else {
			delay = 1;	/* Smallest positive delay, fire almost at once */
		}
	}

	/*
	 * Logging.
	 */

	if (GNET_PROPERTY(publisher_debug) > 1) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		char retry[80];
		char after[80];
		const char *late = "";

		after[0] = '\0';

		if (pe->last_publish) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);

			str_bprintf(after, sizeof after,
				" after %s", compact_time(elapsed));

			if (pd != NULL) {
				if (expired)
					late = "late, ";
			} else {
				late = "no data, ";
			}
		}

		str_bprintf(retry, sizeof retry, "%s", compact_time(delay));

		g_debug("PUBLISHER SHA-1 %s %s%s\"%s\" %spublished to %u node%s%s: %s"
			" (%stook %s, total %u node%s, proba %.3f%%, retry in %s,"
			" %s bg, path %u) [%s]",
			sha1_to_string(pe->sha1),
			pe->backgrounded ? "[bg] " : "",
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			pe->last_publish ? "re" : "",
			info->roots, plural(info->roots),
			after, pdht_strerror(code), late,
			compact_time(delta_time(tm_time(), pe->last_enqueued)),
			info->all_roots, plural(info->all_roots),
			info->presence * 100.0, retry,
			info->can_bg ? "can" : "no", info->path_len,
			accepted ? "OK" : "INCOMPLETE");

		shared_file_unref(&sf);
	}

	/*
	 * Update last publishing time and remember expiration time.
	 */

	if (PDHT_E_OK == code && info->roots > 0) {
		pe->last_publish = tm_time();
		if (pd != NULL) {
			pd->expiration =
				time_advance(pe->last_publish, DHT_VALUE_ALOC_EXPIRE);
			dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
		}
	}

	/*
	 * If entry was deemed popular, we're going to delay its republishing
	 * by a larger amount of time and any data we published already about
	 * it will surely expire.  Since this is our decision, we do not want
	 * to be told that republishing, if it occurs again, was done later than
	 * required.  Hence call publisher_hold() to mark that we don't care.
	 */

	if (PDHT_E_POPULAR == code)
		publisher_hold(pe, delay, "popular entry");
	else
		publisher_retry(pe, delay, accepted ? "accepted publish" : "published");

	pe->backgrounded = !accepted;

	return accepted;
}