/**
 * The KUID of the node has changed: remove its entry if it had one and make
 * sure we have an entry for the new KUID.
 *
 * @param kn        the old node
 * @param rn        the replacing node
 */
void
stable_replace(const knode_t *kn, const knode_t *rn)
{
    struct lifedata *ld;

    knode_check(kn);
    knode_check(rn);
    g_assert(rn->flags & KNODE_F_ALIVE);

    ld = get_lifedata(kn->id);
    if (NULL == ld)
        return;        /* Node was not recorded in the "stable" set */

    if (GNET_PROPERTY(dht_stable_debug)) {
        g_debug("DHT STABLE removing obsolete %s, now at %s",
            knode_to_string(kn), knode_to_string2(rn));
    }

    /*
     * Remove the old node and create an entry for the new one.
     */

    dbmw_delete(db_lifedata, kn->id->v);
    gnet_stats_dec_general(GNR_DHT_STABLE_NODES_HELD);
    stable_record_activity(rn);
}
/**
 * Equality of knodes.
 */
int
knode_eq(const void *a, const void *b)
{
    const knode_t *k1 = a;
    const knode_t *k2 = b;

    knode_check(k1);
    knode_check(k2);

    return k1->id == k2->id;    /* We know IDs are atoms */
}
/**
 * Comparison of two knodes based on the last_seen time.
 */
int
knode_seen_cmp(const void *a, const void *b)
{
    const knode_t *k1 = a;
    const knode_t *k2 = b;

    knode_check(k1);
    knode_check(k2);

    return CMP(k1->last_seen, k2->last_seen);
}
/**
 * Pretty-printing of node information for logs into the supplied buffers.
 *
 * IP address is followed by '*' if the contact's address/port was patched.
 * IP address is followed by '?' if the UDP message came from another IP.
 *
 * A "zombie" node is a node retrieved from the persisted routing table that
 * is not alive.  Normally, only alive hosts from which we get traffic are
 * added, but here we have an instance that is not alive -- a zombie.
 *
 * A "cached" node is a node coming from the k-closest root cache.
 *
 * A firewalled node is indicated by a trailing "fw" indication.
 *
 * @return the buffer where printing was done.
 */
const char *
knode_to_string_buf(const knode_t *kn, char buf[], size_t len)
{
    char host_buf[HOST_ADDR_PORT_BUFLEN];
    char vc_buf[VENDOR_CODE_BUFLEN];
    char kuid_buf[KUID_HEX_BUFLEN];

    knode_check(kn);

    bin_to_hex_buf(kn->id, KUID_RAW_SIZE, kuid_buf, sizeof kuid_buf);
    host_addr_port_to_string_buf(kn->addr, kn->port, host_buf, sizeof host_buf);
    vendor_code_to_string_buf(kn->vcode.u32, vc_buf, sizeof vc_buf);

    str_bprintf(buf, len,
        "%s%s%s (%s v%u.%u) [%s] \"%s\", ref=%d%s%s%s%s [%s]",
        host_buf,
        (kn->flags & KNODE_F_PCONTACT) ? "*" : "",
        (kn->flags & KNODE_F_FOREIGN_IP) ? "?" : "",
        vc_buf, kn->major, kn->minor, kuid_buf,
        knode_status_to_string(kn->status),
        kn->refcnt,
        (kn->status != KNODE_UNKNOWN && !(kn->flags & KNODE_F_ALIVE)) ?
            " zombie" : "",
        (kn->flags & KNODE_F_CACHED) ? " cached" : "",
        (kn->flags & KNODE_F_RPC) ? " RPC" : "",
        (kn->flags & KNODE_F_FIREWALLED) ? " fw" : "",
        compact_time(delta_time(tm_time(), kn->first_seen)));

    return buf;
}
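/*
 * Illustrative sketch, not part of the original sources: how the buffer-based
 * formatter above is typically driven from a logging call site.  The helper
 * name and the 160-byte buffer size are arbitrary choices made for this
 * example, not constants or functions taken from the code base.
 */
static inline void
knode_log_contact_example(const knode_t *kn)
{
    char buf[160];        /* Arbitrary size, for the illustration only */

    g_debug("DHT contacting %s", knode_to_string_buf(kn, buf, sizeof buf));
}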
/**
 * Record activity on the node.
 */
void
stable_record_activity(const knode_t *kn)
{
    struct lifedata *ld;
    struct lifedata new_ld;

    knode_check(kn);
    g_assert(kn->flags & KNODE_F_ALIVE);

    ld = get_lifedata(kn->id);

    if (NULL == ld) {
        ld = &new_ld;

        new_ld.version = LIFEDATA_STRUCT_VERSION;
        new_ld.first_seen = kn->first_seen;
        new_ld.last_seen = kn->last_seen;

        gnet_stats_count_general(GNR_DHT_STABLE_NODES_HELD, +1);
    } else {
        if (kn->last_seen <= ld->last_seen)
            return;
        ld->last_seen = kn->last_seen;
    }

    dbmw_write(db_lifedata, kn->id->v, ld, sizeof *ld);
}
/**
 * Hashing of knodes.
 */
unsigned int
knode_hash(const void *key)
{
    const knode_t *kn = key;

    knode_check(kn);

    return kuid_hash(kn->id);
}
/**
 * Remove a reference on a Kademlia node, disposing of the structure when
 * none remain.
 */
void
knode_free(knode_t *kn)
{
    knode_check(kn);
    g_assert(kn->refcnt > 0);

    if (--kn->refcnt)
        return;

    knode_dispose(kn);
}
/**
 * @return whether host can be kept as a valid contact
 */
bool
knode_is_usable(const knode_t *kn)
{
    knode_check(kn);

    if (!host_is_valid(kn->addr, kn->port))
        return FALSE;

    if (hostiles_is_bad(kn->addr))
        return FALSE;

    return TRUE;
}
/**
 * @return whether host's address is a valid DHT value creator.
 */
bool
knode_addr_is_usable(const knode_t *kn)
{
    knode_check(kn);

    if (!host_address_is_usable(kn->addr))
        return FALSE;

    if (hostiles_is_bad(kn->addr))
        return FALSE;

    return TRUE;
}
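/*
 * Illustrative sketch, not part of the original sources: the two predicates
 * above serve different call sites.  This hypothetical helper merely shows
 * the intended split -- the full address/port check before keeping a node as
 * a contact, versus the address-only check when judging a DHT value creator.
 */
static inline bool
knode_accept_example(const knode_t *contact, const knode_t *creator)
{
    /* A contact needs a valid, non-hostile address *and* port */
    if (!knode_is_usable(contact))
        return FALSE;

    /* A value creator only needs a usable, non-hostile address */
    return knode_addr_is_usable(creator);
}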
/**
 * PATRICIA iterator callback to free Kademlia nodes
 */
void
knode_patricia_free(void *key, size_t u_kbits, void *value, void *u_d)
{
    knode_t *kn = value;

    (void) u_kbits;
    (void) u_d;

    knode_check(kn);
    g_assert(key == kn->id);

    knode_free(kn);
}
/**
 * Change node's version
 */
void
knode_change_version(knode_t *kn, uint8 major, uint8 minor)
{
    knode_check(kn);

    if (GNET_PROPERTY(dht_debug))
        g_warning("DHT node %s at %s changed from v%u.%u to v%u.%u",
            kuid_to_hex_string(kn->id),
            host_addr_port_to_string(kn->addr, kn->port),
            kn->major, kn->minor, major, minor);

    kn->major = major;
    kn->minor = minor;
}
/**
 * Can the node which timed-out in the past be considered again as the
 * target of an RPC, and therefore returned in k-closest lookups?
 */
bool
knode_can_recontact(const knode_t *kn)
{
    time_t grace;
    time_delta_t elapsed;

    knode_check(kn);

    if (!kn->rpc_timeouts)
        return TRUE;        /* Timeout condition was cleared */

    grace = 1 << kn->rpc_timeouts;
    elapsed = delta_time(tm_time(), kn->last_sent);

    return elapsed > grace;
}
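/*
 * Worked example, added for illustration: the grace period above grows
 * exponentially with the number of consecutive RPC timeouts, i.e. the node
 * becomes eligible again only once more than 2^rpc_timeouts seconds have
 * elapsed since the last RPC we sent it:
 *
 *    rpc_timeouts = 1  ->  eligible after more than  2 seconds
 *    rpc_timeouts = 3  ->  eligible after more than  8 seconds
 *    rpc_timeouts = 6  ->  eligible after more than 64 seconds
 *
 * A node with no recorded timeouts is always eligible for recontact.
 */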
/**
 * Change node's vendor code.
 */
void
knode_change_vendor(knode_t *kn, vendor_code_t vcode)
{
    knode_check(kn);

    if (GNET_PROPERTY(dht_debug)) {
        char vc_old[VENDOR_CODE_BUFLEN];
        char vc_new[VENDOR_CODE_BUFLEN];

        vendor_code_to_string_buf(kn->vcode.u32, vc_old, sizeof vc_old);
        vendor_code_to_string_buf(vcode.u32, vc_new, sizeof vc_new);

        g_warning("DHT node %s at %s changed vendor from %s to %s",
            kuid_to_hex_string(kn->id),
            host_addr_port_to_string(kn->addr, kn->port),
            vc_old, vc_new);
    }

    kn->vcode = vcode;
}
/**
 * Send a STORE message to specified KUID.
 *
 * @param kn        the node to contact
 * @param mb        the message block to send
 * @param id        the caller unique ID
 * @param ops       the callback operations to invoke
 * @param udata     opaque argument given to RPC user callbacks
 */
void
revent_store(knode_t *kn, pmsg_t *mb, struct nid id,
    struct revent_ops *ops, uint32 udata)
{
    struct revent_pmsg_info *pmi;
    struct revent_rpc_info *rpi;

    knode_check(kn);
    g_assert(mb != NULL);
    g_assert(ops != NULL);

    /*
     * Install our own callbacks in order to dispatch the user-supplied
     * callbacks using the processing logic and order defined by our
     * message free and RPC callbacks.
     */

    revent_get_pair(id, kn, udata, ops, &pmi, &rpi);
    dht_rpc_store(kn, mb, revent_rpc_cb, rpi, revent_pmsg_free, pmi);
}
/**
 * Find specified DHT value.
 *
 * @param kn        the node to contact
 * @param kuid      the KUID of the value to look for
 * @param type      the type of value to look for
 * @param skeys     (optional) array of secondary keys to request
 * @param scnt      amount of entries in the skeys array
 * @param id        the caller unique ID
 * @param ops       the callback operations to invoke
 * @param udata     opaque argument given to RPC user callbacks
 */
void
revent_find_value(knode_t *kn, const kuid_t *kuid, dht_value_type_t type,
    kuid_t **skeys, int scnt, struct nid id,
    struct revent_ops *ops, uint32 udata)
{
    struct revent_pmsg_info *pmi;
    struct revent_rpc_info *rpi;

    knode_check(kn);
    g_assert(kuid != NULL);
    g_assert(ops != NULL);

    /*
     * Install our own callbacks in order to dispatch the user-supplied
     * callbacks using the processing logic and order defined by our
     * message free and RPC callbacks.
     */

    revent_get_pair(id, kn, udata, ops, &pmi, &rpi);
    dht_rpc_find_value(kn, kuid, type, skeys, scnt,
        revent_rpc_cb, rpi, revent_pmsg_free, pmi);
}
/**
 * Convenience routine to compute theoretical probability of presence for
 * a node, adjusted down when RPC timeouts occurred recently.
 */
double
knode_still_alive_probability(const knode_t *kn)
{
    double p;
    static bool inited;
    static double decimation[KNODE_MAX_TIMEOUTS];

    knode_check(kn);

    if (G_UNLIKELY(!inited)) {
        size_t i;

        for (i = 0; i < G_N_ELEMENTS(decimation); i++) {
            decimation[i] = pow(KNODE_ALIVE_DECIMATION, (double) (i + 1));
        }

        inited = TRUE;
    }

    p = stable_still_alive_probability(kn->first_seen, kn->last_seen);

    /*
     * If RPC timeouts occurred, the theoretical probability is further
     * adjusted down.  The decimation is arbitrary of course, but the
     * rationale is that an RPC timeout is somehow an indication that the
     * node may not be alive.  Of course, it could be a UDP drop, an IP
     * drop somewhere, but this is why we don't use 0.0 as the decimation!
     */

    if (0 == kn->rpc_timeouts)
        return p;
    else {
        size_t i = MIN(kn->rpc_timeouts, G_N_ELEMENTS(decimation)) - 1;
        return p * decimation[i];
    }
}
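/*
 * Worked example, added for illustration with a made-up constant: if
 * KNODE_ALIVE_DECIMATION were 0.9 (the real value is defined elsewhere in
 * the sources), the pre-computed table above would hold 0.9, 0.81, 0.729, ...
 * A node whose theoretical presence probability is 0.8 and which missed two
 * RPCs in a row would then be reported at 0.8 * 0.81 = 0.648.
 */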
/**
 * RPC callback.
 *
 * @param type      DHT_RPC_REPLY or DHT_RPC_TIMEOUT
 * @param kn        the replying node
 * @param function  the type of message we got (0 on TIMEOUT)
 * @param payload   the payload we got
 * @param len       the length of the payload
 * @param arg       user-defined callback parameter
 */
static void
revent_rpc_cb(
    enum dht_rpc_ret type,
    const knode_t *kn,
    const gnutella_node_t *unused_n,
    kda_msg_t function,
    const char *payload, size_t len, void *arg)
{
    struct revent_rpc_info *rpi = arg;
    struct revent_ops *ops;
    void *obj;

    (void) unused_n;
    knode_check(kn);
    rpi_check(rpi);

    ops = rpi->ops;

    /*
     * It is possible that whilst the RPC was in transit, the operation was
     * terminated.  Therefore, we need to ensure that the recorded user is
     * still alive.
     */

    obj = (*ops->is_alive)(rpi->rid);

    if (NULL == obj) {
        if (*ops->debug > 2)
            g_debug("DHT %s[%s] late RPC %s from %s",
                ops->name, nid_to_string(&rpi->rid),
                type == DHT_RPC_TIMEOUT ? "timeout" : "reply",
                knode_to_string(kn));
        goto cleanup;
    }

    /*
     * Let them know we're about to handle the RPC.
     */

    if (*ops->debug > 2)
        g_debug("DHT %s[%s] handling %s for RPC issued %s%u to %s",
            ops->name, nid_to_string(&rpi->rid),
            type == DHT_RPC_TIMEOUT ? "timeout" : "reply",
            ops->udata_name, rpi->udata, knode_to_string(kn));

    if (ops->handling_rpc)
        (*ops->handling_rpc)(obj, type, kn, rpi->udata);

    /*
     * Handle reply.
     */

    if (type == DHT_RPC_TIMEOUT) {
        if (rpi->pmi != NULL)        /* Message not processed by UDP queue yet */
            rpi->pmi->rpc_done = TRUE;
    } else {
        g_assert(NULL == rpi->pmi);  /* Since message has been sent */

        if (!(*ops->handle_reply)(obj, kn, function, payload, len, rpi->udata))
            goto cleanup;
    }

    /*
     * Allow next iteration to proceed.
     */

    if (ops->iterate)
        (*ops->iterate)(obj, type, rpi->udata);

cleanup:
    revent_rpi_free(rpi);
}
/*
 * Offload keys to remote node, as appropriate.
 *
 * Firstly, we only consider remote nodes whose KUID falls within our k-ball.
 *
 * Secondly, we only consider remote nodes that end up being in our routing
 * table (i.e. ones which are close enough to us to get room in the table,
 * which also means they're not firewalled nor going to shut down soon).
 * This is normally ensured by our caller.
 *
 * Thirdly, we only consider keys closer to the node than we are and for
 * which we are the closest among our k-closest nodes, to avoid too many
 * redundant STORE operations.
 */
void
keys_offload(const knode_t *kn)
{
    struct offload_context ctx;
    unsigned n;
    knode_t *kclosest[KDA_K];        /* Our known k-closest nodes */
    bool debug;

    knode_check(kn);

    if (kn->flags & (KNODE_F_FIREWALLED | KNODE_F_SHUTDOWNING))
        return;

    if (
        !dht_bootstrapped() ||           /* Not bootstrapped */
        !keys_within_kball(kn->id) ||    /* Node KUID outside our k-ball */
        0 == hikset_count(keys)          /* No keys held */
    )
        return;

    debug = GNET_PROPERTY(dht_storage_debug) > 1 ||
        GNET_PROPERTY(dht_publish_debug) > 1;

    if (debug)
        g_debug("DHT preparing key offloading to %s", knode_to_string(kn));

    gnet_stats_inc_general(GNR_DHT_KEY_OFFLOADING_CHECKS);

    ctx.our_kuid = get_our_kuid();
    ctx.remote_kuid = kn->id;
    ctx.found = NULL;
    ctx.count = 0;

    /*
     * We need to have KDA_K closest known alive neighbours in order to
     * be able to select proper keys to offload.
     *
     * Note that we make sure to NOT include the new node in our k-closest set
     * since it would always be closer than ourselves to keys we wish to
     * offload to it...
     */

    n = dht_fill_closest(ctx.our_kuid, kclosest, G_N_ELEMENTS(kclosest),
        ctx.remote_kuid, TRUE);

    if (n < G_N_ELEMENTS(kclosest)) {
        if (debug)
            g_warning("DHT got only %u closest alive nodes, cannot offload", n);
        return;
    }

    /*
     * Prepare a PATRICIA containing the ID of our k-closest alive nodes
     * plus ourselves.
     */

    ctx.kclosest = patricia_create(KUID_RAW_BITSIZE);
    for (n = 0; n < G_N_ELEMENTS(kclosest); n++) {
        patricia_insert(ctx.kclosest, kclosest[n]->id, kclosest[n]->id);
    }
    patricia_insert(ctx.kclosest, ctx.our_kuid, ctx.our_kuid);

    /*
     * Select offloading candidate keys.
     */

    hikset_foreach(keys, keys_offload_prepare, &ctx);
    patricia_destroy(ctx.kclosest);

    if (debug) {
        g_debug("DHT found %u/%zu offloading candidate%s",
            ctx.count, hikset_count(keys), plural(ctx.count));
    }

    if (ctx.count)
        publish_offload(kn, ctx.found);

    pslist_free_null(&ctx.found);
}
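/*
 * Illustrative sketch, not the original keys_offload_prepare(): the per-key
 * test implied by the selection criteria described above keys_offload(),
 * expressed as a standalone predicate.  The helpers kuid_is_closer_to() and
 * kclosest_nearest() are hypothetical stand-ins for "XOR-distance comparison
 * against two candidates" and "closest entry in the PATRICIA of k-closest
 * KUIDs"; only the shape of the decision is meant to be accurate.
 *
 *    static bool
 *    keys_offload_candidate_example(const struct offload_context *ctx,
 *        const kuid_t *key)
 *    {
 *        // Only keys lying closer to the remote node than to ourselves...
 *        if (!kuid_is_closer_to(key, ctx->remote_kuid, ctx->our_kuid))
 *            return FALSE;
 *
 *        // ...and for which we are the closest of our k-closest nodes
 *        // (plus ourselves), so no better-placed neighbour offloads them.
 *        return kclosest_nearest(ctx->kclosest, key) == ctx->our_kuid;
 *    }
 */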