/** * The KUID of the node has changed: remove its entry if it had one and make * sure we have an entry for the new KUID. * * @param kn the old node * @param rn the replacing node */ void stable_replace(const knode_t *kn, const knode_t *rn) { struct lifedata *ld; knode_check(kn); knode_check(rn); g_assert(rn->flags & KNODE_F_ALIVE); ld = get_lifedata(kn->id); if (NULL == ld) return; /* Node was not recorded in the "stable" set */ if (GNET_PROPERTY(dht_stable_debug)) { g_debug("DHT STABLE removing obsolete %s, now at %s", knode_to_string(kn), knode_to_string2(rn)); } /* * Remove the old node and create an entry for the new one. */ dbmw_delete(db_lifedata, kn->id->v); gnet_stats_dec_general(GNR_DHT_STABLE_NODES_HELD); stable_record_activity(rn); }
/**
 * Reclaim memory used by Kademlia node.
 */
static void
knode_dispose(knode_t *kn)
{
	g_assert(kn);
	g_assert(KNODE_MAGIC == kn->magic);
	g_assert(0 == kn->refcnt);

	/*
	 * A node whose status is not KNODE_UNKNOWN is still linked from the
	 * routing table and must never reach this routine.  When it does, a
	 * missing knode_refcnt_inc() somewhere is the likely culprit.
	 */

	if (KNODE_UNKNOWN != kn->status) {
		kn->refcnt++;	/* Revitalize for knode_to_string() assertions */
		g_error("attempting to free node still held in routing table: %s",
			knode_to_string(kn));
		g_assert_not_reached();
	}

	kuid_atom_free_null(&kn->id);
	kn->magic = 0;
	WFREE(kn);
}
/* * Offload keys to remote node, as appropriate. * * Firstly we only consider remote nodes whose KUID falls within our k-ball. * * Secondly, we are only considering remote nodes that end-up being in our * routing table (i.e. ones which are close enough to us to get room in the * table, which also means they're not firewalled nor going to shutdown soon). * This is normally ensured by our caller. * * Thirdly, we are only going to consider keys closer to the node than we are * and for which we are the closest among our k-closest nodes, to avoid too * many redundant STORE operations. */ void keys_offload(const knode_t *kn) { struct offload_context ctx; unsigned n; knode_t *kclosest[KDA_K]; /* Our known k-closest nodes */ bool debug; knode_check(kn); if (kn->flags & (KNODE_F_FIREWALLED | KNODE_F_SHUTDOWNING)) return; if ( !dht_bootstrapped() || /* Not bootstrapped */ !keys_within_kball(kn->id) || /* Node KUID outside our k-ball */ 0 == hikset_count(keys) /* No keys held */ ) return; debug = GNET_PROPERTY(dht_storage_debug) > 1 || GNET_PROPERTY(dht_publish_debug) > 1; if (debug) g_debug("DHT preparing key offloading to %s", knode_to_string(kn)); gnet_stats_inc_general(GNR_DHT_KEY_OFFLOADING_CHECKS); ctx.our_kuid = get_our_kuid(); ctx.remote_kuid = kn->id; ctx.found = NULL; ctx.count = 0; /* * We need to have KDA_K closest known alive neighbours in order to * be able to select proper keys to offload. * * Note that we make sure to NOT include the new node in our k-closest set * since it would always be closer than ourselves to keys we wish to * offload to it... */ n = dht_fill_closest(ctx.our_kuid, kclosest, G_N_ELEMENTS(kclosest), ctx.remote_kuid, TRUE); if (n < G_N_ELEMENTS(kclosest)) { if (debug) g_warning("DHT got only %u closest alive nodes, cannot offload", n); return; } /* * Prepare a PATRICIA containing the ID of our k-closest alive nodes * plus ourselves. 
*/ ctx.kclosest = patricia_create(KUID_RAW_BITSIZE); for (n = 0; n < G_N_ELEMENTS(kclosest); n++) { patricia_insert(ctx.kclosest, kclosest[n]->id, kclosest[n]->id); } patricia_insert(ctx.kclosest, ctx.our_kuid, ctx.our_kuid); /* * Select offloading candidate keys. */ hikset_foreach(keys, keys_offload_prepare, &ctx); patricia_destroy(ctx.kclosest); if (debug) { g_debug("DHT found %u/%zu offloading candidate%s", ctx.count, hikset_count(keys), plural(ctx.count)); } if (ctx.count) publish_offload(kn, ctx.found); pslist_free_null(&ctx.found); }
/** * Update k-ball information. */ void keys_update_kball(void) { kuid_t *our_kuid = get_our_kuid(); knode_t **kvec; int kcnt; patricia_t *pt; int i; WALLOC_ARRAY(kvec, KDA_K); kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, TRUE); kball.seeded = TRUE; /* * If we know of no alive nodes yet, request any node we have in the * routing table, even "zombies". If we get less than KDA_K of these, * we definitively know not enough about the DHT structure yet! */ if (0 == kcnt) { kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, FALSE); if (kcnt < KDA_K) kball.seeded = FALSE; } pt = patricia_create(KUID_RAW_BITSIZE); for (i = 0; i < kcnt; i++) { knode_t *kn = kvec[i]; patricia_insert(pt, kn->id, kn); } if (patricia_count(pt)) { knode_t *furthest = patricia_furthest(pt, our_kuid); knode_t *closest = patricia_closest(pt, our_kuid); size_t fbits; size_t cbits; kuid_atom_change(&kball.furthest, furthest->id); kuid_atom_change(&kball.closest, closest->id); fbits = kuid_common_prefix(kball.furthest, our_kuid); cbits = kuid_common_prefix(kball.closest, our_kuid); g_assert(fbits <= cbits); g_assert(cbits <= KUID_RAW_BITSIZE); if (GNET_PROPERTY(dht_debug)) { uint8 width = cbits - fbits; g_debug("DHT %sk-ball %s %u bit%s (was %u-bit wide)", kball.seeded ? "" : "(not seeded yet) ", width == kball.width ? "remained at" : width > kball.width ? 
"expanded to" : "shrunk to", width, plural(width), kball.width); g_debug("DHT k-ball closest (%zu common bit%s) is %s", cbits, plural(cbits), knode_to_string(closest)); g_debug("DHT k-ball furthest (%zu common bit%s) is %s", fbits, plural(fbits), knode_to_string(furthest)); } STATIC_ASSERT(KUID_RAW_BITSIZE < 256); kball.furthest_bits = fbits & 0xff; kball.closest_bits = cbits & 0xff; kball.width = (cbits - fbits) & 0xff; kball.theoretical_bits = dht_get_kball_furthest() & 0xff; gnet_stats_set_general(GNR_DHT_KBALL_FURTHEST, kball.furthest_bits); gnet_stats_set_general(GNR_DHT_KBALL_CLOSEST, kball.closest_bits); } WFREE_ARRAY(kvec, KDA_K); patricia_destroy(pt); }
/**
 * RPC callback.
 *
 * @param type			DHT_RPC_REPLY or DHT_RPC_TIMEOUT
 * @param kn			the replying node
 * @param function		the type of message we got (0 on TIMEOUT)
 * @param payload		the payload we got
 * @param len			the length of the payload
 * @param arg			user-defined callback parameter
 */
static void
revent_rpc_cb(
	enum dht_rpc_ret type, const knode_t *kn,
	const gnutella_node_t *unused_n, kda_msg_t function,
	const char *payload, size_t len, void *arg)
{
	struct revent_rpc_info *rpi = arg;
	struct revent_ops *ops;
	void *obj;
	bool timed_out;

	(void) unused_n;
	knode_check(kn);
	rpi_check(rpi);

	ops = rpi->ops;
	timed_out = DHT_RPC_TIMEOUT == type;

	/*
	 * It is possible that whilst the RPC was in transit, the operation was
	 * terminated.  Therefore, we need to ensure that the recorded user is
	 * still alive.
	 */

	obj = (*ops->is_alive)(rpi->rid);

	if (NULL == obj) {
		if (*ops->debug > 2)
			g_debug("DHT %s[%s] late RPC %s from %s",
				ops->name, nid_to_string(&rpi->rid),
				timed_out ? "timeout" : "reply",
				knode_to_string(kn));
		goto cleanup;
	}

	/*
	 * Let them know we're about to handle the RPC.
	 */

	if (*ops->debug > 2)
		g_debug("DHT %s[%s] handling %s for RPC issued %s%u to %s",
			ops->name, nid_to_string(&rpi->rid),
			timed_out ? "timeout" : "reply",
			ops->udata_name, rpi->udata, knode_to_string(kn));

	if (ops->handling_rpc)
		(*ops->handling_rpc)(obj, type, kn, rpi->udata);

	/*
	 * Handle reply.
	 */

	if (timed_out) {
		if (rpi->pmi != NULL)		/* Message not processed by UDP queue yet */
			rpi->pmi->rpc_done = TRUE;
	} else {
		g_assert(NULL == rpi->pmi);	/* Since message has been sent */

		if (!(*ops->handle_reply)(obj, kn, function, payload, len, rpi->udata))
			goto cleanup;
	}

	/*
	 * Allow next iteration to proceed.
	 */

	if (ops->iterate)
		(*ops->iterate)(obj, type, rpi->udata);

cleanup:
	revent_rpi_free(rpi);
}
/**
 * Free routine for our extended message blocks.
 */
static void
revent_pmsg_free(pmsg_t *mb, void *arg)
{
	struct revent_pmsg_info *pmi = arg;
	struct revent_ops *ops;
	void *obj;

	pmi_check(pmi);
	g_assert(pmsg_is_extended(mb));

	ops = pmi->ops;

	/*
	 * It is possible that whilst the message was in the message queue,
	 * the operation was terminated.  Therefore, we need to ensure that the
	 * recorded user is still alive.
	 */

	obj = (*ops->is_alive)(pmi->rid);

	if (NULL == obj) {
		if (*ops->debug > 2)
			g_debug("DHT %s[%s] late UDP message %s",
				ops->name, nid_to_string(&pmi->rid),
				pmsg_was_sent(mb) ? "sending" : "dropping");
		goto cleanup;
	}

	/*
	 * Signal message freeing, so that user structure can decrement the
	 * amount of pending messages if necessary.
	 */

	if (ops->freeing_msg)
		(*ops->freeing_msg)(obj);

	/*
	 * If the RPC callback triggered before the UDP message queue could
	 * process the message on the way out, then we don't need to do anything
	 * as the RPC is already dead and has been processed as such...
	 */

	if (pmi->rpc_done)
		goto cleanup;

	pmi->rpi->pmi = NULL;		/* Break x-ref as message was processed */

	if (pmsg_was_sent(mb)) {
		knode_t *kn = pmi->kn;

		/*
		 * Message was successfully sent from the queue.
		 */

		kn->last_sent = tm_time();

		if (ops->msg_sent)
			(*ops->msg_sent)(obj, mb);

		if (*ops->debug > 4)
			g_debug("DHT %s[%s] sent %s (%d bytes) to %s, RTT=%u",
				ops->name, nid_to_string(&pmi->rid),
				kmsg_infostr(pmsg_phys_base(mb)), pmsg_written_size(mb),
				knode_to_string(kn), kn->rtt);
	} else {
		knode_t *kn = pmi->kn;
		guid_t *muid;

		if (*ops->debug > 2)
			g_debug("DHT %s[%s] message %s%u to %s dropped by UDP queue",
				ops->name, nid_to_string(&pmi->rid),
				ops->udata_name, pmi->rpi->udata, knode_to_string(kn));

		/*
		 * Message was not sent and dropped by the queue.
		 */

		if (ops->msg_dropped)
			(*ops->msg_dropped)(obj, kn, mb);

		/*
		 * Cancel the RPC, since the message was never sent out...
		 * The MUID is at the start of the message.
		 */

		g_assert(pmsg_written_size(mb) > GUID_RAW_SIZE);

		muid = cast_to_guid_ptr(pmsg_phys_base(mb));
		dht_rpc_cancel(muid);

		if (ops->rpc_cancelled)
			(*ops->rpc_cancelled)(obj, pmi->rpi->udata);

		revent_rpi_free(pmi->rpi);	/* Cancel does not invoke RPC callback */
	}

cleanup:
	revent_pmi_free(pmi);
}