/**
 * Move entry to the tail of the list.
 */
void
hash_list_moveto_tail(hash_list_t *hl, const void *key)
{
	struct hash_list_item *hli;

	hash_list_check(hl);
	g_assert(1 == hl->refcount);
	g_assert(size_is_positive(elist_count(&hl->list)));

	hli = hikset_lookup(hl->ht, key);
	g_assert(hli != NULL);

	/*
	 * Unless the item is already the last one, unlink it and
	 * re-attach it at the end of the list.
	 */

	if (&hli->lnk != elist_last(&hl->list)) {
		elist_link_remove(&hl->list, &hli->lnk);
		elist_link_append(&hl->list, &hli->lnk);
	}

	hl->stamp++;		/* Unsafe update: invalidates concurrent iterators */

	hash_list_regression(hl);
}
/**
 * Remove the entry bound to `key' from the list, remembering where it was
 * so that it can later be re-inserted at the very same spot.
 *
 * The returned token is consumed by hash_list_insert_position(); when no
 * re-insertion is performed, it must be released explicitly through
 * hash_list_forget_position().
 *
 * @return an opaque position token for hash_list_insert_position(), or
 * NULL when the key is not present in the list.
 */
void *
hash_list_remove_position(hash_list_t *hl, const void *key)
{
	struct hash_list_item *hli;
	struct hash_list_position *token;

	hash_list_check(hl);
	g_assert(1 == hl->refcount);

	hli = hikset_lookup(hl->ht, key);
	if (NULL == hli)
		return NULL;

	/*
	 * The token captures the predecessor link so re-insertion can occur
	 * right after it.  We also keep the hash_list_t itself (sanity check
	 * that re-insertion targets the proper list) and the current stamp:
	 * any unsafe update (move / deletion) between removal and re-insertion
	 * will be caught through that saved stamp.
	 */

	WALLOC(token);
	token->magic = HASH_LIST_POSITION_MAGIC;
	token->hl = hl;
	token->prev = elist_prev(&hli->lnk);
	token->stamp = hl->stamp;

	hash_list_remove_item(hl, hli);

	return token;
}
/**
 * Check whether key already holds data from the creator.
 *
 * @param id		the primary key
 * @param cid		the secondary key (creator's id)
 * @param store		whether to increment the store request count
 *
 * @return 64-bit DB key for the value if it does, 0 if key either does not
 * exist yet or does not hold data from the creator.
 */
uint64
keys_has(const kuid_t *id, const kuid_t *cid, bool store)
{
	struct keyinfo *kinfo;
	struct keydata *kdata;
	uint64 dbkey;

	kinfo = hikset_lookup(keys, id);
	if (NULL == kinfo)
		return 0;

	if (store)
		kinfo->store_requests++;

	kdata = get_keydata(id);
	if (NULL == kdata)
		return 0;

	g_assert(kinfo->values == kdata->values);

	dbkey = lookup_secondary(kdata, cid);

	if (GNET_PROPERTY(dht_storage_debug) > 15) {
		g_debug("DHT lookup secondary for %s/%s => dbkey %s",
			kuid_to_hex_string(id), kuid_to_hex_string2(cid),
			uint64_to_string(dbkey));
	}

	return dbkey;
}
/**
 * Record freshly computed SHA1 (and optional TTH) for a shared file, after
 * re-validating that the file on disk has not changed meanwhile, and update
 * the SHA1 cache (volatile and, for new entries, persistent) accordingly.
 *
 * @param sf	the shared file whose hashes were computed
 * @param sha1	the computed SHA1 (mandatory)
 * @param tth	the computed TTH (may be NULL)
 *
 * @return FALSE when the file was flagged as spam and removed from the
 * library, TRUE otherwise (including when the hashes were discarded because
 * the file vanished or was modified during hashing).
 */
bool
huge_update_hashes(shared_file_t *sf,
	const struct sha1 *sha1, const struct tth *tth)
{
	struct sha1_cache_entry *cached;
	filestat_t sb;

	shared_file_check(sf);
	g_return_val_if_fail(sha1, FALSE);

	/*
	 * Make sure the file's timestamp is still accurate.
	 */

	if (-1 == stat(shared_file_path(sf), &sb)) {
		/* File is gone or unreadable: drop the computed hash entirely */
		g_warning("discarding SHA1 for file \"%s\": can't stat(): %m",
			shared_file_path(sf));
		shared_file_remove(sf);
		return TRUE;
	}

	if (sb.st_mtime != shared_file_modification_time(sf)) {
		/* File changed whilst we hashed it: the SHA1 is stale, recompute */
		g_warning("file \"%s\" was modified whilst SHA1 was computed",
			shared_file_path(sf));
		shared_file_set_modification_time(sf, sb.st_mtime);
		request_sha1(sf);		/* Retry! */
		return TRUE;
	}

	if (huge_spam_check(sf, sha1)) {
		shared_file_remove(sf);
		return FALSE;
	}

	shared_file_set_sha1(sf, sha1);
	shared_file_set_tth(sf, tth);

	/* Update cache */

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (cached) {
		update_volatile_cache(cached, shared_file_size(sf),
			shared_file_modification_time(sf), sha1, tth);
		cache_dirty = TRUE;

		/* Dump the cache at most about once per minute. */
		if (!cache_dumped || delta_time(tm_time(), cache_dumped) > 60) {
			dump_cache(FALSE);
		}
	} else {
		/* First time we hash this path: create both cache entries */
		add_volatile_cache_entry(shared_file_path(sf),
			shared_file_size(sf), shared_file_modification_time(sf),
			sha1, tth, TRUE);
		add_persistent_cache_entry(shared_file_path(sf),
			shared_file_size(sf), shared_file_modification_time(sf),
			sha1, tth);
	}
	return TRUE;
}
/** * @return whether key is "store-loaded", i.e. if we are getting too many * STORE requests for it. */ bool keys_is_store_loaded(const kuid_t *id) { struct keyinfo *ki; g_assert(id); ki = hikset_lookup(keys, id); if (ki == NULL) return FALSE; if (ki->store_req_load >= LOAD_STO_THRESH) return TRUE; /* * Look whether the current amount of store requests is sufficient to * bring the EMA above the threshold at the next update. */ if (ki->store_requests) { float limit = LOAD_STO_THRESH / LOAD_SMOOTH - (1.0 - LOAD_SMOOTH) / LOAD_SMOOTH * ki->store_req_load; if (1.0 * ki->store_requests > limit) return TRUE; } return FALSE; }
/**
 * External interface to call for getting the hash for a shared_file.
 */
void
request_sha1(shared_file_t *sf)
{
	struct sha1_cache_entry *sce;

	shared_file_check(sf);

	if (!shared_file_indexed(sf))
		return;	/* "stale" shared file, has been superseded or removed */

	sce = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (sce != NULL && cached_entry_up_to_date(sce, sf)) {
		/* Cache hit: reuse the stored digests, no recomputation needed */
		cache_dirty = TRUE;
		sce->shared = TRUE;
		shared_file_set_sha1(sf, sce->sha1);
		shared_file_set_tth(sf, sce->tth);
		request_tigertree(sf, NULL == sce->tth);
		return;
	}

	if (GNET_PROPERTY(share_debug) > 1) {
		if (sce != NULL) {
			g_debug("cached SHA1 entry for \"%s\" outdated: "
				"had mtime %lu, now %lu",
				shared_file_path(sf),
				(ulong) sce->mtime,
				(ulong) shared_file_modification_time(sf));
		} else {
			g_debug("queuing \"%s\" for SHA1 computation",
				shared_file_path(sf));
		}
	}

	queue_shared_file_for_sha1_computation(sf);
}
/**
 * Record a SHA1 for publishing.
 *
 * Silently ignored when the SHA-1 is already known or when we are in the
 * process of shutting down.  Otherwise, persistent publishing data is
 * created if missing and the new entry is handled immediately.
 *
 * @param sha1	the SHA-1 to schedule for publishing (copied, not retained)
 */
void
publisher_add(const sha1_t *sha1)
{
	struct publisher_entry *pe;
	struct pubdata *pd;

	g_assert(sha1 != NULL);

	if (NULL == db_pubdata)
		return;		/* Shutdowning */

	/*
	 * If already known, ignore silently.
	 */

	if (hikset_lookup(publisher_sha1, sha1))
		return;

	/*
	 * Create persistent publishing data if none known already.
	 */

	pd = get_pubdata(sha1);
	if (NULL == pd) {
		struct pubdata new_pd;

		/* Zeroed times mean "never enqueued" / "never published" */
		new_pd.next_enqueue = 0;
		new_pd.expiration = 0;

		dbmw_write(db_pubdata, sha1, &new_pd, sizeof new_pd);

		if (GNET_PROPERTY(publisher_debug) > 2) {
			g_debug("PUBLISHER allocating new SHA-1 %s",
				sha1_to_string(sha1));
		}
	} else {
		if (GNET_PROPERTY(publisher_debug) > 2) {
			time_delta_t enqueue = delta_time(pd->next_enqueue, tm_time());
			time_delta_t expires = delta_time(pd->expiration, tm_time());

			g_debug("PUBLISHER existing SHA-1 %s, next enqueue %s%s, %s%s",
				sha1_to_string(sha1),
				enqueue > 0 ? "in " : "",
				enqueue > 0 ? compact_time(enqueue) : "now",
				pd->expiration ?
					(expires > 0 ? "expires in " : "expired") :
					"not published",
				expires > 0 ? compact_time2(expires) : "");
		}
	}

	/*
	 * New entry will be processed immediately.
	 */

	pe = publisher_entry_alloc(sha1);
	hikset_insert_key(publisher_sha1, &pe->sha1);

	publisher_handle(pe);
}
/** * External interface to check whether the sha1 for shared_file is known. */ bool sha1_is_cached(const shared_file_t *sf) { const struct sha1_cache_entry *cached; cached = hikset_lookup(sha1_cache, shared_file_path(sf)); return cached && cached_entry_up_to_date(cached, sf); }
/**
 * Remove `data' from the list.
 *
 * @return the data that was associated with the given key.
 */
void *
hash_list_remove(hash_list_t *hl, const void *key)
{
	struct hash_list_item *hli;

	hash_list_check(hl);
	g_assert(1 == hl->refcount);

	hli = hikset_lookup(hl->ht, key);
	if (NULL == hli)
		return NULL;

	return hash_list_remove_item(hl, hli);
}
/**
 * Get the item before a given key.
 */
void *
hash_list_previous(hash_list_t *hl, const void *key)
{
	struct hash_list_item *hli;

	hash_list_check(hl);

	hli = hikset_lookup(hl->ht, key);
	if (hli != NULL)
		hli = elist_data(&hl->list, elist_prev(&hli->lnk));

	return hli != NULL ? deconstify_pointer(hli->key) : NULL;
}
/**
 * Cancel monitoring of specified file.
 */
void
watcher_unregister(const char *filename)
{
	struct monitored *m = hikset_lookup(monitored, filename);

	g_assert(m != NULL);

	/* Remove by the entry's own key (the one the table actually holds) */
	hikset_remove(monitored, m->filename);
	watcher_free(m);
}
/**
 * Find key in hashlist. If ``orig_key_ptr'' is not NULL and the key
 * exists, a pointer to the stored key is written into it.
 *
 * @return TRUE if the key is present.
 */
bool
hash_list_find(hash_list_t *hl, const void *key,
	const void **orig_key_ptr)
{
	struct hash_list_item *hli;

	hash_list_check(hl);

	hli = hikset_lookup(hl->ht, key);
	if (hli != NULL && orig_key_ptr != NULL)
		*orig_key_ptr = hli->key;

	return hli != NULL;
}
/**
 * Find an existing file object associated with the given pathname
 * for the given access mode.
 *
 * An O_RDWR object can satisfy any access mode, but a mode-specific
 * object (O_RDONLY / O_WRONLY) takes precedence when one exists.
 *
 * @param pathname	absolute path of the file (asserted)
 * @param accmode	requested access mode (O_RDONLY, O_WRONLY or O_RDWR)
 *
 * @return If no file object with the given pathname is found NULL
 * is returned.
 */
static struct file_object *
file_object_find(const char * const pathname, int accmode)
{
	struct file_object *fo;

	g_return_val_if_fail(ht_file_objects_rdonly, NULL);
	g_return_val_if_fail(ht_file_objects_wronly, NULL);
	g_return_val_if_fail(ht_file_objects_rdwr, NULL);
	g_return_val_if_fail(pathname, NULL);
	g_return_val_if_fail(is_absolute_path(pathname), NULL);

	/* An O_RDWR object is compatible with any requested mode */
	fo = hikset_lookup(file_object_mode_get_table(O_RDWR), pathname);

	/*
	 * We need to find a more specific file object if looking for O_WRONLY
	 * or O_RDONLY ones.
	 */

	if (O_RDWR != accmode) {
		struct file_object *xfo;

		xfo = hikset_lookup(file_object_mode_get_table(accmode), pathname);
		if (xfo != NULL) {
			g_assert(xfo->accmode == accmode);
			fo = xfo;		/* Mode-specific object wins over O_RDWR */
		}
	}

	if (fo) {
		/* Sanity checks on the object we are about to hand out */
		file_object_check(fo);
		g_assert(is_valid_fd(fo->fd));
		g_assert(0 == strcmp(pathname, fo->pathname));
		g_assert(accmode_is_valid(fo->fd, accmode));
		g_assert(!fo->removed);
	}

	return fo;
}
/**
 * Lookup value in table.
 */
void *
aging_lookup(const aging_table_t *ag, const void *key)
{
	struct aging_value *av;
	void *result;

	aging_check(ag);

	aging_synchronize(ag);

	av = hikset_lookup(ag->table, key);
	result = NULL == av ? NULL : av->value;

	aging_return(ag, result);
}
/** * Return entry age in seconds, (time_delta_t) -1 if not found. */ time_delta_t aging_age(const aging_table_t *ag, const void *key) { struct aging_value *aval; time_delta_t age; aging_check(ag); aging_synchronize(ag); aval = hikset_lookup(ag->table, key); age = aval == NULL ? (time_delta_t) -1 : delta_time(tm_time(), aval->last_insert); aging_return(ag, age); }
/**
 * Generate a new GUID atom that is not already conflicting with any other
 * GUID recorded in the supplied hikset (hash set with values pointing to
 * the GUID key).
 *
 * @attention
 * It is up to the caller to later insert the value referencing this GUID in
 * the hikset to prevent further duplicates. To avoid race conditions between
 * the checking of the hiset and the insertion, the hikset should be locked
 * if it is shared by multiple threads.
 *
 * @param hik	the hikset against which we need to check for duplicates
 * @param gtkg	whether to flag the GUID as being generated by GTKG.
 *
 * @return a new unique GUID atom.
 */
const guid_t *
guid_unique_atom(const hikset_t *hik, bool gtkg)
{
	int attempt;
	guid_t guid;

	entropy_harvest_time();

	for (attempt = 0; attempt < 100; attempt++) {
		guid_random_fill(&guid);

		if (gtkg)
			guid_flag_gtkg(&guid);	/* Mark as being from GTKG */

		if (hikset_lookup(hik, &guid) == NULL)
			return atom_guid_get(&guid);
	}

	/* 100 collisions in a row: the RNG is broken beyond repair */
	g_error("%s(): no luck with random number generator", G_STRFUNC);
}
/**
 * A value held under the key was updated and has a new expiration time.
 *
 * The creators[] array is kept sorted by KUID, so the creator's slot is
 * located through a binary search; the key data is persisted only when the
 * creator was actually found.
 *
 * @param id		the primary key (existing already)
 * @param cid		the secondary key (creator's ID)
 * @param expire	expiration time for the value
 */
void
keys_update_value(const kuid_t *id, const kuid_t *cid, time_t expire)
{
	struct keyinfo *ki;
	struct keydata *kd;

	ki = hikset_lookup(keys, id);
	g_assert(ki != NULL);

	/* The key's next expiration can only move earlier, never later */
	ki->next_expire = MIN(ki->next_expire, expire);
	kd = get_keydata(id);

	if (kd != NULL) {
		int low = 0, high = ki->values - 1;
		bool found = FALSE;

		/* Binary search for the creator in the sorted creators[] array */
		while (low <= high) {
			int mid = low + (high - low) / 2;
			int c;

			g_assert(mid >= 0 && mid < ki->values);

			c = kuid_cmp(&kd->creators[mid], cid);

			if (0 == c) {
				kd->expire[mid] = expire;
				found = TRUE;
				break;
			} else if (c < 0) {
				low = mid + 1;
			} else {
				high = mid - 1;
			}
		}

		if (found) {
			/* Persist the updated expiration time */
			dbmw_write(db_keydata, id, kd, sizeof *kd);
		} else if (GNET_PROPERTY(dht_keys_debug)) {
			g_warning("DHT KEYS %s(): creator %s not found under %s",
				G_STRFUNC, kuid_to_hex_string(cid),
				kuid_to_hex_string2(id));
		}
	}
}
/**
 * Look whether we still need to compute the SHA1 of the given shared file
 * by looking into our in-core cache to see whether the entry we have is
 * up-to-date.
 *
 * @param sf	the shared file for which we want to compute the SHA1
 *
 * @return TRUE if the file need SHA1 recomputation.
 */
static bool
huge_need_sha1(shared_file_t *sf)
{
	struct sha1_cache_entry *cached;

	shared_file_check(sf);

	/*
	 * After a rescan, there might be files in the queue which are
	 * no longer shared.
	 */

	if (!shared_file_indexed(sf))
		return FALSE;

	if G_UNLIKELY(NULL == sha1_cache)
		return FALSE;	/* Shutdown occurred (processing TEQ event?) */

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (cached != NULL) {
		filestat_t sb;

		if (-1 == stat(shared_file_path(sf), &sb)) {
			g_warning("ignoring SHA1 recomputation request for \"%s\": %m",
				shared_file_path(sf));
			return FALSE;
		}

		/*
		 * The "+ 0" casts force both sizes into common arithmetic types
		 * so the comparison is width-safe regardless of how the platform
		 * defines st_size versus our filesize_t.
		 */
		if (
			cached->size + (fileoffset_t) 0 == sb.st_size + (filesize_t) 0
			&& cached->mtime == sb.st_mtime
		) {
			/* Cached digest still matches the on-disk file: skip work */
			if (GNET_PROPERTY(share_debug) > 1) {
				g_warning("ignoring duplicate SHA1 work for \"%s\"",
					shared_file_path(sf));
			}
			return FALSE;
		}
	}

	return TRUE;
}
/**
 * Lookup value in table, and if found, revitalize entry, restoring the
 * initial lifetime the key/value pair had at insertion time.
 */
void *
aging_lookup_revitalise(aging_table_t *ag, const void *key)
{
	struct aging_value *av;
	void *result = NULL;

	aging_check(ag);

	aging_synchronize(ag);

	av = hikset_lookup(ag->table, key);
	if (av != NULL) {
		/* Refresh timestamp and move entry back to the "youngest" end */
		av->last_insert = tm_time();
		elist_moveto_tail(&ag->list, av);
		result = av->value;
	}

	aging_return(ag, result);
}
/**
 * Dispose of a file object whose reference count dropped to one.
 *
 * When the object was flagged as removed it must no longer be present in
 * its mode table (asserted); otherwise it is detached from the table first.
 */
static void
file_object_free(struct file_object * const fo)
{
	g_return_if_fail(fo);
	file_object_check(fo);
	g_return_if_fail(1 == fo->ref_count);

	if (!fo->removed) {
		file_object_remove(fo);
	} else {
		const struct file_object *other;

		other = hikset_lookup(
			file_object_mode_get_table(fo->accmode), fo->pathname);
		g_assert(other != fo);
	}

	fd_close(&fo->fd);
	atom_str_free_null(&fo->pathname);
	fo->magic = 0;
	WFREE(fo);
}
/**
 * Looks for ``hostname'' in ``cache'' wrt to cache->timeout. If
 * ``hostname'' is not found or the entry is expired, FALSE will be
 * returned. Expired entries will be removed! ``addr'' is allowed to
 * be NULL, otherwise the cached IP will be stored into the variable
 * ``addr'' points to.
 *
 * @param addrs		An array of host_addr_t items. If not NULL, up to
 *                  ``n'' items will be copied from the cache.
 * @param n			The number of items "addrs" can hold.
 * @return The number of cached addresses for the given hostname.
 */
static size_t
adns_cache_lookup(adns_cache_t *cache, time_t now,
	const char *hostname, host_addr_t *addrs, size_t n)
{
	adns_cache_entry_t *entry;

	g_assert(NULL != cache);
	g_assert(NULL != hostname);
	g_assert(0 == n || NULL != addrs);

	entry = hikset_lookup(cache->ht, hostname);
	if (entry) {
		if (delta_time(now, entry->timestamp) < cache->timeout) {
			size_t i;

			/*
			 * Fresh entry: copy up to `n' addresses into the caller's
			 * vector, zero-filling the slots beyond what we hold.
			 */
			for (i = 0; i < n; i++) {
				if (i < entry->n) {
					addrs[i] = entry->addrs[i];
					if (common_dbg > 0)
						g_debug("%s: \"%s\" cached (addr=%s)",
							G_STRFUNC, entry->hostname,
							host_addr_to_string(addrs[i]));
				} else {
					addrs[i] = zero_host_addr;
				}
			}
		} else {
			/* Expired entry: evict it and report a cache miss */
			if (common_dbg > 0) {
				g_debug("%s: removing \"%s\" from cache",
					G_STRFUNC, entry->hostname);
			}
			hikset_remove(cache->ht, hostname);
			adns_cache_free_entry(cache, entry->id);
			entry = NULL;	/* Forces the "miss" return value below */
		}
	}

	return entry ? entry->n : 0;
}
/**
 * Fill supplied value vector with all the DHT values we have under the key.
 *
 * This is an internal call, not the result of an external query, so no
 * statistics are updated.
 *
 * @param id		the primary key of the value
 * @param valvec	value vector where results are stored
 * @param valcnt	size of value vector
 *
 * @return amount of values filled into valvec.  The values are dynamically
 * created and must be freed by caller through dht_value_free().
 */
int
keys_get_all(const kuid_t *id, dht_value_t **valvec, int valcnt)
{
	struct keyinfo *ki;
	struct keydata *kd;
	dht_value_t **vp = valvec;
	int remain = valcnt;
	int i;

	g_assert(valvec);
	g_assert(valcnt > 0);

	ki = hikset_lookup(keys, id);
	if (NULL == ki)
		return 0;

	kd = get_keydata(id);
	if (NULL == kd)
		return 0;		/* DB failure */

	for (i = 0; i < kd->values && remain > 0; i++) {
		uint64 dbkey = kd->dbkeys[i];
		dht_value_t *v;

		g_assert(0 != dbkey);

		v = values_get(dbkey, DHT_VT_ANY);
		if (NULL == v)
			continue;

		g_assert(kuid_eq(dht_value_key(v), id));

		*vp++ = v;
		remain--;
	}

	return vp - valvec;		/* Amount of entries filled */
}
/**
 * Adds ``hostname'' and ``addr'' to the cache. The cache is implemented
 * as a wrap-around FIFO. In case it's full, the oldest entry will be
 * overwritten.
 *
 * The hostname must not already be cached (asserted): callers are expected
 * to check with adns_cache_lookup() first.
 */
static void
adns_cache_add(adns_cache_t *cache, time_t now,
	const char *hostname, const host_addr_t *addrs, size_t n)
{
	adns_cache_entry_t *entry;
	size_t i;

	g_assert(NULL != addrs);
	g_assert(NULL != cache);
	g_assert(NULL != hostname);
	g_assert(n > 0);

	g_assert(!hikset_contains(cache->ht, hostname));
	g_assert(cache->pos < G_N_ELEMENTS(cache->entries));

	/*
	 * The FIFO slot at cache->pos may still hold the oldest entry:
	 * evict it from the hash table before reusing the slot.
	 */
	entry = adns_cache_get_entry(cache, cache->pos);
	if (entry) {
		g_assert(entry->hostname);
		g_assert(entry == hikset_lookup(cache->ht, entry->hostname));

		hikset_remove(cache->ht, entry->hostname);
		adns_cache_free_entry(cache, cache->pos);
		entry = NULL;
	}

	/* Entry is sized for `n' trailing addresses (flexible array) */
	entry = walloc(adns_cache_entry_size(n));
	entry->n = n;
	entry->hostname = atom_str_get(hostname);
	entry->timestamp = now;
	entry->id = cache->pos;

	for (i = 0; i < entry->n; i++) {
		entry->addrs[i] = addrs[i];
	}

	/* The hostname atom inside the entry is the hash key itself */
	hikset_insert_key(cache->ht, &entry->hostname);
	cache->entries[cache->pos++] = entry;
	cache->pos %= G_N_ELEMENTS(cache->entries);	/* Wrap-around FIFO */
}
/**
 * Get an iterator on the list, positionned at the specified item.
 * Get next items with hash_list_iter_next() or hash_list_iter_previous().
 *
 * @return the iterator object or NULL if the key is not in the list.
 */
hash_list_iter_t *
hash_list_iterator_at(hash_list_t *hl, const void *key)
{
	struct hash_list_item *hli;
	hash_list_iter_t *iter;

	if (NULL == hl)
		return NULL;

	hash_list_check(hl);

	hli = hikset_lookup(hl->ht, key);
	if (NULL == hli)
		return NULL;

	iter = hash_list_iterator_new(hl, HASH_LIST_ITER_UNDEFINED);
	iter->prev = elist_prev(&hli->lnk);
	iter->next = elist_next(&hli->lnk);
	iter->item = hli;

	return iter;
}
/**
 * Get key status (full and loaded boolean attributes).
 *
 * As a side effect, values whose expiration time has passed are reclaimed
 * before the status is computed, which may reclaim the key info itself.
 *
 * @param id		the key being inspected
 * @param full		written with whether the key holds MAX_VALUES values
 * @param loaded	written with whether GET requests exceed the threshold
 */
void
keys_get_status(const kuid_t *id, bool *full, bool *loaded)
{
	struct keyinfo *ki;
	time_t now;

	g_assert(id);
	g_assert(full);
	g_assert(loaded);

	*full = FALSE;
	*loaded = FALSE;

	ki = hikset_lookup(keys, id);
	if (ki == NULL)
		return;

	keyinfo_check(ki);

	if (GNET_PROPERTY(dht_storage_debug) > 1) {
		g_debug("DHT STORE key %s holds %d/%d value%s, "
			"load avg: get = %g [%s], store = %g [%s], expire in %s",
			kuid_to_hex_string(id), ki->values, MAX_VALUES,
			plural(ki->values),
			(int) (ki->get_req_load * 100) / 100.0,
			ki->get_req_load >= LOAD_GET_THRESH ? "LOADED" : "OK",
			(int) (ki->store_req_load * 100) / 100.0,
			ki->store_req_load >= LOAD_STO_THRESH ? "LOADED" : "OK",
			compact_time(delta_time(ki->next_expire, tm_time())));
	}

	if (ki->get_req_load >= LOAD_GET_THRESH) {
		*loaded = TRUE;
	} else if (ki->get_requests) {
		float limit = LOAD_GET_THRESH / LOAD_SMOOTH -
			(1.0 - LOAD_SMOOTH) / LOAD_SMOOTH * ki->get_req_load;

		/*
		 * Look whether the current amount of get requests is sufficient to
		 * bring the EMA above the threshold at the next update.
		 */

		if (1.0 * ki->get_requests > limit)
			*loaded = TRUE;
	}

	/*
	 * Check whether we reached the expiration time of one of the values held.
	 * Try to expire values before answering.
	 *
	 * NB: even if all the values are collected from the key, deletion of the
	 * `ki' structure will not happen immediately: this is done asynchronously
	 * to avoid disabling a `ki' within a call chain using it.
	 */

	now = tm_time();

	if (now >= ki->next_expire) {
		if (!keys_expire_values(ki, now))
			return;		/* Key info reclaimed */
	}

	if (ki->values >= MAX_VALUES)
		*full = TRUE;
}
/**
 * Remove value from a key, discarding the association between the creator ID
 * and the 64-bit DB key.
 *
 * The keys is known to hold the value already.
 *
 * @param id		the primary key
 * @param cid		the secondary key (creator's ID)
 * @param dbkey		the 64-bit DB key (informational, for assertions)
 */
void
keys_remove_value(const kuid_t *id, const kuid_t *cid, uint64 dbkey)
{
	struct keyinfo *ki;
	struct keydata *kd;
	int idx;

	ki = hikset_lookup(keys, id);

	g_assert(ki);

	kd = get_keydata(id);
	if (NULL == kd)		/* DB failure: nothing more we can do */
		return;

	g_assert(kd->values);
	g_assert(kd->values == ki->values);
	g_assert(kd->values <= MAX_VALUES);

	idx = lookup_secondary_idx(kd, cid);

	g_assert(idx >= 0 && idx < kd->values);
	g_assert(dbkey == kd->dbkeys[idx]);

	/* Compact the three parallel arrays over the removed slot */
	ARRAY_REMOVE(kd->creators, idx, kd->values);
	ARRAY_REMOVE(kd->dbkeys, idx, kd->values);
	ARRAY_REMOVE(kd->expire, idx, kd->values);

	/*
	 * We do not synchronously delete empty keys.
	 *
	 * This lets us optimize the nominal case whereby a key loses all its
	 * values due to a STORE request causing a lifetime check.  But the
	 * STORE will precisely insert back another value.
	 *
	 * Hence lazy expiration also gives us the opportunity to further exploit
	 * caching in memory, the keyinfo being held there as a "cached" value.
	 *
	 * Reclaiming of dead keys happens during periodic key load computation.
	 */

	kd->values--;
	ki->values--;

	/*
	 * Recompute next expiration time.
	 */

	ki->next_expire = TIME_T_MAX;

	for (idx = 0; idx < ki->values; idx++) {
		ki->next_expire = MIN(ki->next_expire, kd->expire[idx]);
	}

	dbmw_write(db_keydata, id, kd, sizeof *kd);

	if (GNET_PROPERTY(dht_storage_debug) > 2) {
		g_debug("DHT STORE key %s now holds only %d/%d value%s, expire in %s",
			kuid_to_hex_string(id), ki->values, MAX_VALUES,
			plural(ki->values),
			compact_time(delta_time(ki->next_expire, tm_time())));
	}
}
/**
 * Add value to a key, recording the new association between the KUID of the
 * creator (secondary key) and the 64-bit DB key under which the value is
 * stored.
 *
 * When the key does not exist yet, a fresh keyinfo is allocated and the
 * key is flagged as "cached" when it falls outside our k-ball.  Otherwise
 * the creator is inserted in the sorted creators[] array (binary insertion);
 * the creator must not be present already.
 *
 * @param id		the primary key (may not exist yet)
 * @param cid		the secondary key (creator's ID)
 * @param dbkey		the 64-bit DB key
 * @param expire	expiration time for the value
 */
void
keys_add_value(const kuid_t *id, const kuid_t *cid,
	uint64 dbkey, time_t expire)
{
	struct keyinfo *ki;
	struct keydata *kd;
	struct keydata new_kd;

	ki = hikset_lookup(keys, id);

	/*
	 * If we're storing the first value under a key, we do not have any
	 * keyinfo structure yet.
	 */

	if (NULL == ki) {
		size_t common;
		bool in_kball;

		common = kuid_common_prefix(get_our_kuid(), id);
		in_kball = bits_within_kball(common);

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT STORE new %s %s (%zu common bit%s) with creator %s",
				in_kball ? "key" : "cached key",
				kuid_to_hex_string(id), common, plural(common),
				kuid_to_hex_string2(cid));

		ki = allocate_keyinfo(id, common);
		ki->next_expire = expire;
		ki->flags = in_kball ? 0 : DHT_KEY_F_CACHED;

		hikset_insert_key(keys, &ki->kuid);

		/* First value: build the key data from scratch, slot 0 */
		kd = &new_kd;
		kd->values = 0;				/* will be incremented below */
		kd->creators[0] = *cid;		/* struct copy */
		kd->dbkeys[0] = dbkey;
		kd->expire[0] = expire;

		gnet_stats_inc_general(GNR_DHT_KEYS_HELD);
		if (!in_kball)
			gnet_stats_inc_general(GNR_DHT_CACHED_KEYS_HELD);
	} else {
		int low = 0;
		int high = ki->values - 1;

		kd = get_keydata(id);
		if (NULL == kd)		/* DB failure: cannot record the value */
			return;

		g_assert(kd->values == ki->values);
		g_assert(kd->values < MAX_VALUES);

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT STORE existing key %s (%u common bit%s) "
				"has new creator %s",
				kuid_to_hex_string(id), ki->common_bits,
				plural(ki->common_bits),
				kuid_to_hex_string2(cid));

		/*
		 * Keys are collected asynchronously, so it is possible that
		 * the key structure still exists, yet holds no values.  If this
		 * happens, then we win because we spared the useless deletion of
		 * the key structure to recreate it a little bit later.
		 */

		if (0 == kd->values)
			goto empty;

		/*
		 * Insert KUID of creator in array, which must be kept sorted.
		 * We perform a binary insertion.
		 */

		while (low <= high) {
			int mid = low + (high - low) / 2;
			int c;

			g_assert(mid >= 0 && mid < ki->values);

			c = kuid_cmp(&kd->creators[mid], cid);

			if (0 == c)
				g_error("new creator KUID %s must not already be present",
					kuid_to_hex_string(cid));
			else if (c < 0)
				low = mid + 1;
			else
				high = mid - 1;
		}

		/* Make room for inserting new item at `low' */

		ARRAY_FIXED_MAKEROOM(kd->creators, low, kd->values);
		ARRAY_FIXED_MAKEROOM(kd->dbkeys, low, kd->values);
		ARRAY_FIXED_MAKEROOM(kd->expire, low, kd->values);

		/* FALL THROUGH */

	empty:
		/* Insert new item at `low' */

		kd->creators[low] = *cid;		/* struct copy */
		kd->dbkeys[low] = dbkey;
		kd->expire[low] = expire;

		ki->next_expire = MIN(ki->next_expire, expire);
	}

	kd->values++;
	ki->values++;

	dbmw_write(db_keydata, id, kd, sizeof *kd);

	if (GNET_PROPERTY(dht_storage_debug) > 2)
		g_debug("DHT STORE %s key %s now holds %d/%d value%s",
			&new_kd == kd ? "new" : "existing",
			kuid_to_hex_string(id), ki->values, MAX_VALUES,
			plural(ki->values));
}
/**
 * Fill supplied value vector with the DHT values we have under the key that
 * match the specifications: among those bearing the specified secondary keys
 * (or all of them if no secondary keys are supplied), return only those with
 * the proper DHT value type.
 *
 * @param id				the primary key of the value
 * @param type				type of DHT value they want
 * @param secondary			optional secondary keys
 * @param secondary_count	amount of secondary keys supplied
 * @param valvec			value vector where results are stored
 * @param valcnt			size of value vector
 * @param loadptr			where to write the average request load for key
 * @param cached			if non-NULL, filled with whether key was cached
 *
 * @return amount of values filled into valvec.  The values are dynamically
 * created and must be freed by caller through dht_value_free().
 */
int
keys_get(const kuid_t *id, dht_value_type_t type,
	kuid_t **secondary, int secondary_count, dht_value_t **valvec, int valcnt,
	float *loadptr, bool *cached)
{
	struct keyinfo *ki;
	struct keydata *kd;
	int i;
	int vcnt = valcnt;
	dht_value_t **vvec = valvec;

	g_assert(secondary_count == 0 || secondary != NULL);
	g_assert(valvec);
	g_assert(valcnt > 0);
	g_assert(loadptr);

	ki = hikset_lookup(keys, id);

	g_assert(ki);	/* If called, we know the key exists */

	if (GNET_PROPERTY(dht_storage_debug) > 5)
		g_debug("DHT FETCH key %s (load = %g, current reqs = %u) type %s"
			" with %d secondary key%s",
			kuid_to_hex_string(id),
			ki->get_req_load, ki->get_requests,
			dht_value_type_to_string(type),
			secondary_count, plural(secondary_count));

	*loadptr = ki->get_req_load;
	ki->get_requests++;

	kd = get_keydata(id);
	if (kd == NULL)		/* DB failure */
		return 0;

	/*
	 * If secondary keys were requested, lookup them up and make sure
	 * they have the right DHT type (or skip them).
	 */

	for (i = 0; i < secondary_count && vcnt > 0; i++) {
		uint64 dbkey = lookup_secondary(kd, secondary[i]);
		dht_value_t *v;

		if (0 == dbkey)		/* Unknown creator: skip */
			continue;

		v = values_get(dbkey, type);
		if (v == NULL)		/* Wrong type (or DB miss): skip */
			continue;

		g_assert(kuid_eq(dht_value_key(v), id));

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT FETCH key %s via secondary key %s has matching %s",
				kuid_to_hex_string(id), kuid_to_hex_string2(secondary[i]),
				dht_value_to_string(v));

		*vvec++ = v;
		vcnt--;
	}

	/*
	 * Don't count secondary-key fetches in the local hit stats: in order to
	 * be able to get these fetches, we must have initially provided the
	 * list of these keys, and thus we have already traversed the code below
	 * for that fetch, which accounted the hit already.
	 */

	if (secondary_count) {
		int n = vvec - valvec;		/* Amount of entries filled */

		gnet_stats_count_general(GNR_DHT_CLAIMED_SECONDARY_KEYS, n);
		if (ki->flags & DHT_KEY_F_CACHED)
			gnet_stats_count_general(GNR_DHT_CLAIMED_CACHED_SECONDARY_KEYS, n);

		goto done;
	}

	/*
	 * No secondary keys specified.  Look them all up.
	 */

	for (i = 0; i < kd->values && vcnt > 0; i++) {
		uint64 dbkey = kd->dbkeys[i];
		dht_value_t *v;

		g_assert(0 != dbkey);

		v = values_get(dbkey, type);
		if (v == NULL)		/* Wrong type (or DB miss): skip */
			continue;

		g_assert(kuid_eq(dht_value_key(v), id));

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT FETCH key %s has matching %s",
				kuid_to_hex_string(id), dht_value_to_string(v));

		*vvec++ = v;
		vcnt--;
	}

	/*
	 * Stats update: we count all the hits, plus successful hits on keys
	 * that do not fall within our k-ball, i.e. keys for which we act as
	 * a "cache".  Note that our k-ball frontier can evolve through time,
	 * so we rely on the DHT_KEY_F_CACHED flag, positionned at creation time.
	 */

	if (vvec != valvec) {
		gnet_stats_inc_general(GNR_DHT_FETCH_LOCAL_HITS);
		if (ki->flags & DHT_KEY_F_CACHED)
			gnet_stats_inc_general(GNR_DHT_FETCH_LOCAL_CACHED_HITS);
	}

done:
	if (cached)
		*cached = (ki->flags & DHT_KEY_F_CACHED) ? TRUE : FALSE;

	return vvec - valvec;	/* Amount of entries filled */
}