/**
 * Let sleepers know about the wake-up condition.
 *
 * @param hl	the list of waiting parties
 * @param data	waking-up data to supply to callback
 */
static void
wq_notify(hash_list_t *hl, void *data)
{
	hash_list_iter_t *iter;
	size_t i, count;

	iter = hash_list_iterator(hl);
	count = hash_list_length(hl);
	i = 0;

	while (hash_list_iter_has_next(iter)) {
		wq_event_t *we = hash_list_iter_next(iter);
		wq_status_t status;

		wq_event_check(we);

		/*
		 * Stop iteration in case callbacks have called wq_sleep() on the
		 * same waiting queue we're iterating on and added items to the list.
		 * This sanity check ensures we're not going to loop forever with a
		 * callback systematically appending something.
		 */

		if (i++ >= count) {
			/* Something is odd, let them know about the calling stack */
			s_critical("stopping after processing %zu item%s (list now has %u)",
				count, plural(count), hash_list_length(hl));
			break;	/* FIX: actually stop, as the comment above promises */
		}

		status = (*we->cb)(we->arg, data);

		switch (status) {
		case WQ_SLEEP:
			continue;		/* Still sleeping, leave in the list */
		case WQ_EXCLUSIVE:
		case WQ_REMOVE:
			goto remove;
		}

		s_error("invalid status %d returned by %s()",
			status, stacktrace_function_name(we->cb));

	remove:
		hash_list_iter_remove(iter);
		wq_event_free(we);

		/*
		 * The callback may decide that we shouldn't continue notifying
		 * other sleepers (because it knows it grabbed a resource that others
		 * will need for instance).  This is used as an early termination
		 * of the loop.
		 */

		if (WQ_EXCLUSIVE == status)
			break;
	}

	hash_list_iter_release(&iter);
}
/**
 * Report how many attributes the table currently holds.
 *
 * @param xat	the attribute table to inspect
 *
 * @return the number of attributes held.
 */
size_t
xattr_table_count(const xattr_table_t *xat)
{
	size_t items;

	xattr_table_check(xat);
	items = hash_list_length(xat->hl);

	return items;
}
/**
 * Report how many items the ordered hash table currently holds.
 *
 * @param oh	the ordered hash table to inspect
 *
 * @return amount of items held in table.
 */
size_t
ohash_table_count(const ohash_table_t *oh)
{
	size_t items;

	ohash_table_check(oh);
	items = hash_list_length(oh->hl);

	return items;
}
/**
 * Destroys the UDP TX scheduler, which must no longer be attached to anything.
 *
 * @param us	the scheduler to reclaim
 */
void
udp_sched_free(udp_sched_t *us)
{
	udp_sched_check(us);

	/*
	 * TX stacks are asynchronously collected, so we need to force collection
	 * now to make sure nobody references us any longer.
	 */

	tx_collect();

	g_assert(0 == hash_list_length(us->stacks));

	/* Flush every pending message queue */

	for (unsigned q = 0; q < N_ITEMS(us->lifo); q++)
		udp_sched_drop_all(us, &us->lifo[q]);

	/* Release attached resources, then the container structures */

	udp_sched_tx_release(us);
	udp_sched_seen_clear(us);
	pool_free(us->txpool);
	hset_free_null(&us->seen);
	hash_list_free(&us->stacks);
	udp_sched_clear_sockets(us);

	us->magic = 0;		/* Invalidate magic before freeing the object */
	WFREE(us);
}
/**
 * Allocate a new entry in the cache to hold the deserialized value.
 *
 * @param dw		the DBM wrapper
 * @param key		key we want a cache entry for
 * @param filled	optionally, a new cache entry already filled with the data
 *
 * @attention
 * An older cache entry structure can be returned, and it will still
 * point to the previous data.  Caller should normally invoke fill_entry()
 * immediately to make sure these stale data are not associated wrongly
 * with the new key, or supply his own filled structure directly.
 *
 * @return a cache entry object that can be filled with the value.
 */
static struct cached *
allocate_entry(dbmw_t *dw, gconstpointer key, struct cached *filled)
{
	struct cached *entry;
	gpointer saved_key;

	/* Key must not already be cached in either structure */
	g_assert(!hash_list_contains(dw->keys, key));
	g_assert(!map_contains(dw->values, key));
	/* A pre-filled entry must have len and data consistent (both set or both clear) */
	g_assert(!filled || (!filled->len == !filled->data));

	/* Cache owns its own copy of the key */
	saved_key = wcopy(key, dbmw_keylen(dw, key));

	/*
	 * If we have less keys cached than our maximum, add it.
	 * Otherwise evict the least recently used key, at the head.
	 */

	if (hash_list_length(dw->keys) < dw->max_cached) {
		if (filled)
			entry = filled;
		else
			WALLOC0(entry);
	} else {
		gpointer head;

		g_assert(hash_list_length(dw->keys) == dw->max_cached);

		/* LRU eviction: head of the list is the least recently used key */
		head = hash_list_head(dw->keys);
		/*
		 * When the caller supplied a pre-filled entry, the evicted one is
		 * discarded by remove_entry() (hence entry may come back NULL);
		 * otherwise we recycle the evicted structure.
		 */
		entry = remove_entry(dw, head, filled != NULL, TRUE);

		g_assert(filled != NULL || entry != NULL);

		if (filled)
			entry = filled;
	}

	/*
	 * Add entry into cache.
	 */

	g_assert(entry);

	/* Append at tail: most recently used; map shares the saved key pointer */
	hash_list_append(dw->keys, saved_key);
	map_insert(dw->values, saved_key, entry);

	return entry;
}
/**
 * Callout queue callback fired when waiting event times out.
 *
 * @param cq	the callout queue that fired the event
 * @param arg	the wq_event_t that timed out
 */
static void
wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;
	wq_status_t status;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	/* Acknowledge the fired event so it is not cancelled later */
	cq_zero(cq, &we->tm->timeout_ev);
	hl = htable_lookup(waitqueue, we->key);

	g_assert(hl != NULL);

	/*
	 * Invoke the callback with the sentinel data signalling a timeout.
	 */

	status = (*we->cb)(we->arg, WQ_TIMED_OUT);

	/*
	 * When the callback returns WQ_SLEEP, we re-instantiate the initial
	 * timeout.
	 *
	 * Otherwise the event is discarded (removed from the wait queue) and
	 * the callback will never be invoked again for this event.
	 */

	switch (status) {
	case WQ_SLEEP:
		/* Re-arm the timeout with the same initial delay */
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	case WQ_EXCLUSIVE:
		/* Exclusive grab makes no sense on a timeout notification */
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* FALL THROUGH */
	case WQ_REMOVE:
		hash_list_remove(hl, we);

		/*
		 * Cleanup the table if it ends-up being empty.
		 */

		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);
		return;
	}

	/* Callback returned an invalid wq_status_t value */
	g_assert_not_reached();
}
/**
 * Record a pending UDP ping, optionally with a reply callback.
 *
 * @param muid		the ping's MUID (copied)
 * @param addr		address the ping is sent to
 * @param port		port the ping is sent to
 * @param cb		optional callback to invoke on replies
 * @param data		opaque callback argument
 * @param multiple	whether multiple replies are expected
 *
 * @return TRUE if the ping was registered, FALSE on duplicates or when
 * the pending list is (probabilistically) full.
 */
static bool
udp_ping_register(const struct guid *muid,
	host_addr_t addr, uint16 port,
	udp_ping_cb_t cb, void *data, bool multiple)
{
	struct udp_ping *ping;
	uint pending;

	g_assert(muid);
	g_return_val_if_fail(udp_pings, FALSE);

	if (hash_list_contains(udp_pings, muid))
		return FALSE;		/* Probably a duplicate */

	/* random early drop */
	pending = hash_list_length(udp_pings);
	if (pending >= UDP_PING_MAX)
		return FALSE;
	if (pending > (UDP_PING_MAX / 4) * 3 &&
			random_value(UDP_PING_MAX - 1) < pending)
		return FALSE;

	WALLOC(ping);
	ping->muid = *muid;
	ping->added = tm_time();
	{
		gnet_host_t host;
		gnet_host_set(&host, addr, port);
		ping->host = atom_host_get(&host);
	}

	if (NULL == cb) {
		ping->callback = NULL;
	} else {
		WALLOC0(ping->callback);
		ping->callback->cb = cb;
		ping->callback->data = data;
		ping->callback->multiple = booleanize(multiple);
	}

	hash_list_append(udp_pings, ping);
	return TRUE;
}
/**
 * Notify wake-up condition to sleepers on the key.
 *
 * @param key		the rendez-vous point
 * @param data		additional data to supply to woken-up parties
 */
void
wq_wakeup(const void *key, void *data)
{
	hash_list_t *hl = htable_lookup(waitqueue, key);

	if (NULL == hl)
		return;			/* Nobody is sleeping on that key */

	wq_notify(hl, data);

	/*
	 * Cleanup the table if it ends-up being empty.
	 */

	if (0 == hash_list_length(hl)) {
		hash_list_free(&hl);
		htable_remove(waitqueue, key);
	}
}
/**
 * Remove an event from the queue.
 *
 * @param we	the waiting event to remove (freed upon return)
 */
static void
wq_remove(wq_event_t *we)
{
	hash_list_t *hl;

	wq_event_check(we);

	hl = htable_lookup(waitqueue, we->key);

	/*
	 * Chain the checks with "else if": s_critical() only logs and returns,
	 * so falling through after a NULL lookup would dereference a NULL list
	 * in hash_list_remove().
	 */

	if (NULL == hl) {
		s_critical("attempt to remove event %s() on unknown key %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (NULL == hash_list_remove(hl, we)) {
		s_critical("attempt to remove unknown event %s() on %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (0 == hash_list_length(hl)) {
		/* Cleanup the table if the list ends-up being empty */
		hash_list_free(&hl);
		htable_remove(waitqueue, we->key);
	}

	wq_event_free(we);
}