/**
 * Expire the tsync record.
 */
static void
tsync_expire(cqueue_t *cq, void *obj)
{
	struct tsync *ts = obj;

	g_assert(ts);
	g_assert(ts->magic == TSYNC_MAGIC);

	if (GNET_PROPERTY(tsync_debug) > 1) {
		g_debug("TSYNC expiring time %d.%d",
			(int) ts->sent.tv_sec, (int) ts->sent.tv_usec);
	}

	cq_zero(cq, &ts->expire_ev);
	hevset_remove(tsync_by_time, &ts->sent);

	/*
	 * A request sent over UDP that expired hints at a UDP-firewalled
	 * node: flag it so that the next attempt goes over TCP.
	 */

	if (ts->udp) {
		gnutella_node_t *n = node_active_by_id(ts->node_id);

		if (n != NULL)
			n->flags |= NODE_F_TSYNC_TCP;
	}

	tsync_free(ts);
}
/**
 * Called from the callout queue when the Nagle timer expires.
 *
 * If we can send the buffer, flush it and send it.  Otherwise, reschedule.
 */
static void
deflate_nagle_timeout(cqueue_t *cq, void *arg)
{
	txdrv_t *tx = arg;
	struct attr *attr = tx->opaque;

	cq_zero(cq, &attr->tm_ev);

	if (attr->send_idx != -1) {
		/*
		 * The send buffer has not been fully transmitted yet: we cannot
		 * flush now, so re-arm the Nagle timer and try again later.
		 */

		if (tx_deflate_debugging(9)) {
			g_debug("TX %s: (%s) buffer #%d unsent, exiting [%c%c]",
				G_STRFUNC, gnet_host_to_string(&tx->host), attr->send_idx,
				(attr->flags & DF_FLOWC) ? 'C' : '-',
				(attr->flags & DF_FLUSH) ? 'f' : '-');
		}

		attr->tm_ev =
			cq_insert(attr->cq, BUFFER_NAGLE, deflate_nagle_timeout, tx);
		return;
	}

	attr->flags &= ~DF_NAGLE;

	if (tx_deflate_debugging(9)) {
		struct buffer *b = &attr->buf[attr->fill_idx];	/* Buffer to send */

		g_debug("TX %s: (%s) flushing %zu bytes (buffer #%d) [%c%c]",
			G_STRFUNC, gnet_host_to_string(&tx->host),
			b->wptr - b->rptr, attr->fill_idx,
			(attr->flags & DF_FLOWC) ? 'C' : '-',
			(attr->flags & DF_FLUSH) ? 'f' : '-');
	}

	deflate_flush_send(tx);
}
/**
 * Callout queue callback for k-ball updates.
 *
 * Re-installs itself periodically: clears the fired event, schedules the
 * next run, then performs the actual k-ball recomputation.
 */
static void
keys_periodic_kball(cqueue_t *cq, void *unused_obj)
{
	(void) unused_obj;

	cq_zero(cq, &kball_ev);
	install_periodic_kball(KBALL_PERIOD);

	keys_update_kball();
}
/**
 * Callout queue callback to handle an entry.
 */
static void
handle_entry(cqueue_t *cq, void *obj)
{
	struct publisher_entry *entry = obj;

	publisher_check(entry);

	/* Clear the fired event before processing the entry */
	cq_zero(cq, &entry->publish_ev);

	publisher_handle(entry);
}
/**
 * Callout queue callback fired when waiting event times out.
 */
static void
wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	cq_zero(cq, &we->tm->timeout_ev);

	hl = htable_lookup(waitqueue, we->key);
	g_assert(hl != NULL);

	/*
	 * Invoke the callback with the sentinel data signalling a timeout,
	 * then dispatch on its returned status.
	 */

	switch ((*we->cb)(we->arg, WQ_TIMED_OUT)) {
	case WQ_SLEEP:
		/* Callback wants to keep waiting: re-instantiate initial timeout */
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	case WQ_EXCLUSIVE:
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* FALL THROUGH */
	case WQ_REMOVE:
		/*
		 * Discard the event from the wait queue; its callback will never
		 * be invoked again.
		 */

		hash_list_remove(hl, we);

		/* Cleanup the table if its list ends-up being empty */
		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);
		return;
	}

	g_assert_not_reached();
}
/**
 * Callout queue callback, invoked when the ping was sent and we did not
 * get a reply within the specified timeout.
 */
static void
uhc_ping_timeout(cqueue_t *cq, void *unused_obj)
{
	(void) unused_obj;

	if (GNET_PROPERTY(bootstrap_debug)) {
		g_warning("no reply from UDP host cache %s:%u",
			uhc_ctx.host, uhc_ctx.port);
	}

	cq_zero(cq, &uhc_ctx.timeout_ev);

	/* This cache did not answer, move on to the next one */
	uhc_try_next();
}
static void
row_selected_expire(cqueue_t *cq, gpointer unused_udata)
{
	search_t *search;

	(void) unused_udata;

	cq_zero(cq, &row_selected_ev);

	search = search_gui_get_current_search();
	if (NULL == search) {
		/* No current search: nothing to show */
		search_gui_clear_details();
	} else {
		search_gui_refresh_popup();
		search_gui_refresh_details(selected_record);
	}
}
/**
 * Token key rotating event.
 *
 * Re-arms itself every ``refresh'' seconds, ages the key ring by one slot
 * (slot 0 always holds the most recent key) and generates a fresh random
 * key into slot 0.
 */
static void
sectoken_rotate(cqueue_t *cq, void *obj)
{
	size_t i;
	sectoken_gen_t *stg = obj;

	sectoken_gen_check(stg);

	cq_zero(cq, &stg->rotate_ev);
	stg->rotate_ev = cq_main_insert(stg->refresh * 1000, sectoken_rotate, stg);

	/*
	 * Shift keys towards higher indices, iterating DOWNWARDS: the previous
	 * upward loop (keys[i + 1] = keys[i] for increasing i) overwrote each
	 * source slot before it was read, replicating keys[0] into every slot
	 * instead of aging the ring.  The downward form is also safe when
	 * keycnt is 0 or 1 (loop body never runs).
	 */

	for (i = stg->keycnt; i-- > 1; /* empty */)
		stg->keys[i] = stg->keys[i - 1];	/* 0 is most recent key */

	random_strong_bytes(&stg->keys[0], sizeof(stg->keys[0]));
}
/**
 * RPC timeout callback.
 */
static void
g2_rpc_timeout(cqueue_t *cq, void *obj)
{
	struct g2_rpc *gr = obj;

	g2_rpc_check(gr);

	if (GNET_PROPERTY(g2_rpc_debug) > 1) {
		g_debug("%s(): /%s RPC to %s timed out, calling %s()",
			G_STRFUNC, g2_msg_type_name(gr->key.type),
			host_addr_to_string(gr->key.addr),
			stacktrace_function_name(gr->cb));
	}

	cq_zero(cq, &gr->timeout_ev);

	/* Notify the waiter with NULL data, then dispose of the RPC record */
	gr->cb(NULL, NULL, gr->arg);
	g2_rpc_free(gr, FALSE);
}