/**
 * Get rid of the driver's private data.
 */
static void
tx_deflate_destroy(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;
	int i;
	int ret;

	g_assert(attr->outz);

	for (i = 0; i < BUFFER_COUNT; i++) {
		struct buffer *b = &attr->buf[i];
		wfree(b->arena, attr->buffer_size);
	}

	/*
	 * We ignore Z_DATA_ERROR errors (discarded data, probably).
	 */

	ret = deflateEnd(attr->outz);

	if (Z_OK != ret && Z_DATA_ERROR != ret)
		g_warning("while freeing compressor for peer %s: %s",
			gnet_host_to_string(&tx->host), zlib_strerror(ret));

	WFREE(attr->outz);
	cq_cancel(&attr->tm_ev);
	WFREE(attr);
}
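/*
 * Context for the Z_DATA_ERROR tolerance above: zlib's deflateEnd() reports
 * Z_DATA_ERROR when a stream is destroyed while compressed data is still
 * pending, which is expected when a TX driver is torn down mid-stream.
 * Below is a minimal, self-contained sketch (not project code) showing that
 * behaviour with plain zlib calls.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
	z_stream zs;
	unsigned char in[] = "some text that will not be fully flushed out";
	unsigned char out[8];		/* Deliberately too small for the output */
	int ret;

	memset(&zs, 0, sizeof zs);
	deflateInit(&zs, Z_DEFAULT_COMPRESSION);

	zs.next_in = in;
	zs.avail_in = sizeof in;
	zs.next_out = out;
	zs.avail_out = sizeof out;
	deflate(&zs, Z_FINISH);		/* Output buffer fills up, data left pending */

	ret = deflateEnd(&zs);		/* Z_DATA_ERROR: pending output is discarded */
	printf("deflateEnd() = %d (Z_DATA_ERROR = %d)\n", ret, Z_DATA_ERROR);
	return 0;
}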
/**
 * Free allocated periodic event.
 */
static void
cq_periodic_free(cperiodic_t *cp, bool force)
{
	cperiodic_check(cp);

	if (NULL == cp->ev && !force) {
		/*
		 * Trying to free the periodic event whilst in the middle of the
		 * cq_periodic_trampoline() call.  Record that the object must
		 * be freed and defer until we return from the user call.
		 */

		cp->to_free = TRUE;
	} else {
		cqueue_t *cq;

		cq = cp->cq;
		cqueue_check(cq);
		cq_cancel(&cp->ev);
		cq_unregister_object(cq->cq_periodic, cp);
		cp->magic = 0;
		WFREE(cp);
	}
}
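/*
 * The (NULL == cp->ev && !force) branch above handles re-entrancy: when the
 * user callback itself requests destruction, the event pointer has already
 * been cleared by the dispatcher, so freeing is deferred via cp->to_free.
 * The sketch below is a hypothetical reconstruction of that dispatching
 * side: cp->event_cb, cp->arg and the re-arming step are assumptions; only
 * cq_periodic_free() comes from the code above.
 */
static void
cq_periodic_trampoline_sketch(cqueue_t *cq, void *obj)
{
	cperiodic_t *cp = obj;

	(void) cq;
	cp->ev = NULL;			/* The event that just fired is gone */

	if (!(*cp->event_cb)(cp->arg) || cp->to_free) {
		/* Callback declined to continue, or freed us re-entrantly */
		cq_periodic_free(cp, TRUE);
		return;
	}

	/* ...otherwise re-arm cp->ev for the next period (not shown) */
}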
/**
 * Dispose of the value from the `used' table.
 */
static void
val_free(struct used_val *v)
{
	g_assert(v);
	g_assert(is_host_addr(v->addr));

	cq_cancel(&v->cq_ev);
	WFREE(v);
}
/**
 * Stop the nagle timer.
 */
static void
deflate_nagle_stop(txdrv_t *tx)
{
	struct attr *attr = tx->opaque;

	g_assert(attr->flags & DF_NAGLE);
	g_assert(NULL != attr->tm_ev);

	cq_cancel(&attr->tm_ev);
	attr->flags &= ~DF_NAGLE;
}
/**
 * Free a tsync structure.
 */
static void
tsync_free(struct tsync *ts)
{
	g_assert(ts);
	g_assert(ts->magic == TSYNC_MAGIC);

	cq_cancel(&ts->expire_ev);
	nid_unref(ts->node_id);
	ts->magic = 0;
	WFREE(ts);
}
/**
 * Put the watchdog to sleep.
 *
 * @return TRUE if we stopped the watchdog, FALSE if it was already asleep.
 */
bool
wd_sleep(watchdog_t *wd)
{
	watchdog_check(wd);

	if (NULL == wd->ev)
		return FALSE;

	cq_cancel(&wd->ev);

	return TRUE;
}
void
search_gui_callbacks_shutdown(void)
{
	/*
	 * Remove delayed callbacks
	 */

	cq_cancel(&row_selected_ev);
	search_gui_clear_details();

	if (selected_record) {
		search_gui_unref_record(selected_record);
		selected_record = NULL;
	}
}
/**
 * Free waiting event.
 */
static void
wq_event_free(wq_event_t *we)
{
	wq_event_check(we);

	if (we->tm != NULL) {
		cq_cancel(&we->tm->timeout_ev);
		WFREE(we->tm);
		we->tm = NULL;
	}
	we->magic = 0;
	WFREE(we);
}
/**
 * Cleanup during process termination.
 */
void G_COLD
uhc_close(void)
{
	cq_cancel(&uhc_ctx.timeout_ev);
	uhc_connecting = FALSE;

	if (uhc_list) {
		struct uhc *uhc;

		while (NULL != (uhc = hash_list_shift(uhc_list))) {
			uhc_free(&uhc);
		}
		hash_list_free(&uhc_list);
	}
}
/**
 * Free the RPC descriptor.
 */
static void
g2_rpc_free(struct g2_rpc *gr, bool in_shutdown)
{
	g2_rpc_check(gr);

	if (in_shutdown) {
		(*gr->cb)(NULL, NULL, gr->arg);
	} else {
		hevset_remove(g2_rpc_pending, &gr->key);
	}

	cq_cancel(&gr->timeout_ev);
	gr->magic = 0;
	WFREE(gr);
}
/**
 * Destroy the security token generator and nullify its pointer.
 */
void
sectoken_gen_free_null(sectoken_gen_t **stg_ptr)
{
	sectoken_gen_t *stg = *stg_ptr;

	if (stg != NULL) {
		sectoken_gen_check(stg);

		cq_cancel(&stg->rotate_ev);
		WFREE_NULL(stg->keys, stg->keycnt * sizeof stg->keys[0]);
		stg->magic = 0;
		WFREE(stg);
		*stg_ptr = NULL;
	}
}
/**
 * Free the callback waiting indication.
 */
static void
urpc_cb_free(struct urpc_cb *ucb, bool in_shutdown)
{
	urpc_cb_check(ucb);

	if (in_shutdown) {
		(*ucb->cb)(URPC_TIMEOUT, ucb->addr, ucb->port, NULL, 0, ucb->arg);
	} else {
		htable_remove(pending, ucb->s);
	}

	cq_cancel(&ucb->timeout_ev);
	socket_free_null(&ucb->s);
	ucb->magic = 0;
	WFREE(ucb);
}
/**
 * Free keys and values from the aging table.
 */
static void
aging_free_kv(void *key, void *value, void *udata)
{
	aging_table_t *ag = udata;
	struct aging_value *aval = value;

	aging_check(ag);
	g_assert(aval->ag == ag);
	g_assert(aval->key == key);

	if (ag->kvfree != NULL)
		(*ag->kvfree)(key, aval->value);

	cq_cancel(&aval->cq_ev);
	WFREE(aval);
}
/**
 * Free a SOAP request.
 */
static void
soap_rpc_free(soap_rpc_t *sr)
{
	soap_rpc_check(sr);

	atom_str_free_null(&sr->url);
	atom_str_free_null(&sr->action);
	cq_cancel(&sr->delay_ev);
	http_async_cancel_null(&sr->ha);
	header_free_null(&sr->header);
	pmsg_free_null(&sr->mb);
	HFREE_NULL(sr->reply_data);

	sr->magic = 0;
	WFREE(sr);
}
/**
 * Free publisher entry.
 */
static void
publisher_entry_free(struct publisher_entry *pe, bool do_remove)
{
	publisher_check(pe);

	if (do_remove) {
		hikset_remove(publisher_sha1, pe->sha1);
		delete_pubdata(pe->sha1);
	}

	if (pe->backgrounded)
		pdht_cancel_file(pe->sha1, FALSE);

	atom_sha1_free_null(&pe->sha1);
	cq_cancel(&pe->publish_ev);
	WFREE(pe);
}
/**
 * Trigger callback and then put the watchdog to sleep, ignoring any desire
 * from the callback to re-arm the watchdog.
 *
 * @return TRUE if we stopped the watchdog, FALSE if it was already asleep,
 * in which case the trigger was not invoked.
 */
bool
wd_expire(watchdog_t *wd)
{
	watchdog_check(wd);

	if (NULL == wd->ev)
		return FALSE;

	cq_cancel(&wd->ev);
	(*wd->trigger)(wd, wd->arg);

	if (wd->ev != NULL) {
		g_critical("%s(): "
			"watchdog \"%s\" re-armed within %s() callback, turning it off",
			G_STRFUNC, wd_name(wd), stacktrace_function_name(wd->trigger));
	}

	return TRUE;
}
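/*
 * Caller-side sketch relating wd_sleep() and wd_expire() above: both disarm
 * the pending event, but wd_expire() also runs the trigger once, as if the
 * timeout had elapsed.  The function and its arguments are hypothetical,
 * only the two watchdog calls come from the code above.
 */
static void
session_teardown_sketch(watchdog_t *wd, bool done_normally)
{
	if (done_normally)
		wd_sleep(wd);		/* Quietly disarm: trigger not invoked */
	else
		wd_expire(wd);		/* Force the timeout action now, then disarm */
}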
/**
 * Close local key management.
 */
G_GNUC_COLD void
keys_close(void)
{
	values_close();

	dbstore_close(db_keydata, settings_dht_db_dir(), db_keybase);
	db_keydata = NULL;

	if (keys) {
		hikset_foreach(keys, keys_free_kv, NULL);
		hikset_free_null(&keys);
	}

	kuid_atom_free_null(&kball.furthest);
	kuid_atom_free_null(&kball.closest);

	gnet_stats_set_general(GNR_DHT_KEYS_HELD, 0);
	gnet_stats_set_general(GNR_DHT_CACHED_KEYS_HELD, 0);

	cq_cancel(&kball_ev);
	cq_periodic_remove(&keys_periodic_ev);
	cq_periodic_remove(&keys_sync_ev);
}
/**
 * Expire timeout by removing it from the queue and firing its callback.
 */
void
cq_expire(cevent_t *ev)
{
	cqueue_t *cq;
	cq_service_t fn;
	void *arg;

	cevent_check(ev);

	cq = ev->ce_cq;
	cqueue_check(cq);

	/*
	 * Need to lock to read in the callback information because of
	 * cq_replace().
	 *
	 * We can use a hidden lock because there's no function call in the
	 * critical section, so no opportunity to ever deadlock.
	 */

	CQ_LOCK(cq);
	cevent_check(ev);		/* Not triggered since routine start */
	fn = ev->ce_fn;
	arg = ev->ce_arg;
	CQ_UNLOCK(cq);

	g_assert(fn);

	cq_cancel(&ev);			/* Remove event from queue before firing */

	/*
	 * All the callout queue data structures were updated.
	 * It is now safe to invoke the callback, even if there is some
	 * re-entry to the same callout queue.
	 */

	(*fn)(cq, arg);
}
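/*
 * cq_expire() removes and frees the event before invoking the callback, so
 * a handler that keeps its own cevent_t pointer should clear it on entry
 * rather than cancel it.  Hypothetical sketch: only the (cqueue_t *, void *)
 * callback shape is taken from cq_expire() above, the rest is assumed.
 */
static void
retry_timeout_sketch(cqueue_t *cq, void *arg)
{
	struct job *j = arg;		/* Hypothetical per-job state */

	(void) cq;
	j->retry_ev = NULL;		/* Already expired and freed: do not cq_cancel() */

	job_retry(j);			/* Hypothetical action; may schedule a new event */
}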
/**
 * Called when a pong with an "IPP" extension was received.
 */
void
uhc_ipp_extract(gnutella_node_t *n, const char *payload, int paylen,
	enum net_type type)
{
	int i, cnt;
	int len = NET_TYPE_IPV6 == type ? 18 : 6;
	const void *p;

	g_assert(0 == paylen % len);

	cnt = paylen / len;

	if (GNET_PROPERTY(bootstrap_debug))
		g_debug("extracting %d host%s in UDP IPP pong #%s from %s",
			cnt, plural(cnt),
			guid_hex_str(gnutella_header_get_muid(&n->header)), node_addr(n));

	for (i = 0, p = payload; i < cnt; i++, p = const_ptr_add_offset(p, len)) {
		host_addr_t ha;
		uint16 port;

		host_ip_port_peek(p, type, &ha, &port);
		hcache_add_caught(HOST_ULTRA, ha, port, "UDP-HC");

		if (GNET_PROPERTY(bootstrap_debug) > 2)
			g_debug("BOOT collected %s from UDP IPP pong from %s",
				host_addr_port_to_string(ha, port), node_addr(n));
	}

	if (!uhc_connecting)
		return;

	/*
	 * Check whether this was a reply to our request.
	 *
	 * The reply could come well after we decided it timed out and picked
	 * another UDP host cache, which ended up replying, so we must really
	 * check whether we're still in a probing cycle.
	 */

	if (!guid_eq(&uhc_ctx.muid, gnutella_header_get_muid(&n->header)))
		return;

	if (GNET_PROPERTY(bootstrap_debug)) {
		g_debug("BOOT UDP cache \"%s\" replied: got %d host%s from %s",
			uhc_ctx.host, cnt, plural(cnt), node_addr(n));
	}

	/*
	 * Terminate the probing cycle if we got hosts.
	 */

	if (cnt > 0) {
		char msg[256];

		cq_cancel(&uhc_ctx.timeout_ev);
		uhc_connecting = FALSE;

		str_bprintf(msg, sizeof(msg),
			NG_("Got %d host from UDP host cache %s",
				"Got %d hosts from UDP host cache %s", cnt),
			cnt, uhc_ctx.host);
		gcu_statusbar_message(msg);
	} else {
		uhc_try_next();
	}
}
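/*
 * Size arithmetic behind the assertion in uhc_ipp_extract(): an IPP record
 * is a raw address (4 bytes for IPv4, 16 for IPv6) followed by a 2-byte
 * port, hence 6- or 18-byte records, a payload length that must be an exact
 * multiple of the record size, and a host count equal to the quotient.
 * Illustrative helper only, not project code.
 */
#include <stdbool.h>

static bool
ipp_count_hosts(int paylen, bool ipv6, int *cnt)
{
	int len = ipv6 ? 16 + 2 : 4 + 2;	/* 18 or 6 bytes per record */

	if (paylen <= 0 || 0 != paylen % len)
		return false;			/* Malformed extension payload */

	*cnt = paylen / len;
	return true;
}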
/**
 * Publishing callback invoked when asynchronous publication is completed,
 * or ended with an error.
 *
 * @return TRUE if we accept the publishing, FALSE otherwise to get the
 * publishing layer to continue attempts towards the failed STORE roots and
 * report on progress using the same callback.
 */
static bool
publisher_done(void *arg, pdht_error_t code, const pdht_info_t *info)
{
	struct publisher_entry *pe = arg;
	struct pubdata *pd;
	int delay = PUBLISH_BUSY;
	bool expired = FALSE;
	bool accepted = TRUE;

	publisher_check(pe);

	pd = get_pubdata(pe->sha1);

	/*
	 * Update stats on republishing before value expiration.
	 */

	if (PDHT_E_OK == code) {
		if (pe->last_publish && info->roots > 0) {
			if (pd != NULL) {
				if (pd->expiration && delta_time(tm_time(), pd->expiration) > 0)
					expired = TRUE;
			} else {
				time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);
				if (elapsed > DHT_VALUE_ALOC_EXPIRE)
					expired = TRUE;
			}
			if (expired)
				gnet_stats_inc_general(GNR_DHT_REPUBLISHED_LATE);
		}
	}

	/*
	 * Compute retry delay.
	 */

	switch (code) {
	case PDHT_E_OK:
		/*
		 * If we were not able to publish to KDA_K nodes, decrease the
		 * delay before republishing.  We use a non-linear decimation of
		 * the republish time, as a function of the number of nodes to
		 * which we could publish.
		 */

		delay = publisher_delay(info, DHT_VALUE_ALOC_EXPIRE);
		accepted = publisher_is_acceptable(info);
		break;
	case PDHT_E_POPULAR:
		/*
		 * Compute the suitable delay: the first time, we use PUBLISH_POPULAR,
		 * and then we double each time until we reach PUBLISH_POPULAR_MAX.
		 *
		 * If we already tried to publish the entry, pe->last_delayed will
		 * be non-zero.
		 */

		if (0 != pe->last_delayed) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);

			if (elapsed < PUBLISH_POPULAR) {
				delay = PUBLISH_POPULAR;
			} else if (elapsed >= PUBLISH_POPULAR_MAX / 2) {
				delay = PUBLISH_POPULAR_MAX;
			} else {
				delay = elapsed * 2;
			}
		} else {
			delay = PUBLISH_POPULAR;
		}
		break;
	case PDHT_E_NOT_SHARED:
	case PDHT_E_LOOKUP_EXPIRED:
	case PDHT_E_LOOKUP:
	case PDHT_E_UDP_CLOGGED:
	case PDHT_E_PUBLISH_EXPIRED:
	case PDHT_E_PUBLISH_ERROR:
	case PDHT_E_SHA1:
	case PDHT_E_PENDING:
	case PDHT_E_CANCELLED:
	case PDHT_E_GGEP:
	case PDHT_E_NONE:
		delay = PUBLISH_BUSY;
		break;
	case PDHT_E_MAX:
		g_assert_not_reached();
	}

	/*
	 * For a backgrounded entry publishing, we need to adjust the computed
	 * delay by the time that has already elapsed.
	 */

	g_assert(!pe->backgrounded == !(pe->publish_ev != NULL));

	if (pe->backgrounded) {
		time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);

		g_assert(pe->last_delayed > 0);
		cq_cancel(&pe->publish_ev);

		if (delay > elapsed) {
			delay -= elapsed;
		} else {
			delay = 1;
		}
	}

	/*
	 * Logging.
	 */

	if (GNET_PROPERTY(publisher_debug) > 1) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		char retry[80];
		char after[80];
		const char *late = "";

		after[0] = '\0';

		if (pe->last_publish) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);

			str_bprintf(after, sizeof after,
				" after %s", compact_time(elapsed));

			if (pd != NULL) {
				if (expired)
					late = "late, ";
			} else {
				late = "no data, ";
			}
		}

		str_bprintf(retry, sizeof retry, "%s", compact_time(delay));

		g_debug("PUBLISHER SHA-1 %s %s%s\"%s\" %spublished to %u node%s%s: %s"
			" (%stook %s, total %u node%s, proba %.3f%%, retry in %s,"
			" %s bg, path %u) [%s]",
			sha1_to_string(pe->sha1),
			pe->backgrounded ? "[bg] " : "",
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			pe->last_publish ? "re" : "",
			info->roots, plural(info->roots),
			after, pdht_strerror(code), late,
			compact_time(delta_time(tm_time(), pe->last_enqueued)),
			info->all_roots, plural(info->all_roots),
			info->presence * 100.0, retry,
			info->can_bg ? "can" : "no", info->path_len,
			accepted ? "OK" : "INCOMPLETE");

		shared_file_unref(&sf);
	}

	/*
	 * Update last publishing time and remember expiration time.
	 */

	if (PDHT_E_OK == code && info->roots > 0) {
		pe->last_publish = tm_time();

		if (pd != NULL) {
			pd->expiration =
				time_advance(pe->last_publish, DHT_VALUE_ALOC_EXPIRE);
			dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
		}
	}

	/*
	 * If entry was deemed popular, we're going to delay its republishing
	 * by a larger amount of time and any data we published already about
	 * it will surely expire.  Since this is our decision, we do not want
	 * to be told that republishing, if it occurs again, was done later than
	 * required.  Hence call publisher_hold() to mark that we don't care.
	 */

	if (PDHT_E_POPULAR == code)
		publisher_hold(pe, delay, "popular entry");
	else
		publisher_retry(pe, delay, accepted ? "accepted publish" : "published");

	pe->backgrounded = !accepted;

	return accepted;
}