/**
 * Drop one reference on a header object, destroying it when the last
 * reference is released.
 */
void
header_free(header_t *o)
{
	header_check(o);

	if (o->refcnt <= 1) {
		/* Last reference: dispose of the object entirely */
		header_reset(o);
		o->magic = 0;
		WFREE(o);
		return;
	}

	o->refcnt--;		/* Still shared, just count us out */
}
void fi_gui_source_hide(struct download *d) { GtkTreeIter *iter; iter = htable_lookup(fi_sources, d); if (iter) { if (store_sources) { gtk_list_store_remove(store_sources, iter); } htable_remove(fi_sources, d); WFREE(iter); } }
/**
 * Destroy the IP range database and nullify its pointer.
 *
 * @param idb_ptr	pointer to the database pointer; *idb_ptr is reset to NULL
 */
void
iprange_free(struct iprange_db **idb_ptr)
{
	struct iprange_db *idb = *idb_ptr;

	if (NULL == idb)
		return;

	iprange_db_check(idb);

	sorted_array_free(&idb->tab4);	/* IPv4 ranges */
	sorted_array_free(&idb->tab6);	/* IPv6 ranges */
	WFREE(idb);
	*idb_ptr = NULL;
}
/**
 * Hash table iterator callback: reclaim one upload_data entry.
 */
static void
free_upload_data(const void *unused_key, void *value, void *unused_data)
{
	struct upload_data *ud = value;

	(void) unused_key;
	(void) unused_data;

	g_assert(ud->us);
	g_assert(ud->filename);

	ud->us = NULL;			/* Not owned by us, simply forget it */
	atom_str_free_null(&ud->filename);
	WFREE(ud);
}
/**
 * Map iterator callback: dispose of one cached entry (key and value).
 *
 * @return TRUE so the iteration removes the item.
 */
static bool
free_cached(void *key, void *value, void *data)
{
	dbmw_t *dbw = data;
	struct cached *c = value;

	dbmw_check(dbw);
	g_assert(!c->len == !c->data);	/* Length and data agree on emptiness */

	free_value(dbw, c, TRUE);
	wfree(key, dbmw_keylen(dbw, key));
	WFREE(c);

	return TRUE;
}
/**
 * Dispose of every entry held in the spam database.
 */
void
spam_close(void)
{
	GSList *iter;

	for (iter = spam_lut.sl_names; iter != NULL; iter = g_slist_next(iter)) {
		struct namesize_item *item = iter->data;

		g_assert(item);
		regfree(&item->pattern);	/* Compiled regex inside the item */
		WFREE(item);
	}
	gm_slist_free_null(&spam_lut.sl_names);

	spam_sha1_close();
}
/**
 * Destroy the security token generator and nullify its pointer.
 */
void
sectoken_gen_free_null(sectoken_gen_t **stg_ptr)
{
	sectoken_gen_t *stg = *stg_ptr;

	if (NULL == stg)
		return;

	sectoken_gen_check(stg);

	cq_cancel(&stg->rotate_ev);	/* Stop periodic key rotation */
	WFREE_NULL(stg->keys, stg->keycnt * sizeof stg->keys[0]);
	stg->magic = 0;
	WFREE(stg);
	*stg_ptr = NULL;
}
/**
 * Reclaim an RPC descriptor.
 *
 * @param gr			the descriptor to free
 * @param in_shutdown	TRUE when called during shutdown: notify the
 *						callback with NULL arguments instead of touching
 *						the pending set
 */
static void
g2_rpc_free(struct g2_rpc *gr, bool in_shutdown)
{
	g2_rpc_check(gr);

	if (in_shutdown)
		(*gr->cb)(NULL, NULL, gr->arg);
	else
		hevset_remove(g2_rpc_pending, &gr->key);

	cq_cancel(&gr->timeout_ev);
	gr->magic = 0;
	WFREE(gr);
}
/**
 * Release a hash set iterator and nullify its pointer.
 */
void
hikset_iter_release(hikset_iter_t **hxi_ptr)
{
	hikset_iter_t *hxi = *hxi_ptr;

	if (NULL == hxi)
		return;

	hikset_iter_check(hxi);
	hash_refcnt_dec(HASH(hxi->hx));

	/* Deferred resize after deletions, once no iterator remains active */
	if (hxi->deleted && 0 == hxi->hx->refcnt)
		hash_resize_as_needed(HASH(hxi->hx));

	hxi->magic = 0;
	WFREE(hxi);
	*hxi_ptr = NULL;
}
/**
 * Release the map encapsulation, returning the underlying implementation
 * object (will need to be cast back to the proper type for perusal).
 */
void *
map_release(map_t *m)
{
	void *impl;

	map_check(m);

	impl = map_implementation(m);

	m->type = MAP_MAXTYPE;	/* Mark wrapper as no longer usable */
	m->magic = 0;
	WFREE(m);

	return impl;
}
/**
 * Dispose of a header formatting context and nullify its pointer.
 */
void
header_fmt_free(header_fmt_t **hf_ptr)
{
	header_fmt_t *hf = *hf_ptr;

	if (NULL == hf)
		return;

	header_fmt_check(hf);

	str_destroy_null(&hf->header);
	atom_str_free_null(&hf->sep);
	hf->magic = 0;
	WFREE(hf);
	*hf_ptr = NULL;
}
/**
 * Release the iterator once we're done with it, nullifying its pointer.
 */
void
hash_list_iter_release(hash_list_iter_t **iter_ptr)
{
	hash_list_iter_t *it = *iter_ptr;

	if (NULL == it)
		return;

	hash_list_iter_check(it);

	it->hl->refcount--;		/* List no longer pinned by this iterator */
	it->magic = 0;
	WFREE(it);
	*iter_ptr = NULL;
}
/**
 * Create a new LRU cache.
 *
 * @return -1 with errno set on error, 0 if OK.
 */
static int
init_cache(DBM *db, long pages, gboolean wdelay)
{
	struct lru_cache *cache;

	g_assert(NULL == db->cache);

	WALLOC0(cache);

	if (setup_cache(cache, pages, wdelay) != -1) {
		db->cache = cache;
		return 0;
	}

	WFREE(cache);		/* setup_cache() failed, errno is set */
	return -1;
}
/**
 * Reclaim one aging table entry, invoking the user's key/value free
 * routine when one was configured.
 */
static void
aging_free(void *value, void *data)
{
	aging_table_t *ag = data;
	struct aging_value *aval = value;

	aging_check(ag);
	assert_aging_locked(ag);

	if (ag->kvfree != NULL)
		(*ag->kvfree)(aval->key, aval->value);

	elist_remove(&ag->list, aval);
	WFREE(aval);
}
/**
 * Destroy container, freeing all keys and values, and nullify pointer.
 */
void
aging_destroy(aging_table_t **ag_ptr)
{
	aging_table_t *ag = *ag_ptr;

	if (NULL == ag)
		return;

	aging_check(ag);
	aging_synchronize(ag);

	hikset_foreach(ag->table, aging_free, ag);
	hikset_free_null(&ag->table);
	cq_periodic_remove(&ag->gc_ev);

	if (ag->lock != NULL) {
		mutex_destroy(ag->lock);
		WFREE(ag->lock);
	}

	ag->magic = 0;
	WFREE(ag);
	*ag_ptr = NULL;
}
/**
 * Dispose of a result row, dropping its references on the underlying record.
 *
 * @param search	the search owning the row (its "dups" set holds the record)
 * @param rd		the row data to reclaim
 */
static void
result_data_free(search_t *search, struct result_data *rd)
{
	record_check(rd->record);

	g_assert(hset_contains(search->dups, rd->record));
	hset_remove(search->dups, rd->record);
	search_gui_unref_record(rd->record);
	/*
	 * NOTE(review): the second unref below looks intentional -- the record
	 * presumably carries two references here (one for the "dups" set just
	 * cleared, one for this row) -- confirm against the code that creates
	 * struct result_data before touching this.
	 */
	search_gui_unref_record(rd->record);
	/*
	 * rd->record may point to freed memory now if this was the last reference
	 */
	WFREE(rd);
}
/**
 * Create a new LRU cache.
 *
 * @return -1 with errno set on error, 0 if OK.
 */
static int
init_cache(DBM *db, long pages, bool wdelay)
{
	struct lru_cache *cache;

	g_assert(NULL == db->cache);

	WALLOC0(cache);
	cache->magic = SDBM_LRU_MAGIC;	/* Tag before setup_cache() sees it */

	if (setup_cache(cache, pages, wdelay) != -1) {
		db->cache = cache;
		return 0;
	}

	WFREE(cache);		/* setup_cache() failed, errno is set */
	return -1;
}
/**
 * Reclaim the callback waiting indication.
 *
 * @param ucb			the descriptor to free
 * @param in_shutdown	TRUE during shutdown: notify callback of the timeout
 *						instead of touching the pending table
 */
static void
urpc_cb_free(struct urpc_cb *ucb, bool in_shutdown)
{
	urpc_cb_check(ucb);

	if (in_shutdown)
		(*ucb->cb)(URPC_TIMEOUT, ucb->addr, ucb->port, NULL, 0, ucb->arg);
	else
		htable_remove(pending, ucb->s);

	cq_cancel(&ucb->timeout_ev);
	socket_free_null(&ucb->s);
	ucb->magic = 0;
	WFREE(ucb);
}
/**
 * Reclaim a SOAP request and everything it owns: URL and action atoms,
 * pending delay event, asynchronous HTTP request, parsed header, message
 * block and reply buffer.
 */
static void
soap_rpc_free(soap_rpc_t *sr)
{
	soap_rpc_check(sr);

	atom_str_free_null(&sr->url);
	atom_str_free_null(&sr->action);
	cq_cancel(&sr->delay_ev);
	http_async_cancel_null(&sr->ha);
	header_free_null(&sr->header);
	pmsg_free_null(&sr->mb);
	HFREE_NULL(sr->reply_data);

	sr->magic = 0;
	WFREE(sr);
}
/**
 * Detach a UDP TX scheduling layer from a TX stack.
 *
 * @param us	the UDP TX scheduler to detach from
 * @param tx	the TX driver detaching from the scheduler
 */
void
udp_sched_detach(udp_sched_t *us, const txdrv_t *tx)
{
	struct udp_tx_stack key;
	struct udp_tx_stack *uts;
	const void *okey;

	udp_sched_check(us);

	key.tx = tx;	/* Lookup key: only the TX driver matters */
	g_assert(hash_list_contains(us->stacks, &key));

	hash_list_find(us->stacks, &key, &okey);
	uts = deconstify_pointer(okey);

	hash_list_remove(us->stacks, uts);
	WFREE(uts);
}
/**
 * Hash iterator callback: reclaim one aging table entry, invoking the
 * user's key/value free routine when one was configured.
 */
static void
aging_free_kv(void *key, void *value, void *udata)
{
	struct aging_value *aval = value;
	aging_table_t *ag = udata;

	aging_check(ag);
	g_assert(aval->ag == ag);
	g_assert(aval->key == key);

	if (ag->kvfree != NULL)
		(*ag->kvfree)(key, aval->value);

	cq_cancel(&aval->cq_ev);
	WFREE(aval);
}
/**
 * Global teardown of the wswcurl HTTP subsystem: aborts any outstanding
 * requests, releases cURL handles, the subsystem mutexes, the optional
 * OpenSSL locking state, and finally the memory pool.
 */
void wswcurl_cleanup( void )
{
	if( !wswcurl_mempool ) {
		return;	/* never initialized, or already cleaned up */
	}

	/* wswcurl_delete() presumably unlinks the request from http_requests,
	 * making this loop terminate -- NOTE(review): confirm in wswcurl_delete */
	while( http_requests ) {
		wswcurl_delete( http_requests );
	}

	if( curldummy ) {
		qcurl_easy_cleanup( curldummy );
		curldummy = NULL;
	}

	if( curlmulti ) {
		qcurl_multi_cleanup( curlmulti );
		curlmulti = NULL;
	}

	QMutex_Destroy( &curldummy_mutex );
	QMutex_Destroy( &http_requests_mutex );

#ifdef USE_OPENSSL
	if( cryptoLibrary ) {
		/* Unregister our locking callback before destroying its mutexes */
		qCRYPTO_set_locking_callback( NULL );

		if( crypto_num_mutexes && crypto_mutexes ) {
			int mutex_num;
			for( mutex_num = 0; mutex_num < crypto_num_mutexes; mutex_num++ )
				QMutex_Destroy( &crypto_mutexes[mutex_num] );
			WFREE( crypto_mutexes );
			crypto_mutexes = NULL;
		}
		crypto_num_mutexes = 0;
	}
#endif

	if( curlLibrary ) {
		qcurl_global_cleanup();
	}

	wswcurl_unloadlib();

	Mem_FreePool( &wswcurl_mempool );
}
/**
 * Remove specified item from the hash list.
 *
 * @return the original key.
 */
static void *
hash_list_remove_item(hash_list_t *hl, struct hash_list_item *item)
{
	void *key;

	g_assert(item);

	key = deconstify_pointer(item->key);

	hikset_remove(hl->ht, key);
	elist_link_remove(&hl->list, &item->lnk);
	WFREE(item);

	hl->stamp++;	/* Removal invalidates any iteration in progress */

	hash_list_regression(hl);
	return key;
}
/**
 * Reclaim a publisher entry.
 *
 * @param pe		the entry to free
 * @param do_remove	whether to also detach it from the global set and
 *					delete its persisted publishing data
 */
static void
publisher_entry_free(struct publisher_entry *pe, bool do_remove)
{
	publisher_check(pe);

	if (do_remove) {
		hikset_remove(publisher_sha1, pe->sha1);
		delete_pubdata(pe->sha1);
	}

	if (pe->backgrounded)
		pdht_cancel_file(pe->sha1, FALSE);

	atom_sha1_free_null(&pe->sha1);
	cq_cancel(&pe->publish_ev);
	WFREE(pe);
}
/**
 * Destroy container, freeing all keys and values, and nullify pointer.
 */
void
aging_destroy(aging_table_t **ag_ptr)
{
	aging_table_t *ag = *ag_ptr;

	if (NULL == ag)
		return;

	aging_check(ag);

	g_hash_table_foreach(ag->table, aging_free_kv, ag);
	gm_hash_table_destroy_null(&ag->table);

	ag->magic = 0;
	WFREE(ag);

	ag_unref_callout_queue();
	*ag_ptr = NULL;
}
/**
 * Close the LRU page cache, flushing dirty pages first unless the
 * database is flagged volatile.
 */
void
lru_close(DBM *db)
{
	struct lru_cache *cache = db->cache;

	if (cache != NULL) {
		if (!db->is_volatile)
			flush_dirtypag(db);	/* Persist pending writes */
		if (common_stats)
			log_lrustats(db);
		free_cache(cache);
		WFREE(cache);
	}

	db->cache = NULL;
}
/**
 * Close (i.e. free) the LRU page cache.
 *
 * @attention
 * This does not attempt to flush any remaining dirty pages.
 */
void
lru_close(DBM *db)
{
	struct lru_cache *cache = db->cache;

	if (cache != NULL) {
		sdbm_lru_check(cache);

		if (common_stats)
			log_lrustats(db);

		free_cache(cache);
		cache->magic = 0;
		WFREE(cache);
	}

	db->cache = NULL;
}
/**
 * Terminate a THEX download, reclaiming the context and nullifying
 * its pointer.
 */
void
thex_download_free(struct thex_download **ptr)
{
	struct thex_download *ctx = *ptr;

	if (NULL == ctx)
		return;

	if (ctx->rx != NULL) {
		rx_free(ctx->rx);
		ctx->rx = NULL;
	}

	HFREE_NULL(ctx->data);
	G_FREE_NULL(ctx->leaves);
	atom_sha1_free_null(&ctx->sha1);
	atom_tth_free_null(&ctx->tth);
	WFREE(ctx);
	*ptr = NULL;
}
/**
 * Reclaim the descriptor managing large keys and values.
 */
void
big_free(DBM *db)
{
	DBMBIG *dbg = db->big;

	if (NULL == dbg)
		return;

	if (common_stats)
		log_bigstats(db);

	big_datfile_free_null(&dbg->file);
	WFREE_NULL(dbg->bitbuf, BIG_BLKSIZE);
	HFREE_NULL(dbg->bitcheck);
	HFREE_NULL(dbg->scratch);
	fd_forget_and_close(&dbg->fd);
	WFREE(dbg);
}
/**
 * Removes all memory used by the header_features_add.
 */
static void
header_features_cleanup(xfeature_t xf)
{
	struct features *features;
	GList *l;

	features = features_get(xf);
	g_return_if_fail(features);

	for (l = g_list_first(features->list); l != NULL; l = g_list_next(l)) {
		struct header_x_feature *hxf = l->data;

		G_FREE_NULL(hxf->name);
		WFREE(hxf);
	}
	gm_list_free_null(&features->list);
}