void rspamd_worker_stop_accept (struct rspamd_worker *worker) { GList *cur; struct event *event; GHashTableIter it; struct rspamd_worker_signal_handler *sigh; gpointer k, v; /* Remove all events */ cur = worker->accept_events; while (cur) { event = cur->data; event_del (event); cur = g_list_next (cur); g_slice_free1 (sizeof (struct event), event); } if (worker->accept_events != NULL) { g_list_free (worker->accept_events); } g_hash_table_iter_init (&it, worker->signal_events); while (g_hash_table_iter_next (&it, &k, &v)) { sigh = (struct rspamd_worker_signal_handler *)v; g_hash_table_iter_steal (&it); if (sigh->enabled) { event_del (&sigh->ev); } g_free (sigh); } g_hash_table_unref (worker->signal_events); }
/** * g_content_types_get_registered: * * Gets a list of strings containing all the registered content types * known to the system. The list and its data should be freed using * <programlisting> * g_list_free_full (list, g_free); * </programlisting> * * Returns: (element-type utf8) (transfer full): #GList of the registered content types */ GList * g_content_types_get_registered (void) { const char * const* dirs; GHashTable *mimetypes; GHashTableIter iter; gpointer key; int i; GList *l; mimetypes = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); enumerate_mimetypes_dir (g_get_user_data_dir (), mimetypes); dirs = g_get_system_data_dirs (); for (i = 0; dirs[i] != NULL; i++) enumerate_mimetypes_dir (dirs[i], mimetypes); l = NULL; g_hash_table_iter_init (&iter, mimetypes); while (g_hash_table_iter_next (&iter, &key, NULL)) { l = g_list_prepend (l, key); g_hash_table_iter_steal (&iter); } g_hash_table_destroy (mimetypes); return l; }
/*
 * Destroy a regexp cache: free every re_class (and its Hyperscan state
 * when built with WITH_HYPERSCAN), then the class table, the re array,
 * and the cache structure itself.
 */
static void
rspamd_re_cache_destroy (struct rspamd_re_cache *cache)
{
	GHashTableIter it;
	gpointer k, v;
	struct rspamd_re_class *re_class;

	g_assert (cache != NULL);

	g_hash_table_iter_init (&it, cache->re_classes);

	while (g_hash_table_iter_next (&it, &k, &v)) {
		re_class = v;
		/* Steal the entry so the table's destructors (if any) do not
		 * run on it; everything is freed by hand below. */
		g_hash_table_iter_steal (&it);
		g_hash_table_unref (re_class->re);
#ifdef WITH_HYPERSCAN
		/* Hyperscan resources are optional per class; free only what exists. */
		if (re_class->hs_db) {
			hs_free_database (re_class->hs_db);
		}
		if (re_class->hs_scratch) {
			hs_free_scratch (re_class->hs_scratch);
		}
		if (re_class->hs_ids) {
			g_free (re_class->hs_ids);
		}
#endif
		g_slice_free1 (sizeof (*re_class), re_class);
	}

	g_hash_table_unref (cache->re_classes);
	g_ptr_array_free (cache->re, TRUE);
	g_slice_free1 (sizeof (*cache), cache);
}
/*
 * Expand a list of meta0 infos into one entry per prefix: build a
 * prefix-indexed hash map from mL, then move every mapped value into a
 * freshly built singly-linked list, which the caller owns.
 * Returns NULL (with *err set) on invalid input or map-building failure.
 */
GSList *
meta0_info_uncompress_prefixes(GSList * mL, GError ** err)
{
	GHashTableIter it;
	gpointer key, value;
	GHashTable *by_prefix = NULL;
	GSList *result = NULL;

	if (!mL) {
		GSETERROR(err, "invalid parameter");
		return NULL;
	}

	by_prefix = meta0_info_list_map_by_prefix(mL, err);
	if (!by_prefix) {
		GSETERROR(err, "cannot build the prefix-indexed hash-map");
		return NULL;
	}

	/* Steal each value out of the map so destroying the map below
	 * cannot free what the result list now owns. */
	g_hash_table_iter_init(&it, by_prefix);
	while (g_hash_table_iter_next(&it, &key, &value)) {
		result = g_slist_prepend(result, value);
		g_hash_table_iter_steal(&it);
	}

	g_hash_table_destroy(by_prefix);
	return result;
}
/*
 * Move resolved documents from `source` into `u`.
 *
 * Returns true on success (or when source is NULL); returns false when
 * both resolvers hold a non-empty fragment set for the same document,
 * since those cannot be merged.
 *
 * NOTE(review): on the false return, entries stolen earlier in the loop
 * have already been moved into `u` — the transfer is partial, not
 * rolled back. Confirm callers treat `source` as spent either way.
 */
bool uri_resolver_steal_documents(UriResolver *u, UriResolver *source)
{
    if (!source)
        return true;

    GHashTableIter it;
    g_hash_table_iter_init(&it, source->documents);
    char *document = NULL;
    gpointer fragments = NULL;
    while (g_hash_table_iter_next(&it, (gpointer *) &document, &fragments)) {
        // Skip the root fragment (empty-string key)
        if (!*document)
            continue;

        gpointer old_fragments = g_hash_table_lookup(u->documents, document);
        // If the document has already been resolved,
        if (old_fragments && g_hash_table_size(old_fragments)) {
            // If we've got two bunches of fragments of the same document,
            // there's no way to merge them in (currently?).
            if (fragments && g_hash_table_size(fragments))
                return false;
            // Otherwise, the document has already been resolved, and we
            // can safely skip it now.
            continue;
        }

        /* Steal key+value from source so its table cannot free them,
         * then hand ownership to u's table. */
        g_hash_table_iter_steal(&it);
        g_hash_table_replace(u->documents, document, fragments);
    }
    return true;
}
/*
 * Stop accepting new connections for a worker (event-pair variant).
 *
 * Each accept_events element is a slice-allocated array of TWO events;
 * both are deleted (only if attached to an event base) before the pair
 * is freed. Signal handlers are stolen from the table, their events
 * deleted if enabled, and freed. Finally every configured map's dtor
 * is invoked once and cleared so it cannot run again.
 */
void
rspamd_worker_stop_accept (struct rspamd_worker *worker)
{
	GList *cur;
	struct event *events;
	GHashTableIter it;
	struct rspamd_worker_signal_handler *sigh;
	gpointer k, v;
	struct rspamd_map *map;

	/* Remove all events */
	cur = worker->accept_events;
	while (cur) {
		events = cur->data;

		/* event_del is only valid for events added to a base */
		if (event_get_base (&events[0])) {
			event_del (&events[0]);
		}
		if (event_get_base (&events[1])) {
			event_del (&events[1]);
		}

		cur = g_list_next (cur);
		/* The pair was allocated as one slice of two events. */
		g_slice_free1 (sizeof (struct event) * 2, events);
	}

	if (worker->accept_events != NULL) {
		g_list_free (worker->accept_events);
	}

	/* Steal each handler before freeing it so the table's destroy
	 * notifier (if any) cannot double-free it on unref below. */
	g_hash_table_iter_init (&it, worker->signal_events);

	while (g_hash_table_iter_next (&it, &k, &v)) {
		sigh = (struct rspamd_worker_signal_handler *)v;
		g_hash_table_iter_steal (&it);

		if (sigh->enabled) {
			event_del (&sigh->ev);
		}

		g_free (sigh);
	}

	g_hash_table_unref (worker->signal_events);

	/* Cleanup maps: run each dtor once, then disarm it. */
	for (cur = worker->srv->cfg->maps; cur != NULL; cur = g_list_next (cur)) {
		map = cur->data;

		if (map->dtor) {
			map->dtor (map->dtor_data);
		}

		map->dtor = NULL;
	}
}
/*
 * Detach every known Bluetooth device from the manager, dropping the
 * reference the devices table held on each one.
 */
static void
remove_all_devices (NMBluez5Manager *self)
{
	NMBluez5ManagerPrivate *priv = NM_BLUEZ5_MANAGER_GET_PRIVATE (self);
	GHashTableIter it;
	NMBluezDevice *dev;

	g_hash_table_iter_init (&it, priv->devices);
	for (;;) {
		if (!g_hash_table_iter_next (&it, NULL, (gpointer) &dev))
			break;
		/* Steal the entry first so the table's destroy notify cannot
		 * drop the reference before remove_device() has run. */
		g_hash_table_iter_steal (&it);
		remove_device (self, dev);
		g_object_unref (dev);
	}
}
/*
 * Generate a sorted array of [(checksum: str, size: uint64, names: array[string]), ...]
 * for regular file content.
 *
 * On success, *out_sizenames receives the sorted GPtrArray (ownership
 * transferred via gs_transfer_out_value); on failure, FALSE is returned
 * with *error set and the g_autoptr cleanups free everything.
 */
static gboolean
build_content_sizenames_filtered (OstreeRepo              *repo,
                                  GVariant                *commit,
                                  GHashTable              *include_only_objects,
                                  GPtrArray              **out_sizenames,
                                  GCancellable            *cancellable,
                                  GError                 **error)
{
  gboolean ret = FALSE;
  g_autoptr(GPtrArray) ret_sizenames =
    g_ptr_array_new_with_free_func (_ostree_delta_content_sizenames_free);
  /* Values are owned by the map only until they are stolen below;
   * keys are borrowed (no key destroy func). */
  g_autoptr(GHashTable) sizenames_map =
    g_hash_table_new_full (g_str_hash, g_str_equal, NULL, _ostree_delta_content_sizenames_free);
  ostree_cleanup_repo_commit_traverse_iter
    OstreeRepoCommitTraverseIter iter = { 0, };

  if (!ostree_repo_commit_traverse_iter_init_commit (&iter, repo, commit,
                                                     OSTREE_REPO_COMMIT_TRAVERSE_FLAG_NONE,
                                                     error))
    goto out;

  if (!build_content_sizenames_recurse (repo, &iter, sizenames_map, include_only_objects,
                                        cancellable, error))
    goto out;

  {
    GHashTableIter hashiter;
    gpointer hkey, hvalue;

    g_hash_table_iter_init (&hashiter, sizenames_map);
    while (g_hash_table_iter_next (&hashiter, &hkey, &hvalue))
      {
        /* Steal first so the map's value free func cannot run on
         * entries the array now owns. */
        g_hash_table_iter_steal (&hashiter);
        g_ptr_array_add (ret_sizenames, hvalue);
      }
  }

  g_ptr_array_sort (ret_sizenames, compare_sizenames);

  ret = TRUE;
  gs_transfer_out_value (out_sizenames, &ret_sizenames);
 out:
  return ret;
}
/*
 * Compare the chunks of the current file against the global chunkset,
 * counting duplicates, (simulated) read-backs, and hash collisions.
 *
 * Chunks already present in chunkset bump that entry's refcount; new
 * chunks are stolen from curfile and inserted into chunkset keyed by
 * their full hash.
 *
 * NOTE(review): relies on file-scope state visible only elsewhere in
 * the file — chunkset, minhash, suffix, file_count, and the
 * collisions/dup_chunks/chunks_read_back/detected_collisions counters.
 * %lld assumes fsize is long long — confirm against struct chunk_item.
 */
static void check_curfile(GHashTable* curfile){
    GHashTableIter iter;
    gpointer key, value;
    g_hash_table_iter_init(&iter, curfile);
    while(g_hash_table_iter_next(&iter, &key, &value)){
        struct chunk_item* chunk = value;
        struct chunk_item* target = g_hash_table_lookup(chunkset, key);
        if(target){
            /* Same lookup key but different full hash => collision. */
            if(memcmp(target->hash, chunk->hash, 20)!=0){
                fprintf(stderr, "+Find a collision between %s <-> %s! Try to detect it!\n",
                        target->suffix, suffix);
                fprintf(stderr, "+Details: MinHash = %s, Size %lld <-> %lld\n",
                        memcmp(target->minhash, chunk->minhash, 20) == 0?"True":"False",
                        target->fsize, chunk->fsize);
                collisions++;
            }else{
                dup_chunks++;
            }
            /* A differing minhash forces a read-back, which is the only
             * chance to actually detect a collision. */
            int read_back = 0;
            if(memcmp(target->minhash, minhash, 20) != 0){
                read_back = 1;
            }
            if(read_back){
                chunks_read_back++;
                if(memcmp(target->hash, chunk->hash, 20)!=0){
                    fprintf(stderr, "-Collision Detected!\n");
                    detected_collisions++;
                }
            }
            target->rc++;
        }else{
            /* First sighting: stamp file metadata, then move ownership
             * from curfile into the global chunkset. */
            chunk->fid = file_count;
            memcpy(chunk->minhash, minhash, 20);
            memcpy(chunk->suffix, suffix, 8);
            g_hash_table_iter_steal(&iter);
            g_hash_table_insert(chunkset, chunk->hash, chunk);
        }
    }
}
/*!
 * \internal
 * \brief Merge fence-history coming from remote into local history
 *
 * \param[in] history Hash-table holding remote history to be merged in
 */
static void
stonith_merge_in_history_list(GHashTable *history)
{
    GHashTableIter hiter;
    remote_fencing_op_t *incoming = NULL;
    gboolean changed = FALSE;

    if (!history) {
        return;
    }

    init_stonith_remote_op_hash_table(&stonith_remote_op_list);

    g_hash_table_iter_init(&hiter, history);
    while (g_hash_table_iter_next(&hiter, NULL, (void **)&incoming)) {
        remote_fencing_op_t *known = g_hash_table_lookup(stonith_remote_op_list,
                                                         incoming->id);

        if (known != NULL) {
            continue; // Skip existent (@TODO state-merging might be desirable)
        }

        changed = TRUE;
        /* Transfer ownership of the op from the remote table to ours. */
        g_hash_table_iter_steal(&hiter);
        g_hash_table_insert(stonith_remote_op_list, incoming->id, incoming);

        /* we could trim the history here but if we bail
         * out after trim we might miss more recent entries
         * of those that might still be in the list
         * if we don't bail out trimming once is more
         * efficient and memory overhead is minimal as
         * we are just moving pointers from one hash to
         * another
         */
    }

    stonith_fence_history_trim();

    if (changed) {
        do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
    }

    g_hash_table_destroy(history); /* remove what is left */
}
/*
 * Age out local services in two passes:
 *  1) services whose score timestamp is older than 5s are moved
 *     (key+value stolen, then re-inserted) from local_services to
 *     down_services, invalidated, and flagged tag.up=FALSE;
 *  2) services that have been DOWN for more than 30s are removed
 *     (and destroyed by the table's destructors) from down_services.
 *
 * NOTE(review): DEBUG prints time_down with %ld — assumes time_t is
 * long on this platform; confirm for portability.
 */
static void
_detect_obsolete_services(struct namespace_data_s *ns_data)
{
	guint counter;
	time_t time_now, time_down, time_broken;
	GHashTableIter s_iterator;
	gpointer s_k, s_v;
	gchar *str_key;
	struct service_info_s *si;

	/* g_get_real_time() is in microseconds; convert to seconds. */
	time_now = g_get_real_time()/1000000;
	time_down = time_now - 5;
	time_broken = time_now - 30;
	counter = 0;

	if (!ns_data->configured) {
		return;
	}

	/*move services from UP to DOWN */
	g_hash_table_iter_init(&s_iterator, ns_data->local_services);
	while (g_hash_table_iter_next(&s_iterator, &s_k, &s_v)) {
		str_key = s_k;
		si = s_v;
		si->score.value = -2;/*score not set*/
		if (si->score.timestamp < time_down) {
			gchar str_addr[STRLEN_ADDRINFO];
			addr_info_to_string(&(si->addr),str_addr,sizeof(str_addr));
			DEBUG("Timeout on service [%s/%s/%s] (%"G_GINT32_FORMAT" < %ld) --> DOWN",
				si->ns_name, si->type, str_addr, si->score.timestamp, time_down);
			/* Steal so local_services' destructors do not free the
			 * entry; down_services takes ownership of key and value. */
			g_hash_table_iter_steal(&s_iterator);
			g_hash_table_insert(ns_data->down_services, str_key, si);
			invalidate_conscience_service(ns_data,si);
			zero_service_stats(si->tags);
			service_tag_set_value_boolean(service_info_ensure_tag(si->tags,"tag.up"), FALSE);
			counter++;
		}
	}

	/*remove services DOWN from a long time ago */
	g_hash_table_iter_init(&s_iterator, ns_data->down_services);
	while (g_hash_table_iter_next(&s_iterator, &s_k, &s_v)) {
		str_key = s_k;
		si = s_v;
		si->score.value = -2;/*score not set*/
		if (si->score.timestamp < time_broken) {
			gchar str_addr[STRLEN_ADDRINFO];
			addr_info_to_string(&(si->addr),str_addr,sizeof(str_addr));
			DEBUG("Service obsolete [%s/%s/%s] --> DELETED",
				si->ns_name, si->type, str_addr);
			/* iter_remove (not steal): the table's destroy funcs
			 * free the key and service here. */
			g_hash_table_iter_remove(&s_iterator);
			counter++;
		}
		else
			zero_service_stats(si->tags);
	}

	if (counter)
		DEBUG("[task_id=%s] %u services states have changed", TASK_ID, counter);
}
/*
 * K-mixture-model trainer (manual argv parsing variant).
 *
 * Parses options, loads the phrase table and phrase index, attaches the
 * bigram model file, then for each remaining argv entry reads the
 * document, trains bigram/unigram statistics, bumps the document count
 * in the model header, and frees the per-document hash tables.
 *
 * NOTE(review): if --k-mixture-model-file is not supplied,
 * k_mixture_model_filename stays NULL when passed to bigram.attach() —
 * confirm attach() tolerates NULL.
 */
int main(int argc, char * argv[]){
    int i = 1;
    const char * k_mixture_model_filename = NULL;

    setlocale(LC_ALL, "");
    /* Consume leading options; first non-option argv entry breaks out
     * and is treated as the first document filename. */
    while ( i < argc ){
        if ( strcmp("--help", argv[i]) == 0 ){
            print_help();
            exit(0);
        } else if ( strcmp("--skip-pi-gram-training", argv[i]) == 0 ){
            g_train_pi_gram = false;
        } else if ( strcmp("--maximum-occurs-allowed", argv[i]) == 0 ){
            if ( ++i >= argc ){
                print_help();
                exit(EINVAL);
            }
            g_maximum_occurs = atoi(argv[i]);
        } else if ( strcmp("--maximum-increase-rates-allowed", argv[i]) == 0 ){
            if ( ++i >= argc ){
                print_help();
                exit(EINVAL);
            }
            g_maximum_increase_rates = atof(argv[i]);
        } else if ( strcmp("--k-mixture-model-file", argv[i]) == 0 ){
            if ( ++i >= argc ){
                print_help();
                exit(EINVAL);
            }
            k_mixture_model_filename = argv[i];
        } else {
            break;
        }
        ++i;
    }

    /* Phrase table takes ownership of the chunk loaded from disk. */
    PhraseLargeTable2 phrase_table;
    MemoryChunk * chunk = new MemoryChunk;
    chunk->load("phrase_index.bin");
    phrase_table.load(chunk);

    FacadePhraseIndex phrase_index;
    if (!load_phrase_index(&phrase_index))
        exit(ENOENT);

    KMixtureModelBigram bigram(K_MIXTURE_MODEL_MAGIC_NUMBER);
    bigram.attach(k_mixture_model_filename, ATTACH_READWRITE|ATTACH_CREATE);

    /* Remaining arguments are training documents. */
    while ( i < argc ){
        const char * filename = argv[i];
        FILE * document = fopen(filename, "r");
        if ( NULL == document ){
            int err_saved = errno;
            fprintf(stderr, "can't open file: %s.\n", filename);
            fprintf(stderr, "error:%s.\n", strerror(err_saved));
            exit(err_saved);
        }

        HashofDocument hash_of_document = g_hash_table_new
            (g_direct_hash, g_direct_equal);
        HashofUnigram hash_of_unigram = g_hash_table_new
            (g_direct_hash, g_direct_equal);

        assert(read_document(&phrase_table, &phrase_index, document,
                             hash_of_document, hash_of_unigram));
        fclose(document);
        document = NULL;

        GHashTableIter iter;
        gpointer key, value;

        /* train the document, and convert it to k mixture model. */
        g_hash_table_iter_init(&iter, hash_of_document);
        while (g_hash_table_iter_next(&iter, &key, &value)) {
            phrase_token_t token1 = GPOINTER_TO_UINT(key);
            train_second_word(hash_of_unigram, &bigram,
                              hash_of_document, token1);
        }

        /* m_N counts trained documents. */
        KMixtureModelMagicHeader magic_header;
        assert(bigram.get_magic_header(magic_header));
        magic_header.m_N ++;
        assert(bigram.set_magic_header(magic_header));

        post_processing_unigram(&bigram, hash_of_unigram);

        /* free resources of g_hash_of_document: each value is itself a
         * hash table; steal it before unref so the outer table cannot
         * touch it again. */
        g_hash_table_iter_init(&iter, hash_of_document);
        while (g_hash_table_iter_next(&iter, &key, &value)) {
            HashofSecondWord second_word = (HashofSecondWord) value;
            g_hash_table_iter_steal(&iter);
            g_hash_table_unref(second_word);
        }

        g_hash_table_unref(hash_of_document);
        hash_of_document = NULL;
        g_hash_table_unref(hash_of_unigram);
        hash_of_unigram = NULL;

        ++i;
    }

    return 0;
}
/*
 * Compare the chunks of the current segment against the global chunkset.
 *
 * Pass 1 builds a per-source-segment hit count ("whitelist"); pass 2
 * classifies each chunk as duplicate/collision, decides whether a
 * read-back is simulated (suppressed when the whitelist hit count
 * exceeds wl_threshold and enable_wl is set), and steals new chunks
 * into the global chunkset.
 *
 * NOTE(review): relies on file-scope state — chunkset, minhash, suffix,
 * file_count, segment_count, wl_threshold and the stats counters.
 * The `chunk` local in pass 1 is unused. %lld assumes fsize is
 * long long — confirm against struct chunk_item.
 */
static void check_cursegment(GHashTable* cursegment, int enable_wl){
    GHashTableIter iter;
    gpointer key, value;

    /* whitelist: key and value are the same malloc'd int[2]
     * {segment id, hit count}; freed once via the key destroy func. */
    GHashTable *whitelist = g_hash_table_new_full(g_int_hash, g_int_equal, free, NULL);
    g_hash_table_iter_init(&iter, cursegment);
    while(g_hash_table_iter_next(&iter, &key, &value)){
        struct chunk_item* chunk = value;
        struct chunk_item* target = g_hash_table_lookup(chunkset, key);
        if(target){
            int *sc = g_hash_table_lookup(whitelist, &target->sid);
            if(!sc){
                sc = malloc(sizeof(int)*2);
                sc[0] = target->sid;
                sc[1] = 0;
                g_hash_table_insert(whitelist, sc, sc);
            }
            sc[1]++;
        }
    }

    g_hash_table_iter_init(&iter, cursegment);
    while(g_hash_table_iter_next(&iter, &key, &value)){
        struct chunk_item* chunk = value;
        struct chunk_item* target = g_hash_table_lookup(chunkset, key);
        if(target){
            /* Same lookup key but different full hash => collision. */
            if(memcmp(target->hash, chunk->hash, 20)!=0){
                fprintf(stderr, "+Find a collision between %s <-> %s! Try to detect it!\n",
                        target->suffix, suffix);
                fprintf(stderr, "+Details: MinHash = %s, Size %lld <-> %lld\n",
                        memcmp(target->minhash, chunk->minhash, 20) == 0?"True":"False",
                        target->fsize, chunk->fsize);
                collisions++;
            }else{
                dup_chunks++;
            }
            int read_back = 0;
            /* The minhash gate below is disabled: read-back defaults on
             * unless the whitelist suppresses it. */
            /*if(memcmp(target->minhash, minhash, 20) != 0){*/
            read_back = 1;
            int *sc = g_hash_table_lookup(whitelist, &target->sid);
            assert(sc);
            assert(sc[1]>0);
            if(enable_wl && sc[1] > wl_threshold){
                read_back = 0;
            }
            /*} */
            if(read_back){
                chunks_read_back++;
                if(memcmp(target->hash, chunk->hash, 20)!=0){
                    fprintf(stderr, "-Collision Detected!\n");
                    detected_collisions++;
                }
            }
            target->rc++;
        }else{
            /* First sighting: stamp file/segment metadata, then move
             * ownership from cursegment into the global chunkset. */
            chunk->fid = file_count;
            chunk->sid = segment_count;
            memcpy(chunk->minhash, minhash, 20);
            memcpy(chunk->suffix, suffix, 8);
            g_hash_table_iter_steal(&iter);
            g_hash_table_insert(chunkset, chunk->hash, chunk);
        }
    }
    g_hash_table_destroy(whitelist);
}
/*
 * K-mixture-model trainer (GOption parsing variant).
 *
 * Parses options via GOption, loads table.conf and the system phrase
 * table/index, attaches the bigram model file named by the
 * g_k_mixture_model_filename option, then for each remaining argv
 * entry reads the document, trains bigram/unigram statistics, bumps
 * the document count in the model header, and frees the per-document
 * hash tables.
 */
int main(int argc, char * argv[]){
    int i = 1;

    setlocale(LC_ALL, "");

    GError * error = NULL;
    GOptionContext * context;

    context = g_option_context_new("- generate k mixture model");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_print("option parsing failed:%s\n", error->message);
        exit(EINVAL);
    }

    SystemTableInfo2 system_table_info;

    bool retval = system_table_info.load(SYSTEM_TABLE_INFO);
    if (!retval) {
        fprintf(stderr, "load table.conf failed.\n");
        exit(ENOENT);
    }

    PhraseLargeTable3 phrase_table;
    phrase_table.attach(SYSTEM_PHRASE_INDEX, ATTACH_READONLY);

    FacadePhraseIndex phrase_index;

    const pinyin_table_info_t * phrase_files =
        system_table_info.get_default_tables();

    if (!load_phrase_index(phrase_files, &phrase_index))
        exit(ENOENT);

    KMixtureModelBigram bigram(K_MIXTURE_MODEL_MAGIC_NUMBER);
    bigram.attach(g_k_mixture_model_filename, ATTACH_READWRITE|ATTACH_CREATE);

    /* After g_option_context_parse, argv holds only positional
     * arguments: the training documents. */
    while ( i < argc ){
        const char * filename = argv[i];
        FILE * document = fopen(filename, "r");
        if ( NULL == document ){
            int err_saved = errno;
            fprintf(stderr, "can't open file: %s.\n", filename);
            fprintf(stderr, "error:%s.\n", strerror(err_saved));
            exit(err_saved);
        }

        HashofDocument hash_of_document = g_hash_table_new
            (g_direct_hash, g_direct_equal);
        HashofUnigram hash_of_unigram = g_hash_table_new
            (g_direct_hash, g_direct_equal);

        assert(read_document(&phrase_table, &phrase_index, document,
                             hash_of_document, hash_of_unigram));
        fclose(document);
        document = NULL;

        GHashTableIter iter;
        gpointer key, value;

        /* train the document, and convert it to k mixture model. */
        g_hash_table_iter_init(&iter, hash_of_document);
        while (g_hash_table_iter_next(&iter, &key, &value)) {
            phrase_token_t token1 = GPOINTER_TO_UINT(key);
            train_second_word(hash_of_unigram, &bigram,
                              hash_of_document, token1);
        }

        /* m_N counts trained documents. */
        KMixtureModelMagicHeader magic_header;
        assert(bigram.get_magic_header(magic_header));
        magic_header.m_N ++;
        assert(bigram.set_magic_header(magic_header));

        post_processing_unigram(&bigram, hash_of_unigram);

        /* free resources of g_hash_of_document: each value is itself a
         * hash table; steal it before unref so the outer table cannot
         * touch it again. */
        g_hash_table_iter_init(&iter, hash_of_document);
        while (g_hash_table_iter_next(&iter, &key, &value)) {
            HashofSecondWord second_word = (HashofSecondWord) value;
            g_hash_table_iter_steal(&iter);
            g_hash_table_unref(second_word);
        }

        g_hash_table_unref(hash_of_document);
        hash_of_document = NULL;
        g_hash_table_unref(hash_of_unigram);
        hash_of_unigram = NULL;

        ++i;
    }

    return 0;
}
/*
 * Load primary/filelists/other XML metadata into md->ht, re-keyed by
 * md->key (filename, pkgId hash, or package name).
 *
 * Packages are parsed into an intermediate table keyed by pkgId, then
 * moved (stolen) into md->ht. Duplicate keys are handled per
 * md->dupaction: either the first occurrence is kept, or — when the
 * duplicates actually differ — the key is recorded and all its
 * occurrences are purged from the result afterwards.
 *
 * Returns CRE_OK, or a CRE_* error code with *err set.
 *
 * NOTE(review): the unreachable-by-assert default branch returns
 * without freeing intern_hashtable/ignored_keys — leaks if NDEBUG
 * ever lets it run.
 */
int
cr_metadata_load_xml(cr_Metadata *md,
                     struct cr_MetadataLocation *ml,
                     GError **err)
{
    int result;
    GError *tmp_err = NULL;
    GHashTable *intern_hashtable;  // key is checksum (pkgId)
    cr_HashTableKeyDupAction dupaction = md->dupaction;

    assert(md);
    assert(ml);
    assert(!err || *err == NULL);

    if (!ml->pri_xml_href) {
        g_set_error(err, ERR_DOMAIN, CRE_BADARG,
                    "primary.xml file is missing");
        return CRE_BADARG;
    }

    // Load metadata
    intern_hashtable = cr_new_metadata_hashtable();
    result = cr_load_xml_files(intern_hashtable,
                               ml->pri_xml_href,
                               ml->fil_xml_href,
                               ml->oth_xml_href,
                               md->chunk,
                               md->pkglist_ht,
                               &tmp_err);

    if (result != CRE_OK) {
        g_critical("%s: Error encountered while parsing", __func__);
        g_propagate_prefixed_error(err, tmp_err,
                                   "Error encountered while parsing:");
        cr_destroy_metadata_hashtable(intern_hashtable);
        return result;
    }

    g_debug("%s: Parsed items: %d", __func__,
            g_hash_table_size(intern_hashtable));

    // Fill user hashtable and use user selected key
    GHashTableIter iter;
    gpointer p_key, p_value;
    GHashTable *ignored_keys = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                     g_free, NULL);

    g_hash_table_iter_init (&iter, intern_hashtable);
    while (g_hash_table_iter_next (&iter, &p_key, &p_value)) {
        cr_Package *pkg = (cr_Package *) p_value;
        cr_Package *epkg;
        gpointer new_key;

        /* The new key borrows memory owned by the package itself. */
        switch (md->key) {
            case CR_HT_KEY_FILENAME:
                new_key = cr_get_filename(pkg->location_href);
                break;
            case CR_HT_KEY_HASH:
                new_key = pkg->pkgId;
                break;
            case CR_HT_KEY_NAME:
                new_key = pkg->name;
                break;
            default:
                // Well, this SHOULD never happend!
                // (md->key SHOULD be setted only by cr_metadata_new()
                // and it SHOULD set only valid key values)
                g_critical("%s: Unknown hash table key selected", __func__);
                assert(0);
                g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type");
                return CRE_ASSERT;
        }

        epkg = g_hash_table_lookup(md->ht, new_key);
        if (epkg) {
            // Such key already exists
            if (dupaction == CR_HT_DUPACT_KEEPFIRST) {
                g_debug("%s: Key \"%s\" already exists in hashtable - Keeping the first occurrence",
                        __func__, (char *) new_key);
            } else {
                /* Only ignore the key when the duplicates genuinely
                 * differ (time, size, checksum, or basename). */
                if (pkg->time_file != epkg->time_file
                    || pkg->size_package != epkg->size_package
                    || g_strcmp0(pkg->pkgId, epkg->pkgId)
                    || g_strcmp0(cr_get_filename(pkg->location_href),
                                 cr_get_filename(epkg->location_href))
                    )
                {
                    // We got a key (checksum, filename, pkg name, ..)
                    // which has a multiple occurences which are different.
                    // Ignore such key
                    g_debug("%s: Key \"%s\" is present multiple times and with "
                            "different values. Ignoring all occurrences. "
                            "[size_package: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT
                            "; time_file: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT
                            "; pkgId: %s|%s; basename: %s|%s]",
                            __func__, (gchar *) new_key,
                            pkg->size_package, epkg->size_package,
                            pkg->time_file, epkg->time_file,
                            pkg->pkgId, epkg->pkgId,
                            cr_get_filename(pkg->location_href),
                            cr_get_filename(epkg->location_href));

                    g_hash_table_insert(ignored_keys,
                                        g_strdup((gchar *) new_key),
                                        NULL);
                }
            }
            // Remove the package from the iterator anyway
            // (iter_remove runs the intern table's destructors)
            g_hash_table_iter_remove(&iter);
        } else {
            /* Insert first, then steal: md->ht owns the package now,
             * and the intern table must not free it on destroy. */
            g_hash_table_insert(md->ht, new_key, p_value);
            g_hash_table_iter_steal(&iter);
        }
    }

    // Remove ignored_keys from resulting hashtable
    g_hash_table_iter_init(&iter, ignored_keys);
    while (g_hash_table_iter_next(&iter, &p_key, &p_value)) {
        char *key = (gchar *) p_key;
        g_hash_table_remove(md->ht, key);
    }

    // How much items we really use
    g_debug("%s: Really usable items: %d", __func__,
            g_hash_table_size(md->ht));

    // Cleanup
    g_hash_table_destroy(ignored_keys);
    cr_destroy_metadata_hashtable(intern_hashtable);

    return CRE_OK;
}