/* Verify a prehashed Ed448 signature: finalize a copy of the caller's hash
 * context to obtain the message digest, then verify with prehash mode on. */
cryptonite_decaf_error_t cryptonite_decaf_ed448_verify_prehash (
    const uint8_t signature[CRYPTONITE_DECAF_EDDSA_448_SIGNATURE_BYTES],
    const uint8_t pubkey[CRYPTONITE_DECAF_EDDSA_448_PUBLIC_BYTES],
    const cryptonite_decaf_ed448_prehash_ctx_t hash,
    const uint8_t *context,
    uint8_t context_len
) {
    /* 64 bytes of prehash output: "MAGIC but true for all existing schemes" */
    uint8_t digest[64];
    cryptonite_decaf_ed448_prehash_ctx_t scratch;

    /* Finalize a copy so the caller's hash context stays usable. */
    memcpy(scratch, hash, sizeof(scratch));
    hash_final(scratch, digest, sizeof(digest));
    hash_destroy(scratch);

    return cryptonite_decaf_ed448_verify(signature, pubkey, digest,
                                         sizeof(digest), 1,
                                         context, context_len);
}
/* Tear down the XLOG relation cache: detach every cached relation
 * descriptor, destroy the hash table, and free the backing arrays. */
void
XLogCloseRelationCache(void)
{
    HASH_SEQ_STATUS seq;
    XLogRelCacheEntry *entry;

    /* Nothing to tear down if the cache was never built. */
    if (!_xlrelarr)
        return;

    hash_seq_init(&seq, _xlrelcache);
    while ((entry = (XLogRelCacheEntry *) hash_seq_search(&seq)) != NULL)
        _xl_remove_hash_entry(entry->rdesc);

    hash_destroy(_xlrelcache);

    free(_xlrelarr);
    free(_xlpgcarr);

    _xlrelarr = NULL;
}
/* Print one satisfying assignment of the BDD: walk from node n up to the
 * root recording which branch each parent takes, then print per-variable
 * values in the order given by 'vars' ('*' for don't-care variables). */
void bdd_output_candidate(bdd_node_t *n, list_t *vars)
{
    hash_t *assignment;       /* var name -> value string from yes/no nodes */
    bdd_node_t *parent;
    listentry_t *entry;

    assignment = hash_create(VAR_HASH_SIZE);

#ifdef VERBOSE
    printf("Tracing solution:\n");
#endif
    /* Climb toward the root; each parent tells us whether the path to n
     * goes through its zero- or one-branch. */
    for (parent = n->parent; parent != NULL; parent = n->parent) {
#ifdef VERBOSE
        printf("n: %p(\"%s\") -> (%p,%p); parent: %p(\"%s\") -> (%p,%p)\n",
               n, n->var, n->iszero, n->isone,
               parent, parent->var, parent->iszero, parent->isone);
#endif
        if (parent->iszero == n)
            hash_put(assignment, parent->var, no.var, 2);
        else if (parent->isone == n)
            hash_put(assignment, parent->var, yes.var, 2);
        else
            abort();            /* n must be one of its parent's children */
        n = parent;
    }

    /* Emit one character per variable, space-separated, newline at end. */
    for (entry = vars->first; entry != NULL; entry = entry->next) {
        char *value = hash_get(assignment, (char *)entry->element);

        if (value == NULL)
            printf("*");
        else
            printf("%c", value[0]);

        if (entry->next != NULL)
            printf(" ");
        else
            printf("\n");
    }

    hash_destroy(assignment);
}
/* Convert a two-valued wff expression to an OBDD stored in 'nodes'.
 * Returns the index of the root node (terminals live at indices 0 and 1). */
static int tv_wff_expr_to_obdd(const_variable_list variables,
                               tv_wff_expr e,
                               obdd_node_list nodes,
                               const int brute_force)
{
    int root = -1;

    /* Special case: the NIL expression yields the TRUE terminal (index 1). */
    if (e == tv_wff_exprNIL) {
        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_FALSE)));
        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_TRUE)));
        return 1;
    }
    /* Special case: a constant expression maps directly to a terminal. */
    if (e->tag == TAGtv_wff_e_const) {
        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_FALSE)));
        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_TRUE)));
        return to_tv_wff_e_const(e)->c;
    }

    if (brute_force) {
        /* Add the terminal nodes, then condition on each variable in turn. */
        hash_table index;

        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_FALSE)));
        append_obdd_node_list(nodes, to_obdd_node(new_obdd_terminal_node(LYDIA_TRUE)));
        hash_init(&index, 2, node_hash_function, NULL);
        root = build_obdd_conditioning(0, variables->sz, e, nodes, &index);
        hash_destroy(&index);
    } else {
        /* To Do: Constant folding in 'e'. */
        obdd_node_list built = build_obdd(e, &root);
        concat_obdd_node_list(nodes, built);
    }

    set_int_counter("nodes", nodes->sz);
    return root;
}
void address_release() { int i, fd; hash_node *hn; link_list *list; p_link_node ln, tmp_ln; if (addr_table == NULL) { return; } for (i = 0; i < addr_table->size; i++) { list = addr_table->lists[i]; ln = link_list_first(list); while (ln) { tmp_ln = link_list_get_next(list, ln); hn = (hash_node *) ln->data; if (hn->data != NULL) { fd = (int) (long) hn->data; hn->data = NULL; if (fd != 0) { tc_log_info(LOG_NOTICE, 0, "it close socket:%d", fd); close(fd); } } ln = tmp_ln; } } tc_log_info(LOG_NOTICE, 0, "destroy addr table"); hash_destroy(addr_table); free(addr_table); addr_table = NULL; }
/**
 * Deletes auxiliary components created by CFst_Cps_AddSdAux from the state table.
 *
 * @param _this Pointer to automaton instance
 * @see Cps_AddSdAux CFst_Cps_AddSdAux
 * @see Cps_SetSdAux CFst_Cps_SetSdAux
 * @see Cps_FindState CFst_Cps_FindState
 */
void CGEN_PRIVATE CFst_Cps_DelSdAux(CFst* _this)
{
    INT32 nIdx;

    /* Destroy composed state hash map */
    /* NOTE: MUST be done before deleting auxiliary state components! */
    hash_free_nodes((hash_t*)_this->m_lpCpsHash);
    hash_destroy((hash_t*)_this->m_lpCpsHash);
    _this->m_lpCpsHash = NULL;

    /* Destroy hash node pool (each pooled node, then the pool array) */
    if (_this->m_lpCpsHnpool) {
        INT32 nCount = (INT32)(dlp_size(_this->m_lpCpsHnpool)/sizeof(hnode_t*));
        for (nIdx = 0; nIdx < nCount; nIdx++)
            dlp_free(_this->m_lpCpsHnpool[nIdx]);
    }
    dlp_free(_this->m_lpCpsHnpool);
    _this->m_lpCpsHnpool = NULL;
    _this->m_nCpsHnpoolSize = 0;

    /* Delete auxiliary components from state table */
    CData_DeleteComps(AS(CData,_this->sd), _this->m_nIcSdAux, 3);
    _this->m_nIcSdAux = -1;
}
/* destroy delay table */ void delay_table_destroy() { uint32_t i; link_list *msg_list, *list; hash_node *hn; p_link_node ln; if (table != NULL) { tc_log_info(LOG_NOTICE, 0, "destroy delay table,total:%u", table->total); for (i = 0; i < table->size; i++) { list = table->lists[i]; ln = link_list_first(list); while (ln) { hn = (hash_node *)ln->data; if (hn->data != NULL) { msg_list = (link_list *)hn->data; msg_item_destr_cnt += link_list_clear(msg_list); free(msg_list); msg_ls_destr_cnt++; } hn->data = NULL; ln = link_list_get_next(list, ln); } } tc_log_info(LOG_NOTICE, 0, "destroy items:%llu,free:%llu,total:%llu", msg_item_destr_cnt, msg_item_free_cnt, msg_item_cnt); tc_log_info(LOG_NOTICE, 0, "create msg list:%llu,free:%llu,destr:%llu", msg_ls_cnt, msg_ls_free_cnt, msg_ls_destr_cnt); hash_destroy(table); free(table); table = NULL; } }
/* Release every D-Bus resource that was actually allocated; each argument
 * may be NULL when the caller never got that far. */
void free_connection(DBusConnection *conn,
                     DBusError *err,
                     hash_table_t *settings_table,
                     DBusMessage *msg,
                     DBusMessage *reply)
{
    if (msg != NULL) {
        dbus_message_unref(msg);
    }
    if (reply != NULL) {
        dbus_message_unref(reply);
    }
    /* Only free the error when one was actually recorded. */
    if (err != NULL && dbus_error_is_set(err)) {
        dbus_error_free(err);
    }
    if (settings_table != NULL) {
        hash_destroy(settings_table);
    }
    if (conn != NULL) {
        dbus_connection_close(conn);
    }
}
/// Breaks up the aggregation and uploads each piece separately. /// Audit groups do not nest. Therefore, when we see an aggregating /// cmd while already within an audit group, the "outer" group becomes /// invalid. This method traverses an audit group, publishes each of /// its audits individually, and destroys the group, thus clearing /// the way for the "inner" group to take over. /// @param[in] ldr the leader object pointer /// @param[in] process a function pointer to the publishing callback void ca_disband(ca_o ldr, void (*process) (ca_o)) { hnode_t *hnp; ck_o ck; ca_o sub; hscan_t hscan; _ca_verbosity_ag(ldr, "DISBANDING", NULL); hash_scan_begin(&hscan, ldr->ca_group_hash); for (hnp = hash_scan_next(&hscan); hnp; hnp = hash_scan_next(&hscan)) { ck = (ck_o)hnode_getkey(hnp); sub = (ca_o)hnode_get(hnp); if (ca_get_closed(sub)) { _ca_verbosity_ag(sub, "PROCESSING", NULL); ca_coalesce(sub); process(sub); } else { _ca_verbosity_ag(sub, "RELEASING", NULL); } ca_set_leader(sub, NULL); hash_scan_delete(ldr->ca_group_hash, hnp); hnode_destroy(hnp); ck_destroy(ck); } if (ca_get_closed(ldr)) { _ca_verbosity_ag(ldr, "PROCESSING", NULL); ca_coalesce(ldr); process(ldr); } else { _ca_verbosity_ag(ldr, "RELEASING", NULL); } hash_destroy(ldr->ca_group_hash); ldr->ca_group_hash = NULL; ca_set_leader(ldr, NULL); }
/// Finish off the CA by formatting it and sending it off. /// @param[in] ca the object pointer /// @param[in] process a function pointer to the publishing callback void ca_publish(ca_o ca, void (*process) (ca_o)) { _ca_verbosity_ag(ca, "BUNDLING", NULL); // Combine the textual command lines of the subcommands for record keeping. if (ca->ca_group_hash) { hscan_t hscan; hnode_t *hnp; ck_o ck; ca_o sub; hash_scan_begin(&hscan, ca->ca_group_hash); for (hnp = hash_scan_next(&hscan); hnp; hnp = hash_scan_next(&hscan)) { ck = (ck_o)hnode_getkey(hnp); sub = (ca_o)hnode_get(hnp); if (sub != ca) { _ca_verbosity_ag(sub, "MERGING", NULL); ca_merge(ca, sub); ca_set_processed(sub, 1); } hash_scan_delete(ca->ca_group_hash, hnp); hnode_destroy(hnp); ck_destroy(ck); } hash_destroy(ca->ca_group_hash); ca->ca_group_hash = NULL; } // Merge all PAs in subcommands in with the PA set of the leader. // Must be done before serializing for upload but after subcmds // are merged in. ca_coalesce(ca); // The sub CAs have now been sucked dry. Publish the fully merged leader. process(ca); }
/* Register a plug-in loader with the context: allocate an (initially
 * empty) plug-in set for it and record the loader -> plug-ins mapping.
 * Returns CP_OK on success or CP_ERR_RESOURCE on allocation failure. */
CP_C_API cp_status_t cp_register_ploader(cp_context_t *ctx, cp_plugin_loader_t *loader) {
    cp_status_t status = CP_OK;
    hash_t *loader_plugins = NULL;

    CHECK_NOT_NULL(ctx);
    CHECK_NOT_NULL(loader);

    cpi_lock_context(ctx);
    cpi_check_invocation(ctx, CPI_CF_ANY, __func__);

    loader_plugins = hash_create(HASHCOUNT_T_MAX,
                                 (int (*)(const void *, const void *)) strcmp,
                                 NULL);
    if (loader_plugins == NULL) {
        status = CP_ERR_RESOURCE;
    } else if (!hash_alloc_insert(ctx->env->loaders_to_plugins,
                                  loader, loader_plugins)) {
        status = CP_ERR_RESOURCE;
    }

    // Report error or success
    if (status != CP_OK) {
        cpi_errorf(ctx, N_("The plug-in loader %p could not be registered due to insufficient memory."), (void *) loader);
    } else {
        cpi_debugf(ctx, N_("The plug-in loader %p was registered."), (void *) loader);
    }
    cpi_unlock_context(ctx);

    // Release the (still empty) plug-in set if registration failed
    if (status != CP_OK && loader_plugins != NULL) {
        assert(hash_isempty(loader_plugins));
        hash_destroy(loader_plugins);
    }

    return status;
}
/* Remove the hash structure. If free_cb != NULL it is invoked for every
 * stored element; if the elements are not removed this way, their memory
 * might be leaked. The chain elements themselves are always freed. */
void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb)
{
    struct element_t *cur, *done;
    int i;

    for (i = 0; i < hash->size; i++) {
        cur = hash->table[i];
        while (cur != NULL) {
            if (free_cb != NULL)
                free_cb(cur->data);
            done = cur;
            cur = cur->next;
            debugFree(done, 1301);
        }
    }

    hash_destroy(hash);
}
/* Build the ostinfo string for 'name' into s (at most len bytes).
 * Returns 0 on success, -1 if the uuid could not be read from /proc.
 * FIX: removed the dead 'stats_hash' local — it was declared, never
 * assigned, and conditionally destroyed, so the destroy never ran. */
int
_get_ostinfo_string (pctx_t ctx, char *name, char *s, int len)
{
    char *uuid = NULL;
    int retval = -1;

    if (proc_lustre_uuid (ctx, name, &uuid) < 0) {
        if (lmt_conf_get_proto_debug ())
            err ("error reading lustre %s uuid from proc", name);
        goto done;
    }
    _itemize_mdt_export_stats (ctx, name, s, len);
    retval = 0;
done:
    free (uuid);        /* free(NULL) is a no-op */
    return retval;
}
void destory() { int i; pthread_cancel(t_heartbeat); for (t_num; t_num > 0; t_num--) { pthread_cancel(tid[t_num]); } for (i = 0; i < MAX_ROOMS; i++) { pthread_mutex_destroy(&t_mutex_room[i]); } pthread_mutex_destroy(&t_mutex_hash); pthread_mutex_destroy(&t_mutex); pthread_cond_destroy(&t_cond); db_close(); hash_destroy(); if (fp) fclose(fp); for (i = 0; i < MAX_FDS; i++) { if (fd_clients[i] != NULL) { node_del(fd_clients[i]); } } }
/* Destroy Frag Table * * Will destroy the entire tree, and destory each of the fragment lists + * fragments in the tree. * * @return -1 on failure * 0 on success */ int frag_table_finalize() { struct frag_list *it; unsigned i; const void *key; if(!fragtable) return -1; #ifdef ENABLE_PTHREADS tmq_stop(timeout_queue); #endif tmq_destroy(timeout_queue); for(it = hash_first(fragtable, &i, &key); it; it = hash_next(fragtable, &i, &key)) frag_table_remove((struct frag_key *) key, it); hash_destroy(fragtable); return 0; }
/* Complain about any remaining invalid-page entries */ void xlog_check_invalid_pages(void) { struct hseq_status status; xl_invalid_page *hentry; bool foundone; foundone = false; if (invalid_page_tab == NULL) return; /* nothing to do */ hseq_init(&status, invalid_page_tab); /* * Our strategy is to emit WARNING messages for all remaining entries * and only PANIC after we've dumped all the available info. */ while ((hentry = (xl_invalid_page *)hseq_search(&status)) != NULL) { char *path; path = relpathperm(hentry->key.node, hentry->key.forkno); if (hentry->present) elog(WARNING, "page %u of relation %s was" " uninitialized", hentry->key.blkno, path); else elog(WARNING, "page %u of relation %s did not exist", hentry->key.blkno, path); pfree(path); foundone = true; } if (foundone) elog(PANIC, "WAL contains references to invalid pages"); hash_destroy(invalid_page_tab); invalid_page_tab = NULL; }
/* Smoke test for the zn_hash table: insert 25000 generated 3-char keys,
 * each mapped to a heap copy of itself, then verify one lookup.
 * Returns 0 on success, 1 on failure. */
int main(int argc, char** argv) {
    (void) argc;
    (void) argv;

    zn_hash *hash = hash_new(NULL);
    char key[4] = "aaa";
    int i = 0;

    while (i++ < 25000) {
        hash_put(hash, key, strdup(key));
        _inc_key(key, 2);
    }

    char *out = (char *) hash_get(hash, "efg");
    /* BUG FIX: guard against a NULL lookup result — the original called
     * strcmp(out, "efg") and would dereference NULL on a missing key. */
    if (out == NULL || strcmp(out, "efg") != 0) {
        printf("FAIL! expected 'efg' but got %s\n", out ? out : "(null)");
        return 1;
    }

    hash_destroy(hash);
    printf("Hash Table: [OK]\n");
    return 0;   /* explicit (was implicit via C99 main fall-through) */
}
/** * @brief Empty a hash table * @param name Name of the ash table to empty. * @param returrn a pointer to the hash table or NULL on error. */ hash_t *hash_empty(char *name) { hash_t *hash; char *newname; int size; char type; PROFILER_IN(__FILE__, __FUNCTION__, __LINE__); hash = hash_find(name); if (!hash) PROFILER_ROUT(__FILE__, __FUNCTION__, __LINE__, NULL); //printf("EMPTY HASH %s \n", name); size = hash->size; type = hash->type; hash_del(hash_hash, name); hash_destroy(hash); XALLOC(__FILE__, __FUNCTION__, __LINE__, newname, strlen(name) + 1, NULL); strncpy(newname, name, strlen(name)); hash_init(hash, newname, size, type); PROFILER_ROUT(__FILE__, __FUNCTION__, __LINE__, hash); }
/* Destroy a hash table AND free the payload stored in every node
 * (hash_destroy alone leaves the payloads to the caller). */
void
hash_deep_destroy(hash_table *table)
{
    uint32_t i;
    link_list *bucket;
    p_link_node node, next;
    hash_node *hn;

    for (i = 0; i < table->size; i++) {
        bucket = table->lists[i];
        if (bucket == NULL)
            continue;
        for (node = link_list_first(bucket); node; node = next) {
            next = link_list_get_next(bucket, node);
            hn = (hash_node *) node->data;
            if (hn->data != NULL) {
                free(hn->data);
                hn->data = NULL;
            }
        }
    }

    hash_destroy(table);
}
error filenamedb_open(const char *filename, filenamedb_t **pdb) { error err; char *filenamecopy = NULL; atom_set_t *filenames = NULL; hash_t *hash = NULL; filenamedb_t *db = NULL; assert(filename); assert(pdb); filenamecopy = str_dup(filename); if (filenamecopy == NULL) { err = error_OOM; goto Failure; } filenames = atom_create_tuned(ATOMBUFSZ / ESTATOMLEN, ATOMBUFSZ); if (filenames == NULL) { err = error_OOM; goto Failure; } err = hash_create(HASHSIZE, digestdb_hash, digestdb_compare, hash_no_destroy_key, hash_no_destroy_value, &hash); if (err) goto Failure; db = malloc(sizeof(*db)); if (db == NULL) { err = error_OOM; goto Failure; } db->filename = filenamecopy; db->filenames = filenames; db->hash = hash; /* read the database in */ err = pickle_unpickle(filename, db->hash, &pickle_writer_hash, &unformat_methods, db); if (err && err != error_PICKLE_COULDNT_OPEN_FILE) goto Failure; *pdb = db; return error_OK; Failure: free(db); hash_destroy(hash); atom_destroy(filenames); free(filenamecopy); return err; }
static void search_dictionaries(int out, Dlist *dic_list, char *rbuf) { Dlist_data *dd; Hash *word_hash; char word[SKKSERV_WORD_SIZE]; char result[SKKSERV_RESULT_SIZE]; char tmpresult[SKKSERV_RESULT_SIZE]; char *end; int i, p, r, len, rlen, ncandidates; if ((end = strchr(rbuf + 1, ' ')) == NULL) { rbuf[0] = SKKSERV_S_ERROR; write(out, rbuf, strlen(rbuf)); return; } len = end - (rbuf + 1); memcpy(word, rbuf + 1, len); word[len] = '\0'; word_hash = hash_create(HASH_SIZE); rlen = 1; ncandidates = 0; result[0] = SKKSERV_S_FOUND; dlist_iter(dic_list, dd) { Dictionary *dic = dlist_data(dd); pthread_mutex_lock(&dic->mutex); __cdb_findstart(dic); if ((r = __cdb_findnext(dic, word, len)) == -1) { err_message_fnc("cdb_findnext() failed.\n"); if (!ncandidates) { rbuf[0] = SKKSERV_S_ERROR; write(out, rbuf, strlen(rbuf)); } pthread_mutex_unlock(&dic->mutex); hash_destroy(word_hash); return; } debug_message_fnc("word %s, len %d, r = %d\n", word, len, r); if (r) { int dpos = cdb_datapos(&dic->cdb); int dlen = cdb_datalen(&dic->cdb); debug_message_fnc("%s found, r = %d, dpos = %d, dlen = %d.\n", word, r, dpos, dlen); if (rlen + dlen + 2 > SKKSERV_RESULT_SIZE) { err_message_fnc("Truncated: %s\n", word); r = SKKSERV_RESULT_SIZE - rlen - 2; } else { r = dlen; } debug_message_fnc("read %d bytes\n", r); if (cdb_read(&dic->cdb, tmpresult, r, dpos) == -1) { if (!ncandidates) { err_message_fnc("cdb_read() failed.\n"); rbuf[0] = SKKSERV_S_ERROR; write(out, rbuf, strlen(rbuf)); pthread_mutex_unlock(&dic->mutex); hash_destroy(word_hash); return; } else { result[rlen] = '\0'; continue; } } /* Merge */ p = 0; i = 1; while (i < r) { if (tmpresult[i] == '/') { if (i - p - 1 > 0 && hash_define_value(word_hash, tmpresult + p + 1, i - p - 1, (void *)1) == 1) { memcpy(result + rlen, tmpresult + p, i - p); rlen += i - p; ncandidates++; } p = i; } i++; } } pthread_mutex_unlock(&dic->mutex); }
/* Manual test driver: exercises the hash table with two kinds of stored
 * values — linked lists and binary trees. Nothing is asserted; results
 * are printed for visual inspection. Returns 0 on completion. */
int main() {
    struct hash *my_hash, *my_hash2;
    int x; /* unused */

    /* Testing Hash with Linked List items */
    struct list_t *root, *root2, *root3;
    int a=43,b=63,c=99,d=70,e=25,f=17,g=57,h=69,i=111,j=120,k=70,l=73,m=75;

    root = list_init();
    root2 = list_init();
    root3 = list_init();

    /* Removal operations on a fresh (empty) list first. */
    root = list_remove_rear(root);
    root = list_remove_any(root,&k);
    root = list_remove_front(root);

    /* Mixed inserts and removals on 'root'. */
    list_insert_rear(root, &a);
    list_insert_rear(root, &b);
    root = list_insert_after(root,&c,3);
    list_insert_rear(root, &d);
    root = list_insert_front(root, &e);
    root = list_insert_front(root, &f);
    list_insert_rear(root, &g);
    root = list_remove_front(root);
    list_insert_rear(root, &h);
    root = list_insert_after(root,&i,5);
    root = list_insert_after(root,&j,120);
    root = list_remove_any(root,&k);
    root = list_remove_front(root);
    list_insert_rear(root, &l);
    root = list_remove_any(root,&l);
    list_insert_rear(root, &m);

    /* Drain the list from the rear (more removals than elements, to
     * exercise removal from a shrinking/empty list). */
    root = list_remove_rear(root);
    root = list_remove_rear(root);
    root = list_remove_rear(root);
    root = list_remove_rear(root);
    root = list_remove_rear(root);
    root = list_remove_rear(root);
    root = list_remove_rear(root);

    /* Store the three lists in a "list"-typed hash and look one up. */
    my_hash = hash_init("list");
    hash_insert(my_hash, "key1", root);
    hash_insert(my_hash, "key2", root2);
    hash_insert(my_hash, "key3", root3);
    printf("%s\n",my_hash->hash_type);
    char* s_val = "key2";
    /* NOTE(review): the stored value is a struct list_t*, printed below
     * with %s — presumably hash_get returns something printable here;
     * confirm against hash_get's contract. */
    char* r_val = hash_get(my_hash, s_val);
    printf("%s - %s\n", s_val, r_val);
    hash_iterate(my_hash);
    hash_destroy(my_hash);

    /* Testing Hash with Binary Tree items */
    struct node *root_node = NULL, *root_node2 = NULL, *root_node3 = NULL;
    root_node = insert(root_node,5,NULL);
    root_node = delete_node(root_node,5);
    root_node = insert(root_node,7,NULL);
    root_node = insert(root_node,3,NULL);
    root_node = insert(root_node,6,NULL);
    root_node = insert(root_node,9,NULL);
    root_node = insert(root_node,12,NULL);
    root_node = insert(root_node,1,NULL);
    print_preorder(root_node);
    root_node = delete_node(root_node,7);

    /* Store the trees in a "tree"-typed hash and walk it. */
    my_hash2 = hash_init("tree");
    hash_insert(my_hash2, "key1", root_node);
    hash_insert(my_hash2, "key2", root_node2);
    hash_insert(my_hash2, "key3", root_node3);
    hash_iterate(my_hash2);
    hash_destroy(my_hash2);

    return 0;
}
/* Check an MH or maildir mailbox for external modification and new mail.
 * Re-scans the on-disk folder, reconciles flags for messages that still
 * exist, and marks vanished messages so the caller can reopen the mailbox.
 * Returns M_REOPENED if the folder was modified or messages vanished,
 * M_NEW_MAIL if only new mail arrived, 0 if nothing changed, and -1 on
 * stat() failure. */
int mh_check_mailbox(CONTEXT *ctx, int *index_hint)
{
    char buf[_POSIX_PATH_MAX], b1[LONG_STRING], b2[LONG_STRING];
    struct stat st, st_cur;
    short modified = 0, have_new = 0, occult = 0;
    struct maildir *md, *p;
    struct maildir **last;
    HASH *fnames;
    int i, j;

    if(!option (OPTCHECKNEW))
        return 0;

    /* Gather mtimes: for MH, the folder dir and .mh_sequences; for
     * maildir, the new/ and cur/ subdirectories. */
    if(ctx->magic == M_MH)
    {
        strfcpy(buf, ctx->path, sizeof(buf));
        if(stat(buf, &st) == -1)
            return -1;

        /* create .mh_sequences when there isn't one. */
        snprintf (buf, sizeof (buf), "%s/.mh_sequences", ctx->path);
        if (stat (buf, &st_cur) == -1)
        {
            if (errno == ENOENT)
            {
                char *tmp;
                FILE *fp = NULL;
                if (mh_mkstemp (ctx, &fp, &tmp) == 0)
                {
                    safe_fclose (&fp);
                    if (safe_rename (tmp, buf) == -1)
                        unlink (tmp);
                    safe_free ((void **) &tmp);
                }
                if (stat (buf, &st_cur) == -1)
                    modified = 1;
            }
            else
                modified = 1;
        }
    }
    else if(ctx->magic == M_MAILDIR)
    {
        snprintf(buf, sizeof(buf), "%s/new", ctx->path);
        if(stat(buf, &st) == -1)
            return -1;
        snprintf(buf, sizeof(buf), "%s/cur", ctx->path);
        if(stat(buf, &st_cur) == -1) /* XXX - name is bad. */
            modified = 1;
    }

    /* Compare on-disk mtimes against what we recorded at last check. */
    if(!modified && ctx->magic == M_MAILDIR && st_cur.st_mtime > ctx->mtime_cur)
        modified = 1;

    if(!modified && ctx->magic == M_MH && (st.st_mtime > ctx->mtime || st_cur.st_mtime > ctx->mtime_cur))
        modified = 1;

    if(modified || (ctx->magic == M_MAILDIR && st.st_mtime > ctx->mtime))
        have_new = 1;

    if(!modified && !have_new)
        return 0;

    ctx->mtime_cur = st_cur.st_mtime;
    ctx->mtime = st.st_mtime;

#if 0
    if(Sort != SORT_ORDER)
    {
        short old_sort;

        old_sort = Sort;
        Sort = SORT_ORDER;
        mutt_sort_headers(ctx, 1);
        Sort = old_sort;
    }
#endif

    /* Re-parse the folder from disk into the list anchored at md. */
    md = NULL;
    last = &md;

    if(ctx->magic == M_MAILDIR)
    {
        if(have_new)
            maildir_parse_dir(ctx, &last, "new", NULL);
        if(modified)
            maildir_parse_dir(ctx, &last, "cur", NULL);
    }
    else if(ctx->magic == M_MH)
    {
        struct mh_sequences mhs;
        memset (&mhs, 0, sizeof (mhs));
        maildir_parse_dir (ctx, &last, NULL, NULL);
        mh_read_sequences (&mhs, ctx->path);
        mh_update_maildir (md, &mhs);
        mhs_free_sequences (&mhs);
    }

    /* check for modifications and adjust flags */

    /* Index the freshly parsed messages by canonical filename. */
    fnames = hash_create (1031);

    for(p = md; p; p = p->next)
    {
        if(ctx->magic == M_MAILDIR)
        {
            maildir_canon_filename(b2, p->h->path, sizeof(b2));
            p->canon_fname = safe_strdup(b2);
        }
        else
            p->canon_fname = safe_strdup(p->h->path);

        hash_insert(fnames, p->canon_fname, p, 0);
    }

    /* Match each in-memory header against the re-scan; reconcile flags
     * for matches, and mark vanished messages via occult. */
    for(i = 0; i < ctx->msgcount; i++)
    {
        ctx->hdrs[i]->active = 0;

        if(ctx->magic == M_MAILDIR)
            maildir_canon_filename(b1, ctx->hdrs[i]->path, sizeof(b1));
        else
            strfcpy(b1, ctx->hdrs[i]->path, sizeof(b1));

        dprint(2, (debugfile, "%s:%d: mh_check_mailbox(): Looking for %s.\n", __FILE__, __LINE__, b1));

        if((p = hash_find(fnames, b1)) && p->h && mbox_strict_cmp_headers(ctx->hdrs[i], p->h))
        {
            /* found the right message */

            dprint(2, (debugfile, "%s:%d: Found. Flags before: %s%s%s%s%s\n", __FILE__, __LINE__, ctx->hdrs[i]->flagged ? "f" : "", ctx->hdrs[i]->deleted ? "D" : "", ctx->hdrs[i]->replied ? "r" : "", ctx->hdrs[i]->old ? "O" : "", ctx->hdrs[i]->read ? "R" : ""));

            if(mutt_strcmp(ctx->hdrs[i]->path, p->h->path))
                mutt_str_replace (&ctx->hdrs[i]->path, p->h->path);

            if(modified)
            {
                if(!ctx->hdrs[i]->changed)
                {
                    mutt_set_flag (ctx, ctx->hdrs[i], M_FLAG, p->h->flagged);
                    mutt_set_flag (ctx, ctx->hdrs[i], M_REPLIED, p->h->replied);
                    mutt_set_flag (ctx, ctx->hdrs[i], M_READ, p->h->read);
                }

                mutt_set_flag(ctx, ctx->hdrs[i], M_OLD, p->h->old);
            }

            ctx->hdrs[i]->active = 1;

            dprint(2, (debugfile, "%s:%d: Flags after: %s%s%s%s%s\n", __FILE__, __LINE__, ctx->hdrs[i]->flagged ? "f" : "", ctx->hdrs[i]->deleted ? "D" : "", ctx->hdrs[i]->replied ? "r" : "", ctx->hdrs[i]->old ? "O" : "", ctx->hdrs[i]->read ? "R" : ""));

            mutt_free_header(&p->h);
        }
        else if (ctx->magic == M_MAILDIR && !modified && !strncmp("cur/", ctx->hdrs[i]->path, 4))
        {
            /* If the cur/ part wasn't externally modified for a maildir
             * type folder, assume the message is still active. Actually,
             * we simply don't know.
             */
            ctx->hdrs[i]->active = 1;
        }
        else if (modified || (ctx->magic == M_MAILDIR && !strncmp("new/", ctx->hdrs[i]->path, 4)))
        {
            /* Mailbox was modified, or a new message vanished. */

            /* Note: This code will _not_ apply for a new message which
             * is just moved to cur/, as this would modify cur's time
             * stamp and lead to modified == 1. Thus, we'd have parsed
             * the complete folder above, and the message would have
             * been found in the look-up table.
             */
            dprint(2, (debugfile, "%s:%d: Not found. Flags were: %s%s%s%s%s\n", __FILE__, __LINE__, ctx->hdrs[i]->flagged ? "f" : "", ctx->hdrs[i]->deleted ? "D" : "", ctx->hdrs[i]->replied ? "r" : "", ctx->hdrs[i]->old ? "O" : "", ctx->hdrs[i]->read ? "R" : ""));
            occult = 1;
        }
    }

    /* destroy the file name hash */
    hash_destroy(&fnames, NULL);

    /* If we didn't just get new mail, update the tables. */
    if(modified || occult)
    {
        short old_sort;
        int old_count;

#ifndef LIBMUTT
        if (Sort != SORT_ORDER)
        {
            old_sort = Sort;
            Sort = SORT_ORDER;
            mutt_sort_headers (ctx, 1);
            Sort = old_sort;
        }
#endif

        /* Renumber surviving messages and keep index_hint pointing at
         * the same message after compaction. */
        old_count = ctx->msgcount;
        for (i = 0, j = 0; i < old_count; i++)
        {
            if (ctx->hdrs[i]->active && index_hint && *index_hint == i)
                *index_hint = j;

            if (ctx->hdrs[i]->active)
                ctx->hdrs[i]->index = j++;
        }

        mx_update_tables(ctx, 0);
    }

    /* Incorporate new messages */

    maildir_move_to_context(ctx, &md);

    return (modified || occult) ? M_REOPENED : have_new ? M_NEW_MAIL : 0;
}
/*
 * CompactCheckpointerRequestQueue
 *		Remove duplicates from the request queue to avoid backend fsyncs.
 *		Returns "true" if any entries were removed.
 *
 * Although a full fsync request queue is not common, it can lead to severe
 * performance problems when it does happen. So far, this situation has
 * only been observed to occur when the system is under heavy write load,
 * and especially during the "sync" phase of a checkpoint. Without this
 * logic, each backend begins doing an fsync for every block written, which
 * gets very expensive and can slow down the whole system.
 *
 * Trying to do this every time the queue is full could lose if there
 * aren't any removable entries. But that should be vanishingly rare in
 * practice: there's one queue entry per shared buffer.
 */
static bool
CompactCheckpointerRequestQueue(void)
{
	struct CheckpointerSlotMapping
	{
		CheckpointerRequest request;
		int			slot;
	};

	int			n,
				preserve_count;
	int			num_skipped = 0;
	HASHCTL		ctl;
	HTAB	   *htab;
	bool	   *skip_slot;

	/* must hold CheckpointerCommLock in exclusive mode */
	Assert(LWLockHeldByMe(CheckpointerCommLock));

	/* Initialize skip_slot array */
	skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);

	/* Initialize temporary hash table (keyed by the full request struct) */
	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(CheckpointerRequest);
	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
	ctl.hcxt = CurrentMemoryContext;

	htab = hash_create("CompactCheckpointerRequestQueue",
					   CheckpointerShmem->num_requests,
					   &ctl,
					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/*
	 * The basic idea here is that a request can be skipped if it's followed
	 * by a later, identical request. It might seem more sensible to work
	 * backwards from the end of the queue and check whether a request is
	 * *preceded* by an earlier, identical request, in the hopes of doing less
	 * copying. But that might change the semantics, if there's an
	 * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
	 * we do it this way. It would be possible to be even smarter if we made
	 * the code below understand the specific semantics of such requests (it
	 * could blow away preceding entries that would end up being canceled
	 * anyhow), but it's not clear that the extra complexity would buy us
	 * anything.
	 */
	for (n = 0; n < CheckpointerShmem->num_requests; n++)
	{
		CheckpointerRequest *request;
		struct CheckpointerSlotMapping *slotmap;
		bool		found;

		/*
		 * We use the request struct directly as a hashtable key. This
		 * assumes that any padding bytes in the structs are consistently the
		 * same, which should be okay because we zeroed them in
		 * CheckpointerShmemInit. Note also that RelFileNode had better
		 * contain no pad bytes.
		 */
		request = &CheckpointerShmem->requests[n];
		slotmap = hash_search(htab, request, HASH_ENTER, &found);
		if (found)
		{
			/* Duplicate, so mark the previous occurrence as skippable */
			skip_slot[slotmap->slot] = true;
			num_skipped++;
		}
		/* Remember slot containing latest occurrence of this request value */
		slotmap->slot = n;
	}

	/* Done with the hash table. */
	hash_destroy(htab);

	/* If no duplicates, we're out of luck. */
	if (!num_skipped)
	{
		pfree(skip_slot);
		return false;
	}

	/* We found some duplicates; remove them by compacting in place. */
	preserve_count = 0;
	for (n = 0; n < CheckpointerShmem->num_requests; n++)
	{
		if (skip_slot[n])
			continue;
		CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
	}
	ereport(DEBUG1,
			(errmsg("compacted fsync request queue from %d entries to %d entries",
					CheckpointerShmem->num_requests, preserve_count)));
	CheckpointerShmem->num_requests = preserve_count;

	/* Cleanup. */
	pfree(skip_slot);
	return true;
}
/* Verify that hash_create() yields a usable (non-NULL) table. */
void TestHashCreate(CuTest* tc)
{
    hash_t *table = hash_create();

    CuAssert(tc, "hash_create does not return NULL", NULL != table);
    hash_destroy(table);
}
/* Initialize an HTAB's bucket/segment/directory structures for 'nelem'
 * expected entries. Returns 0 on success, -1 on failure.
 * BUG FIX: on seg_alloc() failure the original destroyed the table and
 * then returned 0 (success), so hash_create() would hand back a destroyed
 * table. Return -1 instead and let the caller run its failure path
 * (which destroys the table exactly once). */
static int
init_htab (HTAB *hashp, int nelem)
{
    register SEG_OFFSET *segp;
    register int nbuckets;
    register int nsegs;
    int l2;
    HHDR *hctl;

    hctl = hashp->hctl;

    /*
     * Divide number of elements by the fill factor and determine a desired
     * number of buckets. Allocate space for the next greater power of
     * two number of buckets
     */
    nelem = (nelem - 1) / hctl->ffactor + 1;
    l2 = my_log2(nelem);
    nbuckets = 1 << l2;

    hctl->max_bucket = hctl->low_mask = nbuckets - 1;
    hctl->high_mask = (nbuckets << 1) - 1;

    /* Number of (power-of-two) segments needed to cover the buckets. */
    nsegs = (nbuckets - 1) / hctl->ssize + 1;
    nsegs = 1 << my_log2(nsegs);

    if ( nsegs > hctl->dsize ) {
        hctl->dsize = nsegs;
    }

    /* allocate a directory */
    if (!(hashp->dir)) {
        hashp->dir = (SEG_OFFSET *)hashp->alloc(hctl->dsize * sizeof(SEG_OFFSET));
        if (! hashp->dir)
            return(-1);
    }

    /* Allocate initial segments */
    for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++ ) {
        *segp = seg_alloc(hashp);
        if ( *segp == (SEG_OFFSET)0 ) {
            /* was: hash_destroy(hashp); return (0); — caller destroys */
            return (-1);
        }
    }

# if HASH_DEBUG
    fprintf(stderr, "%s\n%s%x\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
            "init_htab:",
            "TABLE POINTER   ", hashp,
            "BUCKET SIZE     ", hctl->bsize,
            "BUCKET SHIFT    ", hctl->bshift,
            "DIRECTORY SIZE  ", hctl->dsize,
            "SEGMENT SIZE    ", hctl->ssize,
            "SEGMENT SHIFT   ", hctl->sshift,
            "FILL FACTOR     ", hctl->ffactor,
            "MAX BUCKET      ", hctl->max_bucket,
            "HIGH MASK       ", hctl->high_mask,
            "LOW  MASK       ", hctl->low_mask,
            "NSEGS           ", hctl->nsegs,
            "NKEYS           ", hctl->nkeys
            );
# endif
    return (0);
}
/* Allocate and initialize a hash table. 'flags' selects which members of
 * 'info' are honored. Returns the new table or 0 on failure.
 * BUG FIX: the MEM_ALLOC result was passed to memset without a NULL
 * check, dereferencing NULL on out-of-memory. */
HTAB *
hash_create(int nelem, HASHCTL *info, int flags)
{
    register HHDR *hctl;
    HTAB *hashp;

    hashp = (HTAB *) MEM_ALLOC((unsigned long) sizeof(HTAB));
    if (! hashp) {
        return(0);
    }
    memset(hashp, 0, sizeof(HTAB));

    if ( flags & HASH_FUNCTION ) {
        hashp->hash = info->hash;
    } else {
        /* default */
        hashp->hash = string_hash;
    }

    if ( flags & HASH_SHARED_MEM ) {
        /* ctl structure is preallocated for shared memory tables */
        hashp->hctl = (HHDR *) info->hctl;
        hashp->segbase = (char *) info->segbase;
        hashp->alloc = info->alloc;
        hashp->dir = (SEG_OFFSET *)info->dir;

        /* hash table already exists, we're just attaching to it */
        if (flags & HASH_ATTACH) {
            return(hashp);
        }
    } else {
        /* setup hash table defaults */
        hashp->alloc = (dhalloc_ptr) MEM_ALLOC;
        hashp->dir = NULL;
        hashp->segbase = NULL;
    }

    /* NOTE(review): the early-failure returns below leak 'hashp'; the
     * matching MEM_FREE-style deallocator is not visible here — confirm
     * and free on these paths if one exists. */
    if (! hashp->hctl) {
        hashp->hctl = (HHDR *) hashp->alloc((unsigned long)sizeof(HHDR));
        if (! hashp->hctl) {
            return(0);
        }
    }

    if ( !hdefault(hashp) )
        return(0);
    hctl = hashp->hctl;
#ifdef HASH_STATISTICS
    hctl->accesses = hctl->collisions = 0;
#endif

    if ( flags & HASH_BUCKET ) {
        hctl->bsize = info->bsize;
        hctl->bshift = my_log2(info->bsize);
    }
    if ( flags & HASH_SEGMENT ) {
        hctl->ssize = info->ssize;
        hctl->sshift = my_log2(info->ssize);
    }
    if ( flags & HASH_FFACTOR ) {
        hctl->ffactor = info->ffactor;
    }

    /*
     * SHM hash tables have fixed maximum size (allocate
     * a maximal sized directory).
     */
    if ( flags & HASH_DIRSIZE ) {
        hctl->max_dsize = my_log2(info->max_size);
        hctl->dsize = my_log2(info->dsize);
    }
    /* hash table now allocates space for key and data
     * but you have to say how much space to allocate
     */
    if ( flags & HASH_ELEM ) {
        hctl->keysize = info->keysize;
        hctl->datasize = info->datasize;
    }

    if ( flags & HASH_ALLOC ) {
        hashp->alloc = info->alloc;
    }

    if ( init_htab (hashp, nelem ) ) {
        hash_destroy(hashp);
        return(0);
    }
    return(hashp);
}
/*
 * Semantic-action callback invoked at the end of a JSON object while
 * populating a record set: converts the key/value pairs collected in
 * _state->json_hash into one heap tuple and appends it to the result
 * tuplestore.  Consumes (destroys) the hash when done.
 *
 * 'state' is really a PopulateRecordsetState carrying the lexer, the
 * target tuple descriptor, per-column I/O metadata, an optional template
 * record, and the output tuplestore.
 */
static void
populate_recordset_object_end(void *state)
{
	PopulateRecordsetState _state = (PopulateRecordsetState) state;
	HTAB	   *json_hash = _state->json_hash;
	Datum	   *values;
	bool	   *nulls;
	char		fname[NAMEDATALEN];
	int			i;
	RecordIOData *my_extra = _state->my_extra;
	int			ncolumns = my_extra->ncolumns;
	TupleDesc	tupdesc = _state->ret_tdesc;
	JsonHashEntry hashentry;
	HeapTupleHeader rec = _state->rec;
	HeapTuple	rettuple;

	/* Only objects at the top nesting level produce output rows. */
	if (_state->lex->lex_level > 1)
		return;

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	if (_state->rec)
	{
		HeapTupleData tuple;

		/* Build a temporary HeapTuple control structure */
		tuple.t_len = HeapTupleHeaderGetDatumLength(_state->rec);
		ItemPointerSetInvalid(&(tuple.t_self));
		tuple.t_data = _state->rec;

		/* Break down the tuple into fields */
		heap_deform_tuple(&tuple, tupdesc, values, nulls);
	}
	else
	{
		/* No template record: start from an all-nulls row. */
		for (i = 0; i < ncolumns; ++i)
		{
			values[i] = (Datum) 0;
			nulls[i] = true;
		}
	}

	for (i = 0; i < ncolumns; ++i)
	{
		ColumnIOData *column_info = &my_extra->columns[i];
		Oid			column_type = tupdesc->attrs[i]->atttypid;
		char	   *value;

		/* Ignore dropped columns in datatype */
		if (tupdesc->attrs[i]->attisdropped)
		{
			nulls[i] = true;
			continue;
		}

		/*
		 * The memset before strncpy guarantees fname is NUL-terminated even
		 * if the attribute name fills NAMEDATALEN.
		 */
		memset(fname, 0, NAMEDATALEN);
		strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN);
		hashentry = hash_search(json_hash, fname, HASH_FIND, NULL);

		/*
		 * we can't just skip here if the key wasn't found since we might
		 * have a domain to deal with.  If we were passed in a non-null
		 * record datum, we assume that the existing values are valid (if
		 * they're not, then it's not our fault), but if we were passed in a
		 * null, then every field which we don't populate needs to be run
		 * through the input function just in case it's a domain type.
		 */
		if (hashentry == NULL && rec)
			continue;

		/*
		 * Prepare to convert the column value from text; cache the lookup
		 * so repeated rows with the same column type skip it.
		 */
		if (column_info->column_type != column_type)
		{
			getTypeInputInfo(column_type,
							 &column_info->typiofunc,
							 &column_info->typioparam);
			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
						  _state->fn_mcxt);
			column_info->column_type = column_type;
		}
		if (hashentry == NULL || hashentry->isnull)
		{
			/*
			 * need InputFunctionCall to happen even for nulls, so that
			 * domain checks are done
			 */
			values[i] = InputFunctionCall(&column_info->proc, NULL,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = true;
		}
		else
		{
			value = hashentry->val;
			values[i] = InputFunctionCall(&column_info->proc, value,
										  column_info->typioparam,
										  tupdesc->attrs[i]->atttypmod);
			nulls[i] = false;
		}
	}

	rettuple = heap_form_tuple(tupdesc, values, nulls);

	tuplestore_puttuple(_state->tuple_store, rettuple);

	/* The per-object hash is single-use; release it now. */
	hash_destroy(json_hash);
}
/* Process one per-dbspace directory for ResetUnloggedRelations */ static void ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op) { DIR *dbspace_dir; struct dirent *de; char rm_path[MAXPGPATH]; /* Caller must specify at least one operation. */ Assert((op & (UNLOGGED_RELATION_CLEANUP | UNLOGGED_RELATION_INIT)) != 0); /* * Cleanup is a two-pass operation. First, we go through and identify all * the files with init forks. Then, we go through again and nuke * everything with the same OID except the init fork. */ if ((op & UNLOGGED_RELATION_CLEANUP) != 0) { HTAB *hash = NULL; HASHCTL ctl; /* Open the directory. */ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); return; } /* * It's possible that someone could create a ton of unlogged relations * in the same database & tablespace, so we'd better use a hash table * rather than an array or linked list to keep track of which files * need to be reset. Otherwise, this cleanup operation would be * O(n^2). */ ctl.keysize = sizeof(unlogged_relation_entry); ctl.entrysize = sizeof(unlogged_relation_entry); hash = hash_create("unlogged hash", 32, &ctl, HASH_ELEM); /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; unlogged_relation_entry ent; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* Also skip it unless this is the init fork. */ if (forkNum != INIT_FORKNUM) continue; /* * Put the OID portion of the name into the hash table, if it * isn't already. */ memset(ent.oid, 0, sizeof(ent.oid)); memcpy(ent.oid, de->d_name, oidchars); hash_search(hash, &ent, HASH_ENTER, NULL); } /* Done with the first pass. */ FreeDir(dbspace_dir); /* * If we didn't find any init forks, there's no point in continuing; * we can bail out now. 
*/ if (hash_get_num_entries(hash) == 0) { hash_destroy(hash); return; } /* * Now, make a second pass and remove anything that matches. First, * reopen the directory. */ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); hash_destroy(hash); return; } /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; bool found; unlogged_relation_entry ent; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* We never remove the init fork. */ if (forkNum == INIT_FORKNUM) continue; /* * See whether the OID portion of the name shows up in the hash * table. */ memset(ent.oid, 0, sizeof(ent.oid)); memcpy(ent.oid, de->d_name, oidchars); hash_search(hash, &ent, HASH_FIND, &found); /* If so, nuke it! */ if (found) { snprintf(rm_path, sizeof(rm_path), "%s/%s", dbspacedirname, de->d_name); /* * It's tempting to actually throw an error here, but since * this code gets run during database startup, that could * result in the database failing to start. (XXX Should we do * it anyway?) */ if (unlink(rm_path)) elog(LOG, "could not unlink file \"%s\": %m", rm_path); else elog(DEBUG2, "unlinked file \"%s\"", rm_path); } } /* Cleanup is complete. */ FreeDir(dbspace_dir); hash_destroy(hash); } /* * Initialization happens after cleanup is complete: we copy each init * fork file to the corresponding main fork file. Note that if we are * asked to do both cleanup and init, we may never get here: if the * cleanup code determines that there are no init forks in this dbspace, * it will return before we get to this point. */ if ((op & UNLOGGED_RELATION_INIT) != 0) { /* Open the directory. 
*/ dbspace_dir = AllocateDir(dbspacedirname); if (dbspace_dir == NULL) { /* we just saw this directory, so it really ought to be there */ elog(LOG, "could not open dbspace directory \"%s\": %m", dbspacedirname); return; } /* Scan the directory. */ while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL) { ForkNumber forkNum; int oidchars; char oidbuf[OIDCHARS + 1]; char srcpath[MAXPGPATH]; char dstpath[MAXPGPATH]; /* Skip anything that doesn't look like a relation data file. */ if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &forkNum)) continue; /* Also skip it unless this is the init fork. */ if (forkNum != INIT_FORKNUM) continue; /* Construct source pathname. */ snprintf(srcpath, sizeof(srcpath), "%s/%s", dbspacedirname, de->d_name); /* Construct destination pathname. */ memcpy(oidbuf, de->d_name, oidchars); oidbuf[oidchars] = '\0'; snprintf(dstpath, sizeof(dstpath), "%s/%s%s", dbspacedirname, oidbuf, de->d_name + oidchars + 1 + strlen(forkNames[INIT_FORKNUM])); /* OK, we're ready to perform the actual copy. */ elog(DEBUG2, "copying %s to %s", srcpath, dstpath); copy_file(srcpath, dstpath); } /* Done with the first pass. */ FreeDir(dbspace_dir); } }
/**
 * \brief Destroy a supplemental page table.
 *
 * Delegates to hash_destroy(), passing page_destroy_action as the
 * per-element cleanup callback so each entry is released along with
 * the table itself.
 *
 * \param table  supplemental page table to be destroyed
 */
void
page_destroy_table (struct hash *table)
{
  hash_destroy (table, page_destroy_action);
}