bool consolidate_gpos_single(otfcc_Font *font, table_OTL *table, otl_Subtable *_subtable,
                             const otfcc_Options *options) {
    subtable_gpos_single *subtable = &(_subtable->gpos_single);
    gpos_single_hash *h = NULL;
    for (glyphid_t k = 0; k < subtable->length; k++) {
        if (!GlyphOrder.consolidateHandle(font->glyph_order, &subtable->items[k].target)) {
            logWarning("[Consolidate] Ignored missing glyph /%s.\n", subtable->items[k].target.name);
            continue;
        }
        gpos_single_hash *s;
        int fromid = subtable->items[k].target.index;
        HASH_FIND_INT(h, &fromid, s);
        if (s) {
            logWarning("[Consolidate] Detected glyph double-mapping about /%s.\n",
                       subtable->items[k].target.name);
        } else {
            NEW(s);
            s->fromid = subtable->items[k].target.index;
            s->fromname = sdsdup(subtable->items[k].target.name);
            s->v = subtable->items[k].value;
            HASH_ADD_INT(h, fromid, s);
        }
    }
    HASH_SORT(h, gpos_by_from_id);
    iSubtable_gpos_single.clear(subtable);
    gpos_single_hash *s, *tmp;
    HASH_ITER(hh, h, s, tmp) {
        iSubtable_gpos_single.push(subtable,
                                   ((otl_GposSingleEntry){
                                       .target = Handle.fromConsolidated(s->fromid, s->fromname),
                                       .value = s->v,
                                   }));
        /* free each hash entry as it is consumed (same pattern as the
           multi-substitution consolidation elsewhere in this codebase) */
        sdsfree(s->fromname);
        HASH_DEL(h, s);
        FREE(s);
    }
    return true;
}
caryll_buffer *caryll_write_gsub_ligature_subtable(otl_subtable *_subtable) {
    caryll_buffer *buf = bufnew();
    subtable_gsub_ligature *subtable = &(_subtable->gsub_ligature);
    ligature_aggerator *h = NULL, *s, *tmp;
    uint16_t nLigatures = subtable->to->numGlyphs;
    // aggregate ligatures by their first component glyph
    for (uint16_t j = 0; j < nLigatures; j++) {
        int sgid = subtable->from[j]->glyphs[0].gid;
        HASH_FIND_INT(h, &sgid, s);
        if (!s) {
            NEW(s);
            s->gid = sgid;
            s->ligid = HASH_COUNT(h);
            HASH_ADD_INT(h, gid, s);
        }
    }
    HASH_SORT(h, by_gid);
    // build the start coverage from the sorted first-component glyphs
    otl_coverage *startCoverage;
    NEW(startCoverage);
    startCoverage->numGlyphs = HASH_COUNT(h);
    NEW_N(startCoverage->glyphs, startCoverage->numGlyphs);
    uint16_t jj = 0;
    foreach_hash(s, h) {
        s->ligid = jj;
        startCoverage->glyphs[jj].gid = s->gid;
        startCoverage->glyphs[jj].name = NULL;
        jj++;
    }
int main(int argc, char *argv[]) {
    name_rec *name, *names = NULL;
    char linebuf[BUFLEN];
    FILE *file;

    file = fopen("test11.dat", "r");
    if (file == NULL) {
        perror("can't open: ");
        exit(-1);
    }

    while (fgets(linebuf, BUFLEN, file) != NULL) {
        name = (name_rec *)malloc(sizeof(name_rec));
        if (name == NULL) {
            exit(-1);
        }
        strcpy(name->boy_name, linebuf);
        HASH_ADD_STR(names, boy_name, name);
    }
    fclose(file);

    HASH_SORT(names, namecmp);
    for (name = names; name != NULL; name = (name_rec *)(name->hh.next)) {
        printf("%s", name->boy_name);
    }
    return 0;
}
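The program above relies on a name_rec type and a namecmp comparator that the excerpt does not show. Under uthash's convention, a HASH_SORT comparator takes two element pointers and returns a strcmp-style result. A minimal sketch that would make the example compile (the field size and BUFLEN value are assumptions):

#include <string.h>
#include "uthash.h"

#define BUFLEN 32 /* assumed; must be large enough for one input line */

typedef struct name_rec {
    char boy_name[BUFLEN]; /* key: HASH_ADD_STR hashes this string field */
    UT_hash_handle hh;     /* makes the structure usable with uthash */
} name_rec;

/* strcmp-style comparator for HASH_SORT: negative, zero, or positive */
int namecmp(name_rec *a, name_rec *b) {
    return strcmp(a->boy_name, b->boy_name);
}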
static void dump_tree(struct node *root, int level, unsigned int nb_lines, struct node **uniq) {
    struct node *current;
    struct node *search;
    int should_dump;

    if (root == NULL)
        return;
    HASH_SORT(root, weight_sort);
    for (current = root; current != NULL; current = current->hh.next) {
        should_dump = 1;
        if (level == 0) {
            HASH_FIND_STR(*uniq, current->word, search);
            should_dump = search == NULL;
            kick_all_uniques(current, uniq);
        }
        if (should_dump && (level > 0 || count_childs(current->childs) > 0)) {
            printf("%*s[%d] %s ", 4 * level, "", current->weight, current->word);
            dump_tree(try_flat_dump(current->childs), level + 1, nb_lines, uniq);
        }
    }
}
/*
 * prints the indexed words and counts to the given output file
 */
void printIndexToFile(char *outputFile, int upDir) {
    if (upDir) {
        chdir("..");
    }
    FILE *fp;
    fp = fopen(outputFile, "w+");
    HASH_SORT(words, name_sort);
    int isFirstIter = 1;
    TokenPtr tmp, currentWord;
    HASH_ITER(hh, words, currentWord, tmp) {
        if (isFirstIter) {
            isFirstIter = 0;
        } else {
            fprintf(fp, "\n");
        }
        fprintf(fp, "<list> ");
        fprintf(fp, "%s\n", currentWord->key);
        LL_SORT(currentWord->fileHead, count_sort);
        /* renamed from `tmp`, which shadowed HASH_ITER's iteration variable */
        FileNodePtr fileNode = currentWord->fileHead;
        fprintf(fp, "%s %d", fileNode->filename, fileNode->tokenCount);
        while (fileNode->next != NULL) {
            fileNode = fileNode->next;
            fprintf(fp, " %s %d", fileNode->filename, fileNode->tokenCount);
        }
        fprintf(fp, "\n</list>");
    }
    fclose(fp);
}
void FileListModel::loadFileList() {
    beginResetModel();
    m_fileList.clear();
    FcitxStringHashSet *files =
        FcitxXDGGetFiles(dictDir().toAscii().constData(), NULL, ".txt");
    HASH_SORT(files, fcitx_utils_string_hash_set_compare);
    HASH_FOREACH(f, files, FcitxStringHashSet) {
        m_fileList.append(QString::fromLocal8Bit(f->name).prepend(dictDir() + "/"));
    }
    fcitx_utils_free_string_hash_set(files); // assumed cleanup via the fcitx-utils helper
    endResetModel(); // must pair with beginResetModel() above
}
/**
 * Marks that an author has at least one book, in author_table.
 *
 * @param int idnum The author's player idnum.
 */
void add_book_author(int idnum) {
    struct author_data *auth;

    HASH_FIND_INT(author_table, &idnum, auth);
    if (!auth) {
        CREATE(auth, struct author_data, 1);
        auth->idnum = idnum;
        HASH_ADD_INT(author_table, idnum, auth);
        HASH_SORT(author_table, author_sort);
    }
}
void fcitx::FileListModel::loadFileList() {
    beginResetModel();
    m_fileList.clear();
    m_fileList.append(QUICK_PHRASE_CONFIG_FILE);
    FcitxStringHashSet *files =
        FcitxXDGGetFiles(QUICK_PHRASE_CONFIG_DIR, NULL, ".mb");
    HASH_SORT(files, fcitx_utils_string_hash_set_compare);
    HASH_FOREACH(f, files, FcitxStringHashSet) {
        m_fileList.append(QString::fromLocal8Bit(f->name).prepend(
            QUICK_PHRASE_CONFIG_DIR "/"));
    }
    fcitx_utils_free_string_hash_set(files); // assumed cleanup via the fcitx-utils helper
    endResetModel(); // must pair with beginResetModel() above
}
bool consolidate_gsub_single(caryll_font *font, table_otl *table, otl_subtable *_subtable,
                             sds lookupName) {
    subtable_gsub_single *subtable = &(_subtable->gsub_single);
    consolidate_coverage(font, subtable->from, lookupName);
    consolidate_coverage(font, subtable->to, lookupName);
    // pair "from" and "to" entries up to the shorter coverage
    uint16_t len = (subtable->from->numGlyphs < subtable->to->numGlyphs
                        ? subtable->from->numGlyphs
                        : subtable->to->numGlyphs);
    gsub_single_map_hash *h = NULL;
    for (uint16_t k = 0; k < len; k++) {
        if (subtable->from->glyphs[k].name && subtable->to->glyphs[k].name) {
            gsub_single_map_hash *s;
            int fromid = subtable->from->glyphs[k].gid;
            HASH_FIND_INT(h, &fromid, s);
            if (s) {
                fprintf(stderr,
                        "[Consolidate] Double-mapping a glyph in a "
                        "single substitution /%s.\n",
                        subtable->from->glyphs[k].name);
            } else {
                NEW(s);
                s->fromid = subtable->from->glyphs[k].gid;
                s->toid = subtable->to->glyphs[k].gid;
                s->fromname = subtable->from->glyphs[k].name;
                s->toname = subtable->to->glyphs[k].name;
                HASH_ADD_INT(h, fromid, s);
            }
        }
    }
    HASH_SORT(h, by_from_id);
    if (HASH_COUNT(h) != subtable->from->numGlyphs || HASH_COUNT(h) != subtable->to->numGlyphs) {
        fprintf(stderr,
                "[Consolidate] In single substitution lookup %s, some "
                "mappings are ignored.\n",
                lookupName);
    }
    subtable->from->numGlyphs = HASH_COUNT(h);
    subtable->to->numGlyphs = HASH_COUNT(h);
    FREE(subtable->from->glyphs);
    FREE(subtable->to->glyphs);
    NEW_N(subtable->from->glyphs, subtable->from->numGlyphs);
    NEW_N(subtable->to->glyphs, subtable->to->numGlyphs);
    {
        gsub_single_map_hash *s, *tmp;
        uint16_t j = 0;
        HASH_ITER(hh, h, s, tmp) {
            subtable->from->glyphs[j].gid = s->fromid;
            subtable->from->glyphs[j].name = s->fromname;
            subtable->to->glyphs[j].gid = s->toid;
            subtable->to->glyphs[j].name = s->toname;
            j++;
            HASH_DEL(h, s);
            free(s);
        }
    }
    return true;
}
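The by_from_id comparator is not part of the excerpt. Given that the hash entries carry an integer fromid (the field used with HASH_FIND_INT above), a plausible one-line definition is:

/* ascending order by source glyph id (sketch; not shown in the excerpt) */
static int by_from_id(gsub_single_map_hash *a, gsub_single_map_hash *b) {
    return a->fromid - b->fromid;
}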
int main(int argc, char *argv[]) {
    int i;
    example_user_t *user, *users = NULL;

    /* create elements */
    for (i = 9; i >= 0; i--) {
        if ((user = (example_user_t *)malloc(sizeof(example_user_t))) == NULL) exit(-1);
        user->id = i;
        user->cookie = i * i;
        HASH_ADD_INT(users, id, user);
    }
    for (user = users; user != NULL; user = (example_user_t *)user->hh.next) {
        printf("user %d, cookie %d\n", user->id, user->cookie);
    }

    printf("sorting\n");
    HASH_SORT(users, rev);
    for (user = users; user != NULL; user = (example_user_t *)user->hh.next) {
        printf("user %d, cookie %d\n", user->id, user->cookie);
    }

    printf("adding 10-20\n");
    for (i = 20; i >= 10; i--) {
        if ((user = (example_user_t *)malloc(sizeof(example_user_t))) == NULL) exit(-1);
        user->id = i;
        user->cookie = i * i;
        HASH_ADD_INT(users, id, user);
    }
    for (user = users; user != NULL; user = (example_user_t *)user->hh.next) {
        printf("user %d, cookie %d\n", user->id, user->cookie);
    }

    printf("sorting\n");
    HASH_SORT(users, rev);
    for (user = users; user != NULL; user = (example_user_t *)user->hh.next) {
        printf("user %d, cookie %d\n", user->id, user->cookie);
    }
    return 0;
}
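As in the previous example, the comparator is left undefined. Judging by its name and the uthash convention, rev presumably sorts in descending id order; a sketch, with the struct layout inferred from the fields the example touches:

#include "uthash.h"

typedef struct example_user {
    int id;            /* hash key */
    int cookie;
    UT_hash_handle hh;
} example_user_t;

/* descending order by id: returns >0 when a should come after b */
int rev(example_user_t *a, example_user_t *b) {
    return b->id - a->id;
}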
struct document_frequencies_hash *frequencies_hash_to_bins(struct score_hash *scores) {
    struct score_hash *s;
    struct document_frequencies_hash *tmp, *bins_hash = NULL;
    int bin_index = 0;

    HASH_SORT(scores, ascending_score_sort);
    for (s = scores; s != NULL; s = s->hh.next) {
        if (s->score >= 10.5) { break; } // Filter >4 frequent words
        tmp = (struct document_frequencies_hash *)malloc(sizeof(struct document_frequencies_hash));
        tmp->word = strdup(s->word);
        tmp->count = ++bin_index;
        HASH_ADD_KEYPTR(hh, bins_hash, tmp->word, strlen(tmp->word), tmp);
    }
    fprintf(stderr, "Total bins: %d\n", bin_index);
    return bins_hash;
}
static void print_global_var_table(shmem_trace_t msgtype) {
    globalvar_t *g;
    globalvar_t *tmp;

    if (!shmemi_trace_is_enabled(msgtype)) {
        return;
    }

    shmemi_trace(msgtype, "-- start hash table --");

    HASH_SORT(gvp, addr_sort);

    HASH_ITER(hh, gvp, g, tmp) {
        shmemi_trace(msgtype, "address %p: name \"%s\", size %ld",
                     g->addr, g->name, g->size);
    }
}
int transfer(char *host, char *port) {
    int sfd;     // server socket
    fileop_t *f; // contains all operations for a file

    sfd = inetConnect(host, port, SOCK_STREAM);
    if (sfd == -1) {
        errnoMsg(LOG_ERR, "Unable to connect to the server.");
        return -1;
    }

    // initiate sync (sync-id, resource, number of files)
    errMsg(LOG_INFO, "Number of files: %d", HASH_COUNT(files));
    switch (initiateSync(sfd, HASH_COUNT(files))) {
    case -1: // error
        return -1;
    case -2: // there's nothing to synchronize
        return 0;
    default:
        break;
    }

    // iterate over files in correct order
    HASH_SORT(files, sortByOrder);
    for (f = files; f != NULL; f = (fileop_t *)(f->hh.next)) {
        if (transferFile(sfd, f) == -1) {
            errMsg(LOG_ERR, "Transfer of file %s has failed.", f->relpath);
            return -1;
        }
    }

    // wait for ACK
    SyncFinish *conf;
    conf = (SyncFinish *)recvMessage(sfd, SyncFinishType, NULL);
    if (conf == NULL) {
        errMsg(LOG_ERR, "Could not get transfer confirmation.");
        return -1;
    }
    return 0;
}
bool consolidate_gsub_multi(caryll_font *font, table_otl *table, otl_subtable *_subtable,
                            sds lookupName) {
    subtable_gsub_multi *subtable = &(_subtable->gsub_multi);
    consolidate_coverage(font, subtable->from, lookupName);
    for (uint16_t j = 0; j < subtable->from->numGlyphs; j++) {
        consolidate_coverage(font, subtable->to[j], lookupName);
        shrink_coverage(subtable->to[j], false);
    }
    gsub_multi_hash *h = NULL;
    for (uint16_t k = 0; k < subtable->from->numGlyphs; k++) {
        if (subtable->from->glyphs[k].name) {
            gsub_multi_hash *s;
            int fromid = subtable->from->glyphs[k].gid;
            HASH_FIND_INT(h, &fromid, s);
            if (!s) {
                NEW(s);
                s->fromid = subtable->from->glyphs[k].gid;
                s->fromname = subtable->from->glyphs[k].name;
                s->to = subtable->to[k];
                HASH_ADD_INT(h, fromid, s);
            } else {
                caryll_delete_coverage(subtable->to[k]);
            }
        } else {
            caryll_delete_coverage(subtable->to[k]);
        }
    }
    HASH_SORT(h, by_from_id_multi);
    subtable->from->numGlyphs = HASH_COUNT(h);
    {
        gsub_multi_hash *s, *tmp;
        uint16_t j = 0;
        HASH_ITER(hh, h, s, tmp) {
            subtable->from->glyphs[j].gid = s->fromid;
            subtable->from->glyphs[j].name = s->fromname;
            subtable->to[j] = s->to;
            j++;
            HASH_DEL(h, s);
            free(s);
        }
    }
    return true;
}
bool consolidate_gpos_single(caryll_font *font, table_otl *table, otl_subtable *_subtable,
                             sds lookupName) {
    subtable_gpos_single *subtable = &(_subtable->gpos_single);
    consolidate_coverage(font, subtable->coverage, lookupName);
    gpos_single_hash *h = NULL;
    for (uint16_t k = 0; k < subtable->coverage->numGlyphs; k++) {
        if (subtable->coverage->glyphs[k].name) {
            gpos_single_hash *s;
            int fromid = subtable->coverage->glyphs[k].gid;
            HASH_FIND_INT(h, &fromid, s);
            if (s) {
                fprintf(stderr,
                        "[Consolidate] Double-mapping a glyph in a "
                        "single positioning /%s.\n",
                        subtable->coverage->glyphs[k].name);
            } else {
                NEW(s);
                s->fromid = subtable->coverage->glyphs[k].gid;
                s->fromname = subtable->coverage->glyphs[k].name;
                s->v = subtable->values[k];
                HASH_ADD_INT(h, fromid, s);
            }
        }
    }
    HASH_SORT(h, gpos_by_from_id);
    subtable->coverage->numGlyphs = HASH_COUNT(h);
    {
        gpos_single_hash *s, *tmp;
        uint16_t j = 0;
        HASH_ITER(hh, h, s, tmp) {
            subtable->coverage->glyphs[j].gid = s->fromid;
            subtable->coverage->glyphs[j].name = s->fromname;
            subtable->values[j] = s->v;
            j++;
            HASH_DEL(h, s);
            free(s);
        }
    }
    return true;
}
bool consolidate_gsub_multi(otfcc_Font *font, table_OTL *table, otl_Subtable *_subtable,
                            const otfcc_Options *options) {
    subtable_gsub_multi *subtable = &(_subtable->gsub_multi);
    gsub_multi_hash *h = NULL;
    for (glyphid_t k = 0; k < subtable->length; k++) {
        if (!GlyphOrder.consolidateHandle(font->glyph_order, &subtable->items[k].from)) {
            logWarning("[Consolidate] Ignored missing glyph /%s.\n", subtable->items[k].from.name);
            continue;
        }
        fontop_consolidateCoverage(font, subtable->items[k].to, options);
        Coverage.shrink(subtable->items[k].to, false);
        gsub_multi_hash *s;
        int fromid = subtable->items[k].from.index;
        HASH_FIND_INT(h, &fromid, s);
        if (!s) {
            NEW(s);
            s->fromid = subtable->items[k].from.index;
            s->fromname = sdsdup(subtable->items[k].from.name);
            s->to = subtable->items[k].to;
            subtable->items[k].to = NULL; // Transfer ownership
            HASH_ADD_INT(h, fromid, s);
        }
    }
    HASH_SORT(h, by_from_id_multi);
    iSubtable_gsub_multi.clear(subtable);
    {
        gsub_multi_hash *s, *tmp;
        HASH_ITER(hh, h, s, tmp) {
            iSubtable_gsub_multi.push(subtable,
                                      ((otl_GsubMultiEntry){
                                          .from = Handle.fromConsolidated(s->fromid, s->fromname),
                                          .to = s->to,
                                      }));
            sdsfree(s->fromname);
            HASH_DEL(h, s);
            FREE(s);
        }
    }
    return true;
}
static void consolidateMarkArray(caryll_font *font, table_otl *table, sds lookupName,
                                 otl_coverage *marks, otl_mark_array *markArray,
                                 uint16_t classCount) {
    mark_hash *hm = NULL;
    for (uint16_t k = 0; k < marks->numGlyphs; k++) {
        if (marks->glyphs[k].name) {
            mark_hash *s = NULL;
            HASH_FIND_INT(hm, &(marks->glyphs[k].gid), s);
            if (!s && markArray->records[k].anchor.present &&
                markArray->records[k].markClass < classCount) {
                NEW(s);
                s->gid = marks->glyphs[k].gid;
                s->name = marks->glyphs[k].name;
                s->markrec = markArray->records[k];
                HASH_ADD_INT(hm, gid, s);
            } else {
                fprintf(stderr,
                        "[Consolidate] Ignored invalid or "
                        "double-mapping mark definition for /%s in "
                        "lookup %s.\n",
                        marks->glyphs[k].name, lookupName);
            }
        }
    }
    HASH_SORT(hm, mark_by_gid);
    marks->numGlyphs = HASH_COUNT(hm);
    markArray->markCount = HASH_COUNT(hm);
    mark_hash *s, *tmp;
    uint16_t k = 0;
    HASH_ITER(hh, hm, s, tmp) {
        marks->glyphs[k].gid = s->gid;
        marks->glyphs[k].name = s->name;
        markArray->records[k] = s->markrec;
        k++;
        HASH_DEL(hm, s);
        free(s);
    }
}
int main(int argc, char *argv[]) {
    struct timeval starttime, endtime;
    double runTime = 0.0;
    gettimeofday(&starttime, NULL);

    // read the whole file into a buffer
    FILE *f = fopen("filename.txt", "rb");
    fseek(f, 0, SEEK_END);
    long fsize = ftell(f);
    fseek(f, 0, SEEK_SET);
    char *buffer = malloc(fsize + 1);
    fread(buffer, fsize, 1, f);
    fclose(f);
    buffer[fsize] = 0;

    int convert;
    char temp[1];
    char word[30] = "";
    char emp[30] = "";
    const char **n, *names[] = { "joe", "bob", "betty", NULL };
    struct my_struct *s, *tmp, *wordcount = NULL;
    int i = 0;

#pragma acc region
    {
#pragma acc loop
        for (i = 0; i < fsize; i++) {
            if (buffer[i] != '\0') {
                convert = buffer[i];
                temp[0] = buffer[i];
                if (((convert >= 65) && (convert <= 90)) ||
                    ((convert >= 97) && (convert <= 122))) {
                    strncat(word, temp, 1);
                } else if (strcmp(word, emp) != 0) {
                    HASH_FIND_STR(wordcount, word, s);
                    if (s != NULL) {
                        s->id += 1;
                        strcpy(word, emp);
                    } else {
                        s = (struct my_struct *)malloc(sizeof(struct my_struct));
                        strncpy(s->name, word, 30);
                        s->id = 1;
                        HASH_ADD_STR(wordcount, name, s);
                        strcpy(word, emp);
                    }
                }
            }
        }
    }

    HASH_SORT(wordcount, name_sort);
    for (s = wordcount; s != NULL; s = s->hh.next) {
        printf("%s : %d\n", s->name, s->id);
    }

    /* free the hash table contents */
    HASH_ITER(hh, wordcount, s, tmp) {
        HASH_DEL(wordcount, s);
        free(s);
    }

    /* report elapsed wall-clock time (uses the timers declared above) */
    gettimeofday(&endtime, NULL);
    runTime = (endtime.tv_sec - starttime.tv_sec) +
              (endtime.tv_usec - starttime.tv_usec) / 1e6;
    printf("Run time: %f seconds\n", runTime);
    return 0;
}
struct my_struct *sort_by_fre(struct my_struct *users) {
    /* sort by frequency */
    HASH_SORT(users, fre_sort);
    return users;
}
void hash_sort_by_model_id() { HASH_SORT(models, hash_model_id_sort); }
void sort_by_name() { HASH_SORT(users, name_sort); }
void sort_by_id() { HASH_SORT(users, id_sort); }
void hashsort() { HASH_SORT(indexs, sort_by_token); }
void update_st_stats(struct pkt_record *pkt) {
    struct pkt_record *table_entry;
    struct pkt_list_entry *ple, *tmp, *titer;
    struct timeval max_age = {.tv_sec = 0, .tv_usec = 5E5};

    /* maintain a short-term history of packets */
    ple = malloc(sizeof(struct pkt_list_entry));
    ple->pkt = *pkt;
    DL_APPEND(st_pkt_list_head, ple);
    DL_FOREACH_SAFE(st_pkt_list_head, titer, tmp) {
        if (has_aged(ple, titer, max_age)) {
            HASH_FIND(hh, st_flow_table, &(titer->pkt.flow),
                      sizeof(struct flow), table_entry);
            assert(table_entry);
            table_entry->len -= titer->pkt.len;
            if (0 == table_entry->len) {
                HASH_DEL(st_flow_table, table_entry);
            }
            DL_DELETE(st_pkt_list_head, titer);
            free(titer);
        } else {
            break;
        }
    }

    /* Update the flow accounting table */
    /* id already in the hash? */
    HASH_FIND(hh, st_flow_table, &(pkt->flow), sizeof(struct flow), table_entry);
    if (!table_entry) {
        table_entry = (struct pkt_record *)malloc(sizeof(struct pkt_record));
        memset(table_entry, 0, sizeof(struct pkt_record));
        memcpy(table_entry, pkt, sizeof(struct pkt_record));
        HASH_ADD(hh, st_flow_table, flow, sizeof(struct flow), table_entry);
    } else {
        table_entry->len += pkt->len;
    }

    HASH_SORT(st_flow_table, bytes_cmp);
}

void update_lt_stats(struct pkt_record *pkt) {
    struct pkt_record *table_entry;
    struct pkt_list_entry *ple, *tmp, *titer;
    struct timeval max_age = {.tv_sec = 60, .tv_usec = 0};

    /* maintain a long-term history of packets */
    ple = malloc(sizeof(struct pkt_list_entry));
    ple->pkt = *pkt;
    DL_APPEND(lt_pkt_list_head, ple);
    DL_FOREACH_SAFE(lt_pkt_list_head, titer, tmp) {
        if (has_aged(ple, titer, max_age)) {
            HASH_FIND(hh, lt_flow_table, &(titer->pkt.flow),
                      sizeof(struct flow), table_entry);
            assert(table_entry);
            table_entry->len -= titer->pkt.len;
            if (0 == table_entry->len) {
                HASH_DEL(lt_flow_table, table_entry);
            }
            DL_DELETE(lt_pkt_list_head, titer);
            free(titer);
        } else {
            break;
        }
    }

    /* Update the flow accounting table */
    /* id already in the hash? */
    HASH_FIND(hh, lt_flow_table, &(pkt->flow), sizeof(struct flow), table_entry);
    if (!table_entry) {
        table_entry = (struct pkt_record *)malloc(sizeof(struct pkt_record));
        memset(table_entry, 0, sizeof(struct pkt_record));
        memcpy(table_entry, pkt, sizeof(struct pkt_record));
        HASH_ADD(hh, lt_flow_table, flow, sizeof(struct flow), table_entry);
    } else {
        table_entry->len += pkt->len;
    }

    HASH_SORT(lt_flow_table, bytes_cmp);
}

void update_stats_tables(struct pkt_record *pkt) {
    update_st_stats(pkt);
    update_lt_stats(pkt);
}

void handle_packet(uint8_t *user, const struct pcap_pkthdr *pcap_hdr,
                   const uint8_t *wirebits) {
    static const struct pkt_record ZeroPkt = {0};
    struct pkt_record *pkt;
    char errstr[DECODE_ERRBUF_SIZE];

    pkt = malloc(sizeof(struct pkt_record));
    *pkt = ZeroPkt;
    if (0 == decode_ethernet(pcap_hdr, wirebits, pkt, errstr)) {
        update_stats_tables(pkt);
    } else {
        mvprintw(ERR_LINE_OFFSET, 0, "%-80s", errstr);
    }
    free(pkt);
}

void grab_packets(int fd, pcap_t *handle) {
    struct timespec timeout_ts = {.tv_sec = 0, .tv_nsec = 1E8};
    struct pollfd fds[] = {{.fd = fd, .events = POLLIN, .revents = POLLHUP}};
    int ch;

    while (1) {
        if (ppoll(fds, 1, &timeout_ts, NULL)) {
            pcap_dispatch(handle, 10000, handle_packet, NULL);
        }
        if ((ch = getch()) == ERR) {
            /* normal case - no input */
            ;
        } else {
            switch (ch) {
            case 'q':
                endwin(); /* End curses mode */
                return;
            }
        }
        print_top_n(5);
        refresh(); /* ncurses screen update */
    }
}

void init_curses() {
    initscr();            /* Start curses mode */
    raw();                /* Line buffering disabled */
    keypad(stdscr, TRUE); /* We get F1, F2 etc. */
    noecho();             /* Don't echo() while we do getch */
    nodelay(stdscr, TRUE);
}
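Both flow tables above are re-sorted with bytes_cmp after every packet so that print_top_n can simply walk the head of the sorted table. The comparator itself is not in the excerpt; assuming it ranks flows by descending byte count (the len field maintained above), it could be:

/* heaviest flows first: positive when a carries fewer bytes than b
 * (sketch; bytes_cmp is not shown in the excerpt) */
static int bytes_cmp(struct pkt_record *a, struct pkt_record *b) {
    if (a->len < b->len) return 1;
    if (a->len > b->len) return -1;
    return 0;
}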
void sort_by_token() { HASH_SORT(tokenHash, token_compare); }
void sort_symbol_table(visibility_level_t vl) { HASH_SORT( symbol_tables[vl], name_sort ); }
void tokenset_sort( struct tokenset *p ) { HASH_SORT( p->tokens, _text_sort ); }