void jrst_filter_init (void) { char *filename = getenv ("JRST_FILTER"); if (!filename) { return; } FILE *file = fopen (filename, "r"); if (!file){ return; } hcreate_r (1000, &classes); hcreate_r (1000, &methods); hash_initialized = 1; char p1[JRST_MAX_STRING]; ParseState state = ParseStart; while (!feof(file)){ fscanf (file, "%s", p1); switch (state){ case ParseStart: switch (p1[0]){ case 'C': state = ParseClass; break; case 'M': state = ParseMethod; break; } break; case ParseClass: { ENTRY e, *ep = NULL; e.key = p1; e.data = NULL; hsearch_r (e, FIND, &ep, &classes); if (ep == NULL){ e.key = strdup (p1); e.data = NULL; hsearch_r (e, ENTER, &ep, &classes); } } state = ParseStart; break; case ParseMethod: { ENTRY e, *ep = NULL; e.key = p1; e.data = NULL; hsearch_r (e, FIND, &ep, &methods); if (ep == NULL){ e.key = strdup (p1); e.data = NULL; hsearch_r (e, ENTER, &ep, &methods); } } state = ParseStart; break; } } fclose(file); }
/*
 * Parse a CGI query string ("name=value" pairs separated by '&' or ';')
 * into the global hash table via set_cgi().  The string is split IN PLACE
 * (strsep replaces separators with '\0') and the table stores pointers
 * into the caller's buffer, so `query` must remain valid while the table
 * is in use.  Passing NULL tears down the existing table instead.
 */
void init_cgi(char *query)
{
	int len, nel;
	char *q, *name, *value;

	/* Clear variables */
	if (!query) {
		hdestroy_r(&htab);
		return;
	}

	/* Parse into individual assignments */
	q = query;
	len = strlen(query);
	nel = 1;
	/* counting pass; strsep also NUL-splits the string in place here */
	while (strsep(&q, "&;"))
		nel++;
	hcreate_r(nel, &htab);

	for (q = query; q < (query + len);) {
		/* Unescape each assignment */
		unescape(name = value = q);

		/* Skip to next assignment (step over the run of '\0's) */
		for (q += strlen(q); q < (query + len) && !*q; q++);

		/* Assign variable */
		name = strsep(&value, "=");
		if (value) {
			// printf("set_cgi: name=%s, value=%s.\n", name , value); // N12 test
			set_cgi(name, value);
		}
	}
}
/*
 * Register every entry of modtab[] (count derived from the linker-provided
 * __modules_begin/__modules_end symbols) in the global `table` hash,
 * keyed by mk_module_key(channel, name).
 */
void modules_init(void)
{
	unsigned int modcnt;
	unsigned int i;	/* was int: comparison with unsigned modcnt mixed signedness */
	int status;
	ENTRY entry, *ret;

	status = hcreate_r(100, &table);
	/* NOTE(review): under NDEBUG this assert vanishes and the hcreate_r
	 * result goes unchecked — kept as the file's existing convention. */
	assert(status);

	modcnt = &__modules_end - &__modules_begin - 1;
	INFO("modules (%d):", modcnt);
	for (i = 0; i < modcnt; i++) {
		entry.key = mk_module_key(modtab[i].channel, modtab[i].name);
		entry.data = &modtab[i];
		status = hsearch_r(entry, ENTER, &ret, &table);
		assert(status);
		if (verbose) {
			fprintf(stderr, " %s.%s", modtab[i].channel, modtab[i].name);
		}
	}
	if (verbose) {
		fprintf(stderr, "\n");
	}
}
/*
 * (Re)initialise the CGI variable table from `query` ("name=value" pairs
 * separated by '&' or ';').  The string is split in place by strsep(), and
 * webcgi_set() stores pointers into it, so `query` must stay alive while
 * the table is used.  Passing NULL only destroys any existing table.
 */
void webcgi_init(char *query)
{
	int nel;
	char *q, *end, *name, *value;

	if (htab.table) hdestroy_r(&htab);
	if (query == NULL) return;

//	cprintf("query = %s\n", query);

	end = query + strlen(query);
	q = query;
	nel = 1;
	/* counting pass; also NUL-splits the assignments in place */
	while (strsep(&q, "&;")) {
		nel++;
	}
	hcreate_r(nel, &htab);

	for (q = query; q < end; ) {
		value = q;
		q += strlen(q) + 1;	/* step past this NUL-terminated chunk */

		unescape(value);
		name = strsep(&value, "=");
		if (value) webcgi_set(name, value);
	}
}
int mod_static_init() { int i; int j; size_t size; ENTRY item, *ret; char **ext = NULL; size = sizeof(standard_types) / sizeof(standard_types[0]); bzero(&std_mime_type_hash, sizeof(struct hsearch_data)); if (hcreate_r(size * 2, &std_mime_type_hash) == 0) { error("Error creating standard MIME type hash"); return -1; } for (i = 0; i < size; i++) { for (ext = standard_types[i].exts, j = 0; *ext != NULL && j < FILE_TYPE_COUNT; ext++, j++) { item.key = *ext; item.data = standard_types[i].content_type; debug("Registering standard MIME type %s:%s", *ext, standard_types[i].content_type); if (hsearch_r(item, ENTER, &ret, &std_mime_type_hash) == 0) { error("Error entering standard MIME type"); } } } return 0; }
/* * Allocate a hash table for fs handles. Returns 0 on success, * -1 on failure. */ int allocFsTable(void) { assert(NULL == fsTable); fsTable = calloc(1, sizeof(struct hsearch_data)); if (0 == hcreate_r(MAX_ELEMENTS, fsTable)) { ERROR("Unable to initialize connection table"); return -1; } return 0; }
/** * new AIDE_CONTEXT */ AIDE_CONTEXT * newAideContext() { int rc; AIDE_CONTEXT *ctx; ctx = xmalloc(sizeof(AIDE_CONTEXT)); if (ctx == NULL) { LOG(LOG_ERR, "no memory"); return NULL; } memset(ctx, 0, sizeof(AIDE_CONTEXT)); /* hash tables */ // TODO set the size in openpts.h ctx->aide_md_table = xmalloc(sizeof(struct hsearch_data)); // TODO ck null memset(ctx->aide_md_table, 0, sizeof(struct hsearch_data)); rc = hcreate_r(AIDE_HASH_TABLE_SIZE, ctx->aide_md_table); // hash table for metadata if (rc == 0) { LOG(LOG_ERR, "hcreate faild, errno=%x\n", errno); goto error; } ctx->aide_md_table_size = 0; ctx->aide_in_table = xmalloc(sizeof(struct hsearch_data)); if (ctx->aide_in_table == NULL) { LOG(LOG_ERR, "no memory"); goto error; } memset(ctx->aide_in_table, 0, sizeof(struct hsearch_data)); // 4096 full rc = hcreate_r(AIDE_HASH_TABLE_SIZE, ctx->aide_in_table); // hash table for ignore name if (rc == 0) { LOG(LOG_ERR, "hcreate faild\n"); goto error; } ctx->aide_in_table_size = 0; DEBUG("newAideContext %p\n", ctx); return ctx; error: if (ctx != NULL) xfree(ctx); return NULL; }
/*
 * Allocate an empty Graph whose node hash can hold up to `max_nodes`
 * entries.  Returns NULL on allocation or hcreate_r failure.
 */
Graph* graph_new (size_t max_nodes)
{
  /* hcreate_r requires the struct hsearch_data to be zeroed before the
   * first call, so allocate with calloc (the old malloc left htab full
   * of garbage — undefined behaviour).  calloc also zeroes the counters. */
  Graph *graph = calloc (1, sizeof(Graph));
  if (graph == NULL)
    return NULL;

  graph->num_nodes = 0;
  graph->nodes = NULL;
  graph->nodes_arr_size = 0;

  if (hcreate_r (max_nodes, &graph->htab) == 0) {
    free (graph);           /* don't hand back a graph with no table */
    return NULL;
  }
  return graph;
}
struct hsearch_data * hash_create(ssize_t len) { struct hsearch_data * table; table = malloc(sizeof(table)); check_mem(table); check(hcreate_r(len, table), "hash_create: fail to create an htable -> return NULL"); return table; error: if (table) free(table); return NULL; }
/*
 * inotify-based file copier: create the watch bookkeeping hash, parse the
 * command line, watch the requested paths, and copy each file named in an
 * inotify event into the configured output directory.  Returns the status
 * of the last failing call (or the last read() result on normal exit).
 */
int main(int argc, char *argv[])
{
	int err, i;
	struct ibc_opts ibc_opts;
	struct inotify_event *evt;
	char buf[BUF_LEN], output[FILEPATH_BUF_SZ];
	const char *fp;

	/* zeroed struct is required before hcreate_r on ibc.htab */
	memset(&ibc, 0, sizeof(struct ibc));

	err = hcreate_r(HTAB_SIZE, &ibc.htab);
	if (err == 0) {
		perror("hcreate_r");
		goto hcreate_error;
	}

	err = parse_opts(&ibc_opts, argc, argv);
	if (err == -1) {
		goto parsing_error;
	}

	/* scrub argv (presumably to hide the arguments from /proc/<pid>/cmdline
	 * — NOTE(review): intent not visible here, confirm) */
	for (i = 0; i < argc; i++)
		memset(argv[i], 0, strlen(argv[i]));

	err = ibc.fd = inotify_init();
	if (err == -1) {
		perror("inotify_init");
		goto inotify_init_error;
	}

	err = add_watches(&ibc_opts);
	if (err == -1) {
		goto add_watches_error;
	}

	/* NOTE(review): only the first event record of each read() batch is
	 * handled; a single read can return several inotify_event structs. */
	while (read(ibc.fd, buf, BUF_LEN) > 0) {
		evt = (struct inotify_event *) buf;
		fp = get_inotify_event_path(evt->wd, evt->name);
		if (fp) {
			snprintf(output, FILEPATH_BUF_SZ, "%s/%s", ibc_opts.output_dir, evt->name);
			cp(output, fp);
		}
	}

add_watches_error:
	close(ibc.fd);
inotify_init_error:
	free_opts(&ibc_opts);
parsing_error:
hcreate_error:
	/* reached even when hcreate_r failed; harmless with a zeroed table
	 * (hdestroy_r then frees a NULL pointer) — NOTE(review): confirm */
	hdestroy_r(&ibc.htab);
	return err;
}
/*
 * Create the client-connection context table.  Must be called before any
 * context is registered (asserted via ctx_count).  Aborts the process if
 * the table cannot be created; returns 0 on success.
 */
int cln_init()
{
	int ret;

	assert(ctx_count == 0);

	ret = hcreate_r(RFS_MAX_CLIENT_CONNECTION, &ctx_hosts);
	/* BUG FIX: hcreate_r returns NONZERO on success and 0 on failure.
	 * The old test (ret != 0) treated every SUCCESS as an error and
	 * aborted, while real failures returned 0 silently. */
	if (ret == 0) {
		fprintf(stderr, "<%s> failed hcreate_r() to create connection table! "
				"ERROR: %s\n",
				__func__, strerror(errno));
		abort();
	}

	return 0;
}
/*
 * Increment the Traffic Server stat `name` by `amount`, creating the stat
 * on first use.  A thread-local hsearch table caches name -> stat_id so
 * the slow TSStatFindName/TSStatCreate path (serialised on create_mutex)
 * runs at most once per name per thread.
 */
static void
stat_add(char *name, TSMgmtInt amount, TSStatPersistence persist_type, TSMutex create_mutex)
{
  int stat_id = -1;
  ENTRY search, *result = NULL;
  static __thread struct hsearch_data stat_cache;   /* per-thread, so no locking on lookups */
  static __thread bool hash_init = false;

  if (unlikely(!hash_init)) {
    hcreate_r(TS_MAX_API_STATS << 1, &stat_cache);  /* 2x headroom for the hash */
    hash_init = true;
    TSDebug(DEBUG_TAG, "stat cache hash init");
  }

  search.key  = name;
  search.data = 0;
  hsearch_r(search, FIND, &result, &stat_cache);

  if (unlikely(result == NULL)) {
    // This is an unlikely path because we most likely have the stat cached
    // so this mutex won't be much overhead and it fixes a race condition
    // in the RecCore. Hopefully this can be removed in the future.
    TSMutexLock(create_mutex);
    if (TS_ERROR == TSStatFindName((const char *)name, &stat_id)) {
      stat_id = TSStatCreate((const char *)name, TS_RECORDDATATYPE_INT, persist_type, TS_STAT_SYNC_SUM);
      if (stat_id == TS_ERROR) {
        TSDebug(DEBUG_TAG, "Error creating stat_name: %s", name);
      } else {
        TSDebug(DEBUG_TAG, "Created stat_name: %s stat_id: %d", name, stat_id);
      }
    }
    TSMutexUnlock(create_mutex);

    if (stat_id >= 0) {
      /* cache under a private copy of the name; the table owns the key */
      search.key  = TSstrdup(name);
      search.data = (void *)((intptr_t)stat_id);
      hsearch_r(search, ENTER, &result, &stat_cache);
      TSDebug(DEBUG_TAG, "Cached stat_name: %s stat_id: %d", name, stat_id);
    }
  } else {
    stat_id = (int)((intptr_t)result->data);
  }

  if (likely(stat_id >= 0)) {
    TSStatIntIncrement(stat_id, amount);
  } else {
    TSDebug(DEBUG_TAG, "stat error! stat_name: %s stat_id: %d", name, stat_id);
  }
}
/*
 * Non-reentrant hsearch() built on the reentrant API: lazily create the
 * single process-wide table, then delegate to hsearch_r().  Returns the
 * found/entered entry, or NULL when the lookup or table creation fails.
 */
ENTRY *
hsearch(ENTRY item, ACTION action)
{
	ENTRY *found = NULL;

	/* Set up the shared global table on first use. */
	if (!global_hashtable_initialized) {
		if (hcreate_r(0, &global_hashtable) == 0)
			return NULL;
		global_hashtable_initialized = true;
	}

	if (hsearch_r(item, action, &found, &global_hashtable) == 0)
		return NULL;

	return found;
}
/*
 * Allocate a zeroed hashtable (as hcreate_r requires) and size it for
 * `entries` elements.  Returns NULL on allocation or creation failure;
 * nothing is leaked on either error path.
 */
hashtable *hashtable_create(int entries)
{
    hashtable *tmp = NULL;

    tmp = calloc(1, sizeof(hashtable));
    if (!tmp) {
        lerror("calloc");
        return NULL;
    }

    if (hcreate_r(entries, tmp) < 1) {
        free(tmp);   /* was leaked when hcreate_r failed */
        return NULL;
    }

    return tmp;
}
void webcgi_set(char *name, char *value) { ENTRY e, *ep; if (!htab.table) { hcreate_r(16, &htab); } e.key = name; hsearch_r(e, FIND, &ep, &htab); if (ep) { ep->data = value; } else { e.data = value; hsearch_r(e, ENTER, &ep, &htab); } }
/*
 * Allocate and create a hash table of `size` entries in *htab.
 * Returns EWF_SUCCESS, or EWF_ERROR with *htab reset to NULL on failure
 * (the old code leaked the allocation and left a dangling, half-built
 * table in the caller's pointer).
 */
int ewf_hashtable_create( ewf_hashtable_t ** htab, size_t size )
{
    /* From the man page:
     * "The struct it points to must be zeroed before the first call to
     * hcreate_r()." — hence calloc. */
    *htab = calloc( 1, sizeof( struct hsearch_data ) );
    if ( *htab == NULL )
    {
        nbu_log_error( "hash table allocation failed" );
        return EWF_ERROR;
    }

    if ( hcreate_r( size, *htab ) == 0 )
    {
        nbu_log_error( "hash table creation failed" );
        free( *htab );      /* was leaked */
        *htab = NULL;       /* don't hand back an unusable table */
        return EWF_ERROR;
    }

    return EWF_SUCCESS;
}
void* hook_add(const char *func_name, void *func_ptr) { #define MAX_HTAB_ENTRIES 400 if (htab == NULL) { htab = calloc(1, sizeof(struct hsearch_data)); hcreate_r(MAX_HTAB_ENTRIES, htab); } ENTRY e, *ep=NULL; e.key = strdup(func_name); e.data = func_ptr; int rv = hsearch_r(e, ENTER, &ep, htab); if (ep == NULL) { fprintf(stderr, "entry failed:%s %s\n",func_name, strerror(errno)); return NULL; } LOGD("HOOKED:%s",func_name); return ep->data; }
/*
 * (Re)build the global CGI variable table from `query` ("name=value"
 * pairs separated by '&' or ';').  The buffer is NUL-split in place and
 * the table stores pointers into it, so `query` must outlive the table.
 * Passing NULL tears the existing table down.  htab_count tracks the
 * number of stored variables (reset here, presumably updated by
 * set_cgi() — confirm).
 */
void init_cgi(char *query)
{
	int len, nel;
	char *q, *name, *value;

	htab_count = 0;
	//cprintf("\nIn init_cgi(), query = %s\n", query);
	/* Clear variables */
	if (!query) {
		hdestroy_r(&htab);
		return;
	}

	/* Parse into individual assignments */
	q = query;
	len = strlen(query);
	nel = 1;
	/* counting pass; strsep NUL-splits the string in place as it goes */
	while (strsep(&q, "&;"))
		nel++;
	hcreate_r(nel, &htab);
	//cprintf("\nIn init_cgi(), nel = %d\n", nel);

	for (q = query; q < (query + len);) {
		/* Unescape each assignment */
		unescape(name = value = q);

		/* Skip to next assignment (step over the run of '\0's) */
		for (q += strlen(q); q < (query + len) && !*q; q++)
			;

		/* Assign variable */
		name = strsep(&value, "=");
		if (value)
			set_cgi(name, value);
	}
	//cprintf("\nIn init_cgi(), AFTER PROCESS query = %s\n", query);
}
/*****************************************************************************
 Function    : subdesc_create_hashtable
 Purpose     : Create the hash table sized by the number of elements in the
               document that carry a "name" attribute.
 Inputs      : doc   - the sub document
               root  - structure pointer (owns the hash table storage)
 Outputs     : none
 Returns     : ERROR_SUCCESS on success, another error code on failure
 History     :
   1. 2008-08-26 : function created
 *****************************************************************************/
static int subdesc_create_hashtable(xmlDocPtr doc, SUB_ROOT * root)
{
	xmlXPathContextPtr xpathCtx;
	int ret = ERROR_SUCCESS;

	/* Create xpath evaluation context */
	xpathCtx = xmlXPathNewContext(doc);
	if(xpathCtx == NULL)
	{
		return ERROR_SYSTEM;
	}

	xmlXPathObjectPtr xpathObj;
	/* Evaluate xpath expression: select every element with a "name" attribute */
	xpathObj = xmlXPathEvalExpression((unsigned char*)"//*[@name]", xpathCtx);
	if(xpathObj != NULL)
	{
		/* fail when nothing matched, or the table could not be created
		 * (hcreate_r returns 0 on failure) */
		if(NULL == xpathObj->nodesetval
		   || xpathObj->nodesetval->nodeNr ==0
		   || 0 == hcreate_r(xpathObj->nodesetval->nodeNr,
		                     (struct hsearch_data *)(root->_hashtable)))
		{
			ret = ERROR_SYSTEM;
		}
		xmlXPathFreeObject(xpathObj);
	}
	else
	{
		ret = ERROR_SYSTEM;
	}

	xmlXPathFreeContext(xpathCtx);
	return ret;
}
/*
 * himport_r() - import an environment image into the hash table `htab`.
 *
 * `env`/`size` describe a buffer of "name=value" records separated by
 * `sep` (or '\0').  A private writable copy is parsed in place.  Unless
 * H_NOCLEAR is set, any existing table is destroyed first.  When `nvars`
 * and `vars[]` are given, only the listed variables are processed, and
 * listed variables missing from the image are deleted from the table.
 * Returns 1 on success, 0 on error (errno set).
 */
int himport_r(struct hsearch_data *htab,
		const char *env, size_t size, const char sep, int flag,
		int crlf_is_lf, int nvars, char * const vars[])
{
	char *data, *sp, *dp, *name, *value;
	char *localvars[nvars];
	int i;

	/* Test for correct arguments.  */
	if (htab == NULL) {
		__set_errno(EINVAL);
		return 0;
	}

	/* we allocate new space to make sure we can write to the array */
	if ((data = malloc(size + 1)) == NULL) {
		debug("himport_r: can't malloc %lu bytes\n", (ulong)size + 1);
		__set_errno(ENOMEM);
		return 0;
	}
	memcpy(data, env, size);
	data[size] = '\0';
	dp = data;
	/* make a local copy of the list of variables */
	if (nvars)
		memcpy(localvars, vars, sizeof(vars[0]) * nvars);

	if ((flag & H_NOCLEAR) == 0) {
		/* Destroy old hash table if one exists */
		debug("Destroy Hash Table: %p table = %p\n", htab,
		      htab->table);
		if (htab->table)
			hdestroy_r(htab);
	}

	/*
	 * Create new hash table (if needed). The computation of the hash
	 * table size is based on heuristics: in a sample of some 70+
	 * existing systems we found an average size of 39+ bytes per entry
	 * in the environment (for the whole key=value pair). Assuming a
	 * size of 8 per entry (= safety factor of ~5) should provide enough
	 * safety margin for any existing environment definitions and still
	 * allow for more than enough dynamic additions. Note that the
	 * "size" argument is supposed to give the maximum environment size
	 * (CONFIG_ENV_SIZE). This heuristics will result in
	 * unreasonably large numbers (and thus memory footprint) for
	 * big flash environments (>8,000 entries for 64 KB
	 * environment size), so we clip it to a reasonable value.
	 * On the other hand we need to add some more entries for free
	 * space when importing very small buffers. Both boundaries can
	 * be overwritten in the board config file if needed.
	 */
	if (!htab->table) {
		int nent = CONFIG_ENV_MIN_ENTRIES + size / 8;

		if (nent > CONFIG_ENV_MAX_ENTRIES)
			nent = CONFIG_ENV_MAX_ENTRIES;

		debug("Create Hash Table: N=%d\n", nent);

		if (hcreate_r(nent, htab) == 0) {
			free(data);
			return 0;
		}
	}

	if (!size) {
		free(data);
		return 1;		/* everything OK */
	}
	if(crlf_is_lf) {
		/* Remove Carriage Returns in front of Line Feeds */
		unsigned ignored_crs = 0;
		for(;dp < data + size && *dp; ++dp) {
			if(*dp == '\r' &&
			   dp < data + size - 1 && *(dp+1) == '\n')
				++ignored_crs;
			else
				*(dp-ignored_crs) = *dp;
		}
		size -= ignored_crs;
		dp = data;
	}

	/* Parse environment; allow for '\0' and 'sep' as separators */
	do {
		ENTRY e, *rv;

		/* skip leading white space */
		while (isblank(*dp))
			++dp;

		/* skip comment lines */
		if (*dp == '#') {
			while (*dp && (*dp != sep))
				++dp;
			++dp;
			continue;
		}

		/* parse name */
		for (name = dp; *dp != '=' && *dp && *dp != sep; ++dp)
			;

		/* deal with "name" and "name=" entries (delete var) */
		if (*dp == '\0' || *(dp + 1) == '\0' ||
		    *dp == sep || *(dp + 1) == sep) {
			if (*dp == '=')
				*dp++ = '\0';
			*dp++ = '\0';	/* terminate name */

			debug("DELETE CANDIDATE: \"%s\"\n", name);
			if (!drop_var_from_set(name, nvars, localvars))
				continue;

			if (hdelete_r(name, htab, flag) == 0)
				debug("DELETE ERROR ##############################\n");

			continue;
		}
		*dp++ = '\0';	/* terminate name */

		/* parse value; deal with escapes */
		for (value = sp = dp; *dp && (*dp != sep); ++dp) {
			if ((*dp == '\\') && *(dp + 1))	/* '\' escapes the next char */
				++dp;
			*sp++ = *dp;
		}
		*sp++ = '\0';	/* terminate value */
		++dp;

		if (*name == 0) {
			debug("INSERT: unable to use an empty key\n");
			__set_errno(EINVAL);
			free(data);
			return 0;
		}

		/* Skip variables which are not supposed to be processed */
		if (!drop_var_from_set(name, nvars, localvars))
			continue;

		/* enter into hash table */
		e.key = name;
		e.data = value;

		/* U-Boot's extended hsearch_r takes the flag argument */
		hsearch_r(e, ENTER, &rv, htab, flag);
		if (rv == NULL)
			printf("himport_r: can't insert \"%s=%s\" into hash table\n",
			       name, value);

		debug("INSERT: table %p, filled %d/%d rv %p ==> name=\"%s\" value=\"%s\"\n",
		      htab, htab->filled, htab->size,
		      rv, name, value);
	} while ((dp < data + size) && *dp);	/* size check needed for text */
						/* without '\0' termination */

	debug("INSERT: free(data = %p)\n", data);
	free(data);

	/* process variables which were not considered */
	for (i = 0; i < nvars; i++) {
		if (localvars[i] == NULL)
			continue;
		/*
		 * All variables which were not deleted from the variable list
		 * were not present in the imported env
		 * This could mean two things:
		 * a) if the variable was present in current env, we delete it
		 * b) if the variable was not present in current env, we notify
		 *    it might be a typo
		 */
		if (hdelete_r(localvars[i], htab, flag) == 0)
			printf("WARNING: '%s' neither in running nor in imported env!\n", localvars[i]);
		else
			printf("WARNING: '%s' not in imported env, deleting it!\n", localvars[i]);
	}

	debug("INSERT: done\n");
	return 1;		/* everything OK */
}
int lt_args_add_enum(struct lt_config_shared *cfg, char *name, struct lt_list_head *h) { ENTRY e, *ep; struct lt_enum_elem *elem, *last = NULL; struct lt_enum *en; int i = 0; if (NULL == (en = malloc(sizeof(*en)))) return -1; memset(en, 0x0, sizeof(*en)); en->name = name; /* Initialize the hash table holding enum names */ if (!enum_init) { if (!hcreate_r(LT_ARGS_DEF_ENUM_NUM, &args_enum_tab)) { perror("failed to create has table:"); free(en); return -1; } enum_init = 1; } e.key = en->name; e.data = en; if (!hsearch_r(e, ENTER, &ep, &args_enum_tab)) { perror("hsearch_r failed"); free(en); return 1; } /* We've got enum inside the hash, let's prepare the enum itself. The 'elems' field is going to be the qsorted list of 'struct enum_elem's */ lt_list_for_each_entry(elem, h, list) en->cnt++; if (NULL == (en->elems = malloc(sizeof(struct lt_enum_elem) * en->cnt))) return -1; PRINT_VERBOSE(cfg, 3, "enum %s (%d elems) not fixed\n", en->name, en->cnt); /* * The enum element can be: * * 1) defined * 2) undefined * 3) defined via string reference * * ad 1) no work * ad 2) value of previous element is used * ad 3) we look for the string reference in defined elements' names * * This being said, following actions will happen now: * * - copy all the values to the prepared array * - fix the values based on the above * - sort the array */ lt_list_for_each_entry(elem, h, list) { PRINT_VERBOSE(cfg, 3, "\t %s = %d/%s\n", elem->name, elem->val, elem->strval); en->elems[i++] = *elem; }
// read symbol table from elf_file
//
// Builds a `struct symtab` for the SHT_SYMTAB section of a 32-bit ELF:
// the section's string table is copied (so it survives elf_end), every
// named symbol is recorded in symtab->symbols[] and entered into an
// hsearch_r hash keyed by symbol name.  Returns NULL if the file cannot
// be opened or is not an ELF object.
struct symtab *build_symtab(const char *elf_file)
{
  int fd;
  Elf *elf;
  Elf32_Ehdr *ehdr;
  char *names;
  struct symtab *symtab = NULL;

  if ((fd = open(elf_file, O_RDONLY)) < 0) {
    perror("open");
    return NULL;
  }

  elf = elf_begin(fd, ELF_C_READ, NULL);
  if (elf == NULL || elf_kind(elf) != ELF_K_ELF) {
    // not an elf
    close(fd);
    return NULL;
  }

  // read ELF header
  if ((ehdr = elf32_getehdr(elf)) != NULL) {
    Elf_Scn *scn;
    struct elf_section *scn_cache, *scn_cache_ptr;
    int cnt;

    // read section headers into scn_cache
    // (slot 0 is skipped: ELF section indices start at 1)
    scn_cache = (struct elf_section *) malloc(ehdr->e_shnum * sizeof(struct elf_section));
    scn_cache_ptr = scn_cache;
    scn_cache_ptr++;
    for (scn = NULL; scn = elf_nextscn(elf, scn); scn_cache_ptr++) {
      scn_cache_ptr->c_shdr = elf32_getshdr(scn);
      scn_cache_ptr->c_data = elf_getdata(scn, NULL);
    }

    for (cnt = 1; cnt < ehdr->e_shnum; cnt++) {
      Elf32_Shdr *shdr = scn_cache[cnt].c_shdr;

      if (shdr->sh_type == SHT_SYMTAB) {
        Elf32_Sym *syms;
        int j, n, rslt;
        size_t size;

        // FIXME: there could be multiple data buffers associated with the
        // same ELF section. Here we can handle only one buffer. See man page
        // for elf_getdata on Solaris.

        guarantee(symtab == NULL, "multiple symtab");
        symtab = (struct symtab *)calloc(1, sizeof(struct symtab));

        // the symbol table
        syms = (Elf32_Sym *)scn_cache[cnt].c_data->d_buf;

        // number of symbols
        n = shdr->sh_size / shdr->sh_entsize;

        // create hash table, we use hcreate_r, hsearch_r and hdestroy_r to
        // manipulate the hash table.
        // (calloc satisfies hcreate_r's zeroed-struct requirement;
        //  NOTE(review): its result and malloc's below are unchecked)
        symtab->hash_table = calloc(1, sizeof(struct hsearch_data));
        rslt = hcreate_r(n, symtab->hash_table);
        guarantee(rslt, "unexpected failure: hcreate_r");

        // shdr->sh_link points to the section that contains the actual strings
        // for symbol names. the st_name field in Elf32_Sym is just the
        // string table index. we make a copy of the string table so the
        // strings will not be destroyed by elf_end.
        size = scn_cache[shdr->sh_link].c_data->d_size;
        symtab->strs = (char *)malloc(size);
        memcpy(symtab->strs, scn_cache[shdr->sh_link].c_data->d_buf, size);

        // allocate memory for storing symbol offset and size;
        symtab->symbols = (struct elf_symbol *)malloc(n * sizeof(struct elf_symbol));

        // copy symbols info our symtab and enter them info the hash table
        for (j = 0; j < n; j++, syms++) {
          ENTRY item, *ret;
          char *sym_name = symtab->strs + syms->st_name;

          symtab->symbols[j].name = sym_name;
          symtab->symbols[j].offset = syms->st_value;
          symtab->symbols[j].size = syms->st_size;

          // skip empty strings
          if (*sym_name == '\0') continue;

          item.key = sym_name;
          item.data = (void *)&(symtab->symbols[j]);
          hsearch_r(item, ENTER, &ret, symtab->hash_table);
        }
      }
    }
    free(scn_cache);
  }
  elf_end(elf);
  close(fd);
  return symtab;
}
/*
 * Non-reentrant hcreate(): forwards to hcreate_r() on the single
 * process-wide table `the_global_hsearch_data`.  Returns whatever
 * hcreate_r returns (nonzero on success, 0 on failure).
 */
int hcreate (size_t nel)
{
  return hcreate_r(nel, the_global_hsearch_data);
}
/*
 * Read "root,sub,sub,..." lines from in.txt, intern every entity name via
 * a hash table, record bidirectional links between root and sub entities
 * in an mmap-backed entity array, and finally dump the link set as JSON
 * (or plain text when `json` is 0).
 *
 * NOTE(review): `total_num_root_entities` / `total_num_sub_entities` are
 * used uninitialised, and the final printf formats the `long num_entities`
 * with %d — confirm/fix when touching this code.
 */
int main (int argc, char *argv[])
{
  char line[1000000];
  FILE *in, *out;
  int line_length;
  int total_num_root_entities;
  int total_num_sub_entities;
  long num_entities;
  unsigned long *temp;
  int index;
  int ret;
  int fd_entities_path, fd_hash_path;
  int result;
  int roots, subs;
  int num_lines = 0;
  int num_dups = 0;
  int json = 1;
  struct entity *entities = 0;
  struct hsearch_data htab;

  /* NOTE(review): glibc requires a ZEROED hsearch_data before hcreate_r;
   * `htab` is an uninitialised stack variable here — confirm. */
  if (hcreate_r (LARGE_HASH_TABLE, &htab) == 0)
    {
      perror ("Could not create hash table");
      exit (2);
    }

  // Begin MMAP stuff:
  //
  // Open an mmap file for writing.
  //  - Creating the file if it doesn't exist.
  //  - Truncating it to 0 size if it already exists. (not really needed)
  // Note: "O_WRONLY" mode is not sufficient when mmaping.
  //
  fd_entities_path = open (ENTITIES_FILEPATH, O_RDWR | O_CREAT | O_TRUNC, (mode_t) 0600);
  if (fd_entities_path == -1)
    {
      perror ("Error opening mmap file for writing");
      exit (EXIT_FAILURE);
    }

  fd_hash_path = open (HASH_FILEPATH, O_RDWR | O_CREAT | O_TRUNC, (mode_t) 0600);
  if (fd_hash_path == -1)
    {
      perror ("Error opening mmap file for writing");
      exit (EXIT_FAILURE);
    }

  // Stretch the file size to the size of the (mmapped) array of ints
  //
  result = lseek (fd_entities_path, ENTITIES_FILESIZE - 1, SEEK_SET);
  if (result == -1)
    {
      close (fd_entities_path);
      perror ("Error calling lseek() to 'stretch' the file");
      exit (EXIT_FAILURE);
    }

  result = lseek (fd_hash_path, HASH_FILESIZE - 1, SEEK_SET);
  if (result == -1)
    {
      close (fd_hash_path);
      perror ("Error calling lseek() to 'stretch' the file");
      exit (EXIT_FAILURE);
    }

  // Something needs to be written at the end of the file to
  // have the file actually have the new size.
  // Just writing an empty string at the current file position will do.
  //
  // Note:
  //  - The current position in the file is at the end of the stretched
  //    file due to the call to lseek().
  //  - An empty string is actually a single '\0' character, so a zero-byte
  //    will be written at the last byte of the file.
  //
  result = write (fd_entities_path, "", 1);
  if (result != 1)
    {
      close (fd_entities_path);
      perror ("Error writing last byte of the file");
      exit (EXIT_FAILURE);
    }

  result = write (fd_hash_path, "", 1);
  if (result != 1)
    {
      close (fd_hash_path);
      perror ("Error writing last byte of the file");
      exit (EXIT_FAILURE);
    }

  // Now the file is ready to be mmapped.
  //
  entities = mmap (0, ENTITIES_FILESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_entities_path, 0);
  if (entities == MAP_FAILED)
    {
      close (fd_entities_path);
      perror ("Error mmapping the entities file");
      exit (EXIT_FAILURE);
    }

  // NOTE(review): this assigns a void* (mmap result) to `struct
  // hsearch_data htab` — that cannot compile as written, it would also
  // clobber the table created by hcreate_r above, and the later
  // comparison with MAP_FAILED / munmap(htab, ...) have the same
  // problem.  `htab` was almost certainly meant to be a pointer.
  htab = mmap (0, HASH_FILESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_hash_path, 0);
  if (htab == MAP_FAILED)
    {
      close (fd_hash_path);
      perror ("Error mmapping the entities file");
      exit (EXIT_FAILURE);
    }
  // End MMAP stuff

  in = fopen ("in.txt", "r");
  if (!json)
    printf ("Creating the in memory table... \n");
  num_entities = -1;
  while (1 == fscanf (in, "%[^\n]%n\n", line, &line_length))
    {				//read one line
      char *word;
      unsigned long focal_root_entity = 0;	//Used when the Root Entity already exists
      char *ptr;
      {
	char *root = strtok_r (line, ",", &ptr);
	int i = 0;
	// First check whether the entry already exists:
	i = find_entity (root, &htab, num_entities);
	if (i > num_entities)
	  {
	    // Initialise this Root Entity:
	    add_ent (&entities, &num_entities, root, &htab);
	    focal_root_entity = num_entities;
	    total_num_root_entities++;
	  }
	else
	  {
	    // If the root entity has been found:
	    if (!json && DEBUG)
	      printf ("***At line %d Root Entity %s was already found \n", num_lines, root);
	    num_dups++;
	    focal_root_entity = i;
	  }			// if (found == 0)
      }
      // Each remaining comma-separated word on the line is a sub entity.
      for (; word = strtok_r (NULL, ",", &ptr);)
	{
	  unsigned long sub_entity = 0;
	  //First check whether the entity already exists :
	  sub_entity = find_entity (word, &htab, num_entities);
	  if (sub_entity > num_entities)
	    {
	      //Initialise this Sub Entity:
	      add_ent (&entities, &num_entities, word, &htab);
	      total_num_sub_entities++;
	      sub_entity = num_entities;
	    }			//End found==0
	  else
	    {
	      if (!json && DEBUG)
		printf ("***At line %d Sub Entity %s was already found \n", num_lines, word);
	      num_dups++;
	    }
	  // Now link the Sub Entity to the focal_root_entity using the index of the entity arrays :
	  add_link (&entities[focal_root_entity], sub_entity);
	  add_link (&entities[sub_entity], focal_root_entity);
	}			// End for pos
      num_lines++;
    }				// End while fscanf
  fclose (in);

  // Now print out the entire set of Entities:
  if (json)
    printf ("[\n");
  {
    int i;
    roots = subs = 0;
    for (i = 0; i <= num_entities; i++)
      {
	int j;
	if (entities[i].num_links >= 0)
	  {
	    if (!json)
	      printf ("Root Entity '%s' discovered with %d sub links\n", entities[i].entity_name, entities[i].num_links);
	    roots++;
	    for (j = 0; j <= entities[i].num_links; j++)
	      {
		if (((i > 0) || (j > 0)) && (json))
		  printf (",");
		if (!json)
		  printf ("Sub Entity is %s\n", entities[entities[i].links[j]].entity_name);
		if (json)
		  printf ("{\n \"source\" : \"%s\",\n \"target\" : \"%s\",\n \"type\" : \"suit\"\n}\n", entities[i].entity_name, entities[entities[i].links[j]].entity_name);
		subs++;
	      }
	    printf ("\n");
	  }
      }
  }
  if (json)
    printf ("]\n");
  if (!json)
    {
      printf ("The number of root entities found were %d and the number of subs found were %d\n", roots, subs);
      printf ("The total number of Entities are %d read in %d lines with %d duplications.\n", num_entities, num_lines, num_dups);
    }

  // Don't forget to free the mmapped memory
  //
  if (munmap (entities, ENTITIES_FILESIZE) == -1)
    {
      perror ("Error un-mmapping the file");
      /* Decide here whether to close(fd) and exit() or not. Depends... */
    }
  if (munmap (htab, HASH_FILESIZE) == -1)
    {
      perror ("Error un-mmapping the file");
      /* Decide here whether to close(fd) and exit() or not. Depends... */
    }

  // Un-mmaping doesn't close the file, so we still need to do that.
  //
  close (fd_entities_path);
  close (fd_hash_path);

  return 0;
}
/*
 * Read the next alignment block from the parser's buffered MAF stream.
 * A block starts with an "a" line and collects the following "s"
 * (sequence) lines into a species-name -> sequence hash.  Returns the
 * accumulated block, NULL on a stream error or a malformed sequence
 * line (and NULL at EOF when no block was started).
 */
hash_alignment_block get_next_alignment_hash(maf_linear_parser parser){
   hash_alignment_block new_align = NULL;
   int in_block=0;
   int hc=0;
   long bytesread;
   int sizeLeftover=0;
   int bLoopCompleted = 0;
   char *temp;
   char *datum;
   char *npos;
   ENTRY *ret_val;
   do{
      /* Refill the line buffer when the previous pass consumed it. */
      if(parser->fill_buf){
         bytesread = fread(parser->buf+sizeLeftover, 1,
                           sizeof(parser->buf)-1-sizeLeftover, parser->maf_file);
         if (bytesread<1){
            bLoopCompleted = 1;
            bytesread = 0;
            continue;
         }
         if(ferror(parser->maf_file) != 0){
            fprintf(stderr, "File stream error: %s\nError: %s",
                    parser->filename,strerror(errno));
            return NULL;
         }
         parser->buf[sizeLeftover+bytesread]=0;
         parser->curr_pos=0;
         parser->pos=parser->buf;
         --parser->fill_buf;
      }
      npos = strchr(parser->pos,'\n');
      if(npos==NULL){
         /* Partial line at buffer end: move it to the front and refill. */
         sizeLeftover = strlen(parser->pos);
         memmove(parser->buf,parser->buf+(sizeof(parser->buf))-sizeLeftover-1,sizeLeftover);
         ++parser->fill_buf;
         continue;
      }
      *npos=0;
      datum = parser->pos;
      parser->pos = npos+1;
      //If we've yet to enter an alignment block, and the first character
      //of the line isn't 'a', then skip over it.
      if(!in_block && datum[0]!='a')
         continue;
      //***HANDLE SCORE/PASS/DATA here**i
      else if(datum[0]=='a'){
         //If we find an 'a' after entering a block, then this is a new block
         //so rewind the file pointer and break out of read loop.
         if(in_block){
            *npos='\n';
            parser->pos = datum;
            break;
         }
         //Else we're starting a new alignment block, initialize the data
         //structure and set in_block to true.
         new_align=malloc(sizeof(*new_align));
         assert(new_align != NULL);
         new_align->species = malloc(256*sizeof(char *));
         assert(new_align->species != NULL);
         /* calloc zeroes the struct, as hcreate_r requires */
         new_align->sequences = calloc(1,sizeof(struct hsearch_data));
         assert(new_align->sequences != NULL);
         hc = hcreate_r(256,new_align->sequences);
         if(hc == 0){
            fprintf(stderr,"Failed to create hash table: %s\n",
                    strerror(errno));
            exit(1);
         }
         new_align->size=0;
         new_align->max=128;
         new_align->data = NULL;
         in_block=1;
         continue;
      }
      //If in a block and find 's', then it's a sequence to add to the
      //current alignment block, parse it, reallocate alignment block's
      //sequence array if necessary, and store the new sequence.
      else if(datum[0]=='s'){
         seq new_seq = get_sequence(datum);
         /* BUG FIX: new_seq was dereferenced (seq_length assignment)
          * BEFORE this NULL check — reordered so the check comes first. */
         if(new_seq == NULL){
            fprintf(stderr, "Invalid sequence entry %s\n",datum);
            return NULL;
         }
         new_align->seq_length = new_seq->size;
         if(new_align->size >= new_align->max){
            fprintf(stderr, "WARNING: Alignment block hash table over half full"
                    "consider increasing max alignment hash size.\n"
                    "Current size: %d\nMax size: %d\n",new_align->size,
                    new_align->max);
         }
         temp = strdup(new_seq->src);
         assert(temp!=NULL);
         char *species_name=strtok(temp,".");
         ENTRY new_ent={species_name,new_seq};
         hc = hsearch_r(new_ent,ENTER,&ret_val,new_align->sequences);
         if(hc == 0){
            fprintf(stderr,"Failed to insert into hash table: %s\n",
                    strerror(errno));
            exit(1);
         }
         /* ENTER returns the pre-existing entry on a key collision */
         if(ret_val->data != new_ent.data){
            fprintf(stderr, "Entry for species %s already present\n",species_name);
            continue;
         }
         // printf("Entry inserted: %s\n", genome_names[i]);
         new_align->species[new_align->size++] = species_name;
         continue;
      }
      //If we hit a character other than 'a' or 's', then we've exited
      //the current alignment block, break out of the read loop and return
      //the current alignment block.
      else
         break;
   }while(!bLoopCompleted);
   return new_align;
}
/* * MAIN */ int main(int argc, char *argv[]) { int sock_fd; int err; int optval; int conn; struct sockaddr_in *addr, *client_addr; socklen_t client_addr_size; struct hsearch_data *htab; pthread_t thread; thdata *thread_data = malloc(sizeof(thdata)); // Create hashmap storage htab = calloc(1, sizeof(struct hsearch_data)); if (hcreate_r(10000, htab) == -1) { printf("Error on hcreate\n"); } // Create socket sock_fd = socket(AF_INET, SOCK_STREAM, 0); if (sock_fd == -1) { printf("Error creating socket: %d\n", errno); } // Allow address reuse optval = 1; err = setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(int)); if (err == -1) { printf("Error setting SO_REUSEADDR on socket: %d\n", errno); } // bind addr = calloc(1, sizeof(struct sockaddr_in)); addr->sin_family = AF_INET; addr->sin_port = htons(11211); // htons: Convert to network byte order addr->sin_addr.s_addr = INADDR_ANY; err = bind(sock_fd, (struct sockaddr *)addr, sizeof(struct sockaddr)); free(addr); if (err == -1) { printf("bind error: %d\n", errno); } err = listen(sock_fd, 1); if (err == -1) { printf("listen error: %d\n", errno); } if (is_single(argc, argv)) { client_addr = malloc(sizeof(struct sockaddr_in)); client_addr_size = sizeof(struct sockaddr); conn = accept(sock_fd, (struct sockaddr *)client_addr, &client_addr_size); free(client_addr); thread_data->conn = conn; thread_data->htab = htab; handle_conn(thread_data); close(conn); } else { while (1) { client_addr = malloc(sizeof(struct sockaddr_in)); client_addr_size = sizeof(struct sockaddr); conn = accept( sock_fd, (struct sockaddr *)client_addr, &client_addr_size); free(client_addr); thread_data->conn = conn; thread_data->htab = htab; pthread_create( &thread, NULL, handle_conn, thread_data); } } close(sock_fd); free(thread_data); hdestroy_r(htab); free(htab); return 0; }
/*
 * Load a PCF program from `fname` into a freshly allocated PCFState.
 * First pass counts the input lines to size the label hash table and the
 * ops array; second pass parses each line with read_instr().  key0/key1
 * become the constant wire keys; copy_key duplicates the opaque key
 * objects.  Aborts (assert/abort) on open or table-creation failure.
 */
PCFState * load_pcf_file(const char * fname, void * key0, void * key1, void *(*copy_key)(void*))
{
  FILE * input;
  PCFState * ret;
  char line[LINE_MAX];
  uint32_t icount = 0;
  uint32_t i = 0;

  ret = (PCFState*)malloc(sizeof(struct PCFState));
  check_alloc(ret);

  ret->alice_outputs = 0;
  ret->bob_outputs = 0;
  ret->inp_i = 0;
  ret->constant_keys[0] = copy_key(key0);
  ret->constant_keys[1] = copy_key(key1);
  ret->copy_key = copy_key;
  ret->call_stack = 0;
  ret->done = 0;
  ret->labels = (struct hsearch_data *)malloc(sizeof(struct hsearch_data));
  check_alloc(ret->labels);
  ret->wires = (struct wire *)malloc(1000000 * sizeof(struct wire));
  check_alloc(ret->wires);

  /* NOTE(review): only the first 200000 of the 1000000 wires are
   * initialised here — confirm the rest are set up before use. */
  for(i = 0; i < 200000; i++)
    {
      ret->wires[i].flags = KNOWN_WIRE;
      ret->wires[i].value = 0;
      ret->wires[i].keydata = copy_key(key0);
    }

  /* hcreate_r requires the struct to be zeroed before first use */
  memset(ret->labels, 0, sizeof(struct hsearch_data));

  ret->done = 0;
  ret->base = 1;
  ret->PC = 0;

  fprintf(stderr, "%s\n", fname);
  input = fopen(fname, "r");
  if(input == 0)
    {
      fprintf(stderr, "%s: %s\n", fname, strerror(errno));
      assert(0);
    }

  /* First pass: count lines to size the label table and op array.
   * NOTE(review): fgets results are unchecked and the feof-driven loop
   * can count the final line twice — verify against the parser. */
  while(!feof(input))
    {
      fgets(line, LINE_MAX-1, input);
      icount++;
    }

  if(hcreate_r(icount, ret->labels) == 0)
    {
      fprintf(stderr, "Unable to allocate hash table: %s\n", strerror(errno));
      abort();
      //      exit(-1);
    }

  ret->icount = icount;
  ret->ops = (PCFOP*)malloc(icount * sizeof(PCFOP));
  check_alloc(ret->ops);

  /* NOTE(review): side effect inside assert — the rewind disappears
   * under NDEBUG. */
  assert(fseek(input, 0, SEEK_SET) == 0);

  icount = 0;
  /* Second pass: parse every line into ret->ops[] */
  while(!feof(input))
    {
      PCFOP * op;
      fgets(line, LINE_MAX-1, input);
      op = read_instr(ret, line, icount);
      ret->ops[icount] = *op;
      free(op);
      icount++;
    }

  fclose(input);

  /* wire 0 is the constant-1 wire */
  ret->wires[0].value = 1;
  ret->wires[0].keydata = ret->copy_key(ret->constant_keys[1]);
  ret->wires[0].flags = KNOWN_WIRE;

  return ret;
}
static void sc_map_init() { // initialize the map linked list sc_map_entries = malloc(sizeof(*sc_map_entries)); if (sc_map_entries == NULL) die("Out of memory creating sc_map_entries"); sc_map_entries->list = NULL; sc_map_entries->count = 0; // build up the map linked list // man 2 socket - domain sc_map_add(AF_UNIX); sc_map_add(AF_LOCAL); sc_map_add(AF_INET); sc_map_add(AF_INET6); sc_map_add(AF_IPX); sc_map_add(AF_NETLINK); sc_map_add(AF_X25); sc_map_add(AF_AX25); sc_map_add(AF_ATMPVC); sc_map_add(AF_APPLETALK); sc_map_add(AF_PACKET); sc_map_add(AF_ALG); // linux/can.h sc_map_add(AF_CAN); // man 2 socket - type sc_map_add(SOCK_STREAM); sc_map_add(SOCK_DGRAM); sc_map_add(SOCK_SEQPACKET); sc_map_add(SOCK_RAW); sc_map_add(SOCK_RDM); sc_map_add(SOCK_PACKET); // man 2 prctl #ifndef PR_CAP_AMBIENT #define PR_CAP_AMBIENT 47 #define PR_CAP_AMBIENT_IS_SET 1 #define PR_CAP_AMBIENT_RAISE 2 #define PR_CAP_AMBIENT_LOWER 3 #define PR_CAP_AMBIENT_CLEAR_ALL 4 #endif // PR_CAP_AMBIENT sc_map_add(PR_CAP_AMBIENT); sc_map_add(PR_CAP_AMBIENT_RAISE); sc_map_add(PR_CAP_AMBIENT_LOWER); sc_map_add(PR_CAP_AMBIENT_IS_SET); sc_map_add(PR_CAP_AMBIENT_CLEAR_ALL); sc_map_add(PR_CAPBSET_READ); sc_map_add(PR_CAPBSET_DROP); sc_map_add(PR_SET_CHILD_SUBREAPER); sc_map_add(PR_GET_CHILD_SUBREAPER); sc_map_add(PR_SET_DUMPABLE); sc_map_add(PR_GET_DUMPABLE); sc_map_add(PR_SET_ENDIAN); sc_map_add(PR_GET_ENDIAN); sc_map_add(PR_SET_FPEMU); sc_map_add(PR_GET_FPEMU); sc_map_add(PR_SET_FPEXC); sc_map_add(PR_GET_FPEXC); sc_map_add(PR_SET_KEEPCAPS); sc_map_add(PR_GET_KEEPCAPS); sc_map_add(PR_MCE_KILL); sc_map_add(PR_MCE_KILL_GET); sc_map_add(PR_SET_MM); sc_map_add(PR_SET_MM_START_CODE); sc_map_add(PR_SET_MM_END_CODE); sc_map_add(PR_SET_MM_START_DATA); sc_map_add(PR_SET_MM_END_DATA); sc_map_add(PR_SET_MM_START_STACK); sc_map_add(PR_SET_MM_START_BRK); sc_map_add(PR_SET_MM_BRK); sc_map_add(PR_SET_MM_ARG_START); sc_map_add(PR_SET_MM_ARG_END); sc_map_add(PR_SET_MM_ENV_START); sc_map_add(PR_SET_MM_ENV_END); 
sc_map_add(PR_SET_MM_AUXV); sc_map_add(PR_SET_MM_EXE_FILE); #ifndef PR_MPX_ENABLE_MANAGEMENT #define PR_MPX_ENABLE_MANAGEMENT 43 #endif // PR_MPX_ENABLE_MANAGEMENT sc_map_add(PR_MPX_ENABLE_MANAGEMENT); #ifndef PR_MPX_DISABLE_MANAGEMENT #define PR_MPX_DISABLE_MANAGEMENT 44 #endif // PR_MPX_DISABLE_MANAGEMENT sc_map_add(PR_MPX_DISABLE_MANAGEMENT); sc_map_add(PR_SET_NAME); sc_map_add(PR_GET_NAME); sc_map_add(PR_SET_NO_NEW_PRIVS); sc_map_add(PR_GET_NO_NEW_PRIVS); sc_map_add(PR_SET_PDEATHSIG); sc_map_add(PR_GET_PDEATHSIG); sc_map_add(PR_SET_PTRACER); sc_map_add(PR_SET_SECCOMP); sc_map_add(PR_GET_SECCOMP); sc_map_add(PR_SET_SECUREBITS); sc_map_add(PR_GET_SECUREBITS); #ifndef PR_SET_THP_DISABLE #define PR_SET_THP_DISABLE 41 #endif // PR_SET_THP_DISABLE sc_map_add(PR_SET_THP_DISABLE); sc_map_add(PR_TASK_PERF_EVENTS_DISABLE); sc_map_add(PR_TASK_PERF_EVENTS_ENABLE); #ifndef PR_GET_THP_DISABLE #define PR_GET_THP_DISABLE 42 #endif // PR_GET_THP_DISABLE sc_map_add(PR_GET_THP_DISABLE); sc_map_add(PR_GET_TID_ADDRESS); sc_map_add(PR_SET_TIMERSLACK); sc_map_add(PR_GET_TIMERSLACK); sc_map_add(PR_SET_TIMING); sc_map_add(PR_GET_TIMING); sc_map_add(PR_SET_TSC); sc_map_add(PR_GET_TSC); sc_map_add(PR_SET_UNALIGN); sc_map_add(PR_GET_UNALIGN); // man 2 getpriority sc_map_add(PRIO_PROCESS); sc_map_add(PRIO_PGRP); sc_map_add(PRIO_USER); // man 2 setns sc_map_add(CLONE_NEWIPC); sc_map_add(CLONE_NEWNET); sc_map_add(CLONE_NEWNS); sc_map_add(CLONE_NEWPID); sc_map_add(CLONE_NEWUSER); sc_map_add(CLONE_NEWUTS); // initialize the htab for our map memset((void *)&sc_map_htab, 0, sizeof(sc_map_htab)); if (hcreate_r(sc_map_entries->count, &sc_map_htab) == 0) die("could not create map"); // add elements from linked list to map struct sc_map_entry *p = sc_map_entries->list; while (p != NULL) { errno = 0; if (hsearch_r(*p->e, ENTER, &p->ep, &sc_map_htab) == 0) die("hsearch_r failed"); if (&p->ep == NULL) die("could not initialize map"); p = p->next; } }
/*
 * Allocate and initialise the global key/value store.
 *
 * calloc zeroes the hsearch_data, which hcreate_r requires.
 * BUG FIX: both the allocation and hcreate_r (which returns 0 on
 * failure) were previously unchecked; a failed calloc would have been
 * dereferenced inside hcreate_r.  Exit on either failure.
 */
void init_store(void)
{
	store = calloc(1, sizeof(struct hsearch_data));
	if (store == NULL) {
		perror("init_store: calloc");
		exit(EXIT_FAILURE);
	}
	if (hcreate_r(100, store) == 0) {
		perror("init_store: hcreate_r");
		exit(EXIT_FAILURE);
	}
}
int main (int argc, char *argv[]) { char line[1000000]; FILE *in, *out; int line_length; int total_num_root_entities; int total_num_sub_entities; int num_entities; unsigned long *temp; int index; int ret; int roots, subs; int json = 1; if (hcreate_r (200000, &htab) == 0) { perror ("Failed to create hash"); exit (1); } struct entity *entities = 0; in = fopen ("in.txt", "r"); out = fopen ("out.txt", "w"); num_entities = -1; while (1 == fscanf (in, "%[^\n]%n\n", line, &line_length)) { //read one line char *word; struct entity *focal_root = 0; //Used when the Root Entity already exists char *ptr; { char *root = strtok_r (line, ",", &ptr); int i = 0; // First check whether the entry already exists: focal_root = find_entity (entities, num_entities, root); if (focal_root == NULL) { // Initialise this Root Entity: focal_root = add_ent (root); fprintf (out, "%s\n", root); total_num_root_entities++; } } for (; word = strtok_r (NULL, ",", &ptr);) { struct entity *entity = 0; //First check whether the entity already exists : entity = find_entity (entities, num_entities, word); if (entity == NULL) { //Initialise this Sub Entity: entity = add_ent (word); } // Now link the Sub Entity to the focal_root_entity using the index of the entity arrays : add_link (focal_root, entity); add_link (entity, focal_root); // Echo the word to the o/p file : fprintf (out, "%s\n", word); } // End for pos } fclose (out); fclose (in); // Now print out the entire set of Entities: if (json) printf ("[\n"); { int i; roots = subs = 0; for (i = 0; i <= num_entities; i++) { int j; if (entities[i].num_links >= 0) { if (!json) printf ("Root Entity '%s' discovered with %d sub links\n", entities[i].entity_name, entities[i].num_links); roots++; for (j = 0; j <= entities[i].num_links; j++) { if (((i > 0) || (j > 0)) && (json)) printf (","); if (!json) printf ("Sub Entity is %s\n", entities[entities[i].links[j]].entity_name); if (json) printf ("{\n \"source\" : \"%s\",\n \"target\" : \"%s\",\n \"type\" : 
\"suit\"\n}\n", entities[i].entity_name, entities[entities[i].links[j]].entity_name); subs++; } printf ("\n"); } } } if (json) printf ("]\n"); if (!json) { printf ("The number of root entities found were %d and the number of subs found were %d\n", roots, subs); printf ("The total number of Entities are %d\n", num_entities); printf ("The total number of Root Entities are %d\n", total_num_root_entities); printf ("The total number of Sub Entities are %d\n", total_num_sub_entities); } hdestroy_r (&htab); return (0); // fwrite the array of structs out to save them: out = fopen ("entities.bin", "wb"); ret = fwrite (entities, sizeof (entities), 1, out); fclose (out); // fread the array of structs in test: in = fopen ("entities.bin", "rb"); ret = fread (entities, sizeof (entities), 1, in); fclose (in); return 0; }