static void free_init_resources(struct fman_mac *dtsec)
{
    fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                         FMAN_INTR_TYPE_ERR);
    fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                         FMAN_INTR_TYPE_NORMAL);

    /* release the driver's group hash table */
    free_hash_table(dtsec->multicast_addr_hash);
    dtsec->multicast_addr_hash = NULL;

    /* release the driver's individual hash table */
    free_hash_table(dtsec->unicast_addr_hash);
    dtsec->unicast_addr_hash = NULL;
}
/*
 * Close connections and release memory
 */
void clean_trusted(void)
{
    if (hash_table_1) {
        free_hash_table(hash_table_1);
        hash_table_1 = NULL;
    }
    if (hash_table_2) {
        free_hash_table(hash_table_2);
        hash_table_2 = NULL;
    }
    if (hash_table) {
        shm_free(hash_table);
        hash_table = NULL;
    }
}
/*
 * memory cleanup routine which is called at the end of the crm114 run.
 *
 * Note: this routine is *also* called when an error occurred (e.g. out of memory),
 * so tread carefully here: do not assume all these pointers are filled.
 */
static void crm_final_cleanup(void)
{
    // GROT GROT GROT
    //
    // move every malloc/free to use xmalloc/xcalloc/xrealloc/xfree, so we can
    // be sure [x]free() will be able to cope with NULL pointers as it is.
    crm_munmap_all();

    free_hash_table(vht, vht_size);
    vht = NULL;
    //free_arg_parseblock(apb);
    //apb = NULL;
    free_regex_cache();
    cleanup_expandvar_allocations();

    free(newinputbuf);
    newinputbuf = NULL;
    free(inbuf);
    inbuf = NULL;
    free(outbuf);
    outbuf = NULL;
    free(tempbuf);
    tempbuf = NULL;

    free_debugger_data();

    crm_terminate_analysis(&analysis_cfg);

    cleanup_stdin_out_err_as_os_handles();
}
static int check_for_duplicate_block_names(void)
{
    /* Checks that all block names are unique.  Returns the number of *
     * duplicate names found.                                          */

    int error, iblk;
    struct s_hash **block_hash_table, *h_ptr;
    struct s_hash_iterator hash_iterator;

    error = 0;
    block_hash_table = alloc_hash_table();

    for (iblk = 0; iblk < num_blocks; iblk++)
        h_ptr = insert_in_hash_table(block_hash_table, block[iblk].name, iblk);

    hash_iterator = start_hash_table_iterator();
    h_ptr = get_next_hash(block_hash_table, &hash_iterator);

    while (h_ptr != NULL) {
        if (h_ptr->count != 1) {
            printf("Error: %d blocks are named %s.  Block names must be unique.\n",
                   h_ptr->count, h_ptr->name);
            error++;
        }
        h_ptr = get_next_hash(block_hash_table, &hash_iterator);
    }

    free_hash_table(block_hash_table);
    return (error);
}
void index_file(char *file, struct hash_table *table)
{
    /* get a reverse hash table of the terms in the file */
    FILE *file_fd;
    struct Parser *parser;
    char *word;

    file_fd = fopen(file, "r");
    if (file_fd == NULL) {
        /* check the handle before handing it to the parser, and bail out */
        fprintf(stderr, "couldn't get handler for %s\n", file);
        free_hash_table(table);
        return;
    }
    parser = parser_new(file_fd);

    while ((word = parser_next_word(parser))) {
        strtolower(word);
        struct hash_node *tmp = hash_table_get(table, word);
        if (tmp) {
            hash_node_add_occurrence(tmp, file);
        } else {
            struct hash_node *node = new_hash_node(word);
            node->appears_in = new_file_node(file);
            hash_table_store(table, word, node);
        }
        free(word);
    }

    parser_destroy(parser);
}
struct s_table* init_hash_table()
{
    int i;

    /* allocs the table */
    tm_table = (struct s_table*)shm_malloc(sizeof(struct s_table));
    if (!tm_table) {
        LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
        goto error0;
    }

    memset(tm_table, 0, sizeof(struct s_table));

    /* try first allocating all the structures needed for syncing */
    if (lock_initialize() == -1)
        goto error1;

    /* inits the entrys */
    for (i = 0; i < TABLE_ENTRIES; i++) {
        init_entry_lock(tm_table, (tm_table->entrys) + i);
        tm_table->entrys[i].next_label = rand();
    }

    return tm_table;

error1:
    free_hash_table();
error0:
    return 0;
}
static void free_parse(void)
{
    /* Release memory needed only during circuit netlist parsing. */

    free(num_driver);
    free_hash_table(hash_table);
    free(temp_num_pins);
}
void CMM_finalize(CMM_handle* cmm_handle)
{
    if (cmm_handle == NULL)
        return;

    free_hash_table(cmm_handle);
    free_mem_chain(cmm_handle);
    free(cmm_handle);
    /* note: this only clears the local copy of the pointer;
     * the caller's handle is not affected */
    cmm_handle = NULL;
}
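If the intent of the final `cmm_handle = NULL;` above is to invalidate the caller's handle, a double-pointer variant would do that. The sketch below is hypothetical: it reuses the same `free_hash_table`/`free_mem_chain` helpers from the snippet and is not part of the original API.

/* Hypothetical sketch: take CMM_handle** so the caller's pointer can be cleared. */
void CMM_finalize_ptr(CMM_handle **cmm_handle)
{
    if (cmm_handle == NULL || *cmm_handle == NULL)
        return;

    free_hash_table(*cmm_handle);   /* assumed helper from the snippet above */
    free_mem_chain(*cmm_handle);    /* assumed helper from the snippet above */
    free(*cmm_handle);
    *cmm_handle = NULL;             /* now the caller's handle really is NULL */
}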
int conflict_cost(Graph* graph, char** conflict_vertices, int size)
{
    int cost = 0;
    hash_table_t* rep = create_hash_table(graph->iterator_size, NULL);
    for (int i = 0; i < size; i++) {
        count_cost(graph, conflict_vertices[i], rep, &cost, NULL);
    }
    free_hash_table(rep);
    return cost;
}
FRISO_API void friso_dic_free(friso_dic_t dic)
{
    register uint_t t;
    for (t = 0; t < __FRISO_LEXICON_LENGTH__; t++) {
        //free the hash table
        free_hash_table(dic[t], default_fdic_callback);
    }
    FRISO_FREE(dic);
}
EXPORTED void search_query_free(search_query_t *query)
{
    int i;

    if (!query) return;
    free_hash_table(&query->subs_by_folder, subquery_free);
    free_hash_table(&query->subs_by_indexed, subquery_free);
    search_expr_free(query->global_sub.expr);
    ptrarray_fini(&query->folders_by_id);
    free_hash_table(&query->folders_by_name, folder_free);
    ptrarray_fini(&query->merged_msgdata);

    /* free pending MsgData arrays */
    for (i = 0; i < query->saved_msgdata.count; i++) {
        struct search_saved_msgdata *saved = ptrarray_nth(&query->saved_msgdata, i);
        index_msgdata_free(saved->msgdata, saved->n);
        free(saved);
    }
    ptrarray_fini(&query->saved_msgdata);

    free(query);
}
static void release_picking(struct ref* ref)
{
    struct edit_picking* picking = NULL;
    assert(ref);

    picking = CONTAINER_OF(ref, struct edit_picking, ref);
    EDIT(imgui_ref_put(picking->imgui));
    EDIT(model_instance_selection_ref_put(picking->instance_selection));
    APP(ref_put(picking->app));
    SL(free_hash_table(picking->picked_instances_htbl));
    MEM_FREE(picking->allocator, picking);
}
void tm_shutdown()
{
    DBG("DEBUG: tm_shutdown : start\n");

    /* destroy the hash table */
    DBG("DEBUG: tm_shutdown : emptying hash table\n");
    free_hash_table();
    DBG("DEBUG: tm_shutdown : removing semaphores\n");
    lock_cleanup();
    DBG("DEBUG: tm_shutdown : destroying tmcb lists\n");
    destroy_tmcb_lists();
    free_tm_stats();
    DBG("DEBUG: tm_shutdown : done\n");
}
void free_gc_cache(struct gc_cache *volatile cache)
{
    struct gc_cache_cell *volatile rest, *volatile next;

    rest = cache->head;
    while (rest) {
        XFreeGC(cache->dpy, rest->gc);
        next = rest->next;
        xfree(rest);
        rest = next;
    }
#ifdef GCCACHE_HASH
    free_hash_table(cache->table);
#endif
    xfree(cache);
}
/**
 * delete_search
 *
 * Terminate the current search and free all the memory involved.
 */
void Wordrec::delete_search(SEARCH_RECORD *the_search) {
  float closeness;

  closeness = (the_search->num_joints ?
    (hamming_distance(reinterpret_cast<uinT32*>(the_search->first_state),
                      reinterpret_cast<uinT32*>(the_search->best_state), 2) /
     (float) the_search->num_joints) : 0.0f);

  free_state(the_search->first_state);
  free_state(the_search->best_state);

  free_hash_table(the_search->closed_states);
  FreeHeapData(the_search->open_states, (void_dest) free_state);

  memfree(the_search);
}
void tm_shutdown(void)
{
    LM_DBG("tm_shutdown : start\n");
    unlink_timer_lists();

    /* destroy the hash table */
    LM_DBG("emptying hash table\n");
    free_hash_table();
    LM_DBG("releasing timers\n");
    free_timer_table();
    LM_DBG("removing semaphores\n");
    lock_cleanup();
    LM_DBG("destroying callback lists\n");
    destroy_tmcb_lists();
    LM_DBG("tm_shutdown : done\n");
}
int main(int argc, char **argv)
{
    item *hashTable[HASH_SIZE];
    int j;

    for (j = 0; j < HASH_SIZE; j++)
        hashTable[j] = NULL;

    item *n = create_node(-1, "test");
    insert(hashTable, &n);
    n = create_node(10, NULL);
    insert(hashTable, &n);

    item *f = find(hashTable, -1, "test");
    print(f);
    f = find(hashTable, 10, NULL);
    print(f);

    free_hash_table(hashTable);
    return 0;
}
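For an open-chaining table like the one in the example above, the matching free routine typically walks every bucket and releases each chained node. The sketch below is only illustrative: it assumes an `item` with a `next` pointer and a heap-allocated `name`, neither of which is shown in the snippet.

/* Illustrative sketch only: assumes item has { int key; char *name; item *next; }. */
void free_hash_table(item *table[HASH_SIZE])
{
    for (int j = 0; j < HASH_SIZE; j++) {
        item *cur = table[j];
        while (cur) {
            item *next = cur->next;   /* remember the chain before freeing */
            free(cur->name);          /* assumes create_node duplicated the name */
            free(cur);
            cur = next;
        }
        table[j] = NULL;              /* leave the bucket empty and reusable */
    }
}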
/**
 * Find all packing patterns in architecture
 * [0..num_packing_patterns-1]
 *
 * Limitations: Currently assumes that forced pack nets must be single-fanout,
 * as this covers all the reasonable architectures we wanted.  More complicated
 * structures should probably be handled either downstream (general packing) or
 * upstream (in tech mapping).
 * If this limitation is too constraining, the code is designed so that it can
 * be removed.
 */
t_pack_patterns *alloc_and_load_pack_patterns(OUTP int *num_packing_patterns) {
    int i, j, ncount;
    int L_num_blocks;
    struct s_hash **nhash;
    t_pack_patterns *list_of_packing_patterns;
    t_pb_graph_edge *expansion_edge;

    /* alloc and initialize array of packing patterns based on architecture complex blocks */
    nhash = alloc_hash_table();
    ncount = 0;
    for (i = 0; i < num_types; i++) {
        discover_pattern_names_in_pb_graph_node(
                type_descriptors[i].pb_graph_head, nhash, &ncount);
    }

    list_of_packing_patterns = alloc_and_init_pattern_list_from_hash(ncount, nhash);

    /* load packing patterns by traversing the edges to find edges belonging to pattern */
    for (i = 0; i < ncount; i++) {
        for (j = 0; j < num_types; j++) {
            expansion_edge = find_expansion_edge_of_pattern(i,
                    type_descriptors[j].pb_graph_head);
            if (expansion_edge == NULL) {
                continue;
            }
            L_num_blocks = 0;
            list_of_packing_patterns[i].base_cost = 0;
            backward_expand_pack_pattern_from_edge(expansion_edge,
                    list_of_packing_patterns, i, NULL, NULL, &L_num_blocks);
            list_of_packing_patterns[i].num_blocks = L_num_blocks;
            break;
        }
    }

    free_hash_table(nhash);

    *num_packing_patterns = ncount;

    return list_of_packing_patterns;
}
EXPORTED int smtpclient_close(smtpclient_t **smp)
{
    if (!smp || !*smp) {
        return 0;
    }

    int r = 0;
    smtpclient_t *sm = *smp;

    /* Close backend */
    backend_disconnect(sm->backend);
    if (sm->free_context) {
        r = sm->free_context(sm->backend);
    }
    free(sm->backend);
    sm->backend = NULL;

    /* Close log */
    if (sm->logfd != -1) {
        close(sm->logfd);
    }
    sm->logfd = -1;

    /* Free internal state */
    if (sm->have_exts) {
        free_hash_table(sm->have_exts, free);
        free(sm->have_exts);
        sm->have_exts = NULL;
    }
    buf_free(&sm->buf);
    free(sm->by);
    free(sm->ret);
    free(sm->notify);
    free(sm->authid);
    buf_free(&sm->resp.text);

    free(sm);
    *smp = NULL;
    return r;
}
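Several of the examples here (search_query_free, smtpclient_close, and the friso dictionary code) pass an element destructor into free_hash_table, so the table can release the values it owns without knowing their concrete type. The sketch below only illustrates that idiom with an assumed bucket/entry layout; it is not the implementation behind any of those calls.

#include <stdlib.h>

/* Assumed layout, for illustration only. */
struct ht_entry { char *key; void *value; struct ht_entry *next; };
struct ht { struct ht_entry **buckets; size_t size; };

/* Free every entry, handing each stored value to the caller-supplied destructor. */
void ht_free(struct ht *table, void (*free_value)(void *))
{
    for (size_t i = 0; i < table->size; i++) {
        struct ht_entry *e = table->buckets[i];
        while (e) {
            struct ht_entry *next = e->next;
            if (free_value)
                free_value(e->value);   /* e.g. plain free(), or a type-specific cleanup */
            free(e->key);
            free(e);
            e = next;
        }
    }
    free(table->buckets);
    table->buckets = NULL;
    table->size = 0;
}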
static int Zoltan_LB( ZZ *zz, int include_parts, /* Flag indicating whether to generate part informtion; 0 if called by Zoltan_LB_Balance, 1 if called by Zoltan_LB_Partition. */ int *changes, /* Set to zero or one depending on if Zoltan determines a new decomposition or not: zero - No changes to the decomposition were made by the load-balancing algorithm; migration is not needed. one - A new decomposition is suggested by the load-balancer; migration is needed to establish the new decomposition. */ int *num_gid_entries, /* The number of array entries in a global ID; set to be the max over all processors in zz->Communicator of the parameter Num_Global_ID_Entries. */ int *num_lid_entries, /* The number of array entries in a local ID; set to be the max over all processors in zz->Communicator of the parameter Num_Local_ID_Entries. */ int *num_import_objs, /* The number of non-local objects in the processor's new decomposition. */ ZOLTAN_ID_PTR *import_global_ids,/* Array of global IDs for non-local objects (i.e., objs to be imported) in the processor's new decomposition. */ ZOLTAN_ID_PTR *import_local_ids, /* Array of local IDs for non-local objects (i.e., objs to be imported) in the processor's new decomposition. */ int **import_procs, /* Array of processor IDs for processors currently owning non-local objects (i.e., objs to be imported) in this processor's new decomposition. */ int **import_to_part, /* Partition to which the objects should be imported. */ int *num_export_objs, /* The number of local objects that need to be exported from the processor to establish the new decomposition. */ ZOLTAN_ID_PTR *export_global_ids,/* Array of global IDs for objects that need to be exported (assigned and sent to other processors) to establish the new decomposition. */ ZOLTAN_ID_PTR *export_local_ids, /* Array of local IDs for objects that need to be exported (assigned and sent to other processors) to establish the new decomposition. */ int **export_procs, /* Array of destination processor IDs for objects that need to be exported to establish the new decomposition. */ int **export_to_part /* Partition to which objects should be exported. */ ) { /* * Main load-balancing routine. * Input: a Zoltan structure with appropriate function pointers set. * Output: * changes * num_import_objs * import_global_ids * import_local_ids * import_procs * import_to_part * num_export_objs * export_global_ids * export_local_ids * export_procs * export_to_part * Return values: * Zoltan error code. */ char *yo = "Zoltan_LB"; int gmax; /* Maximum number of imported/exported objects over all processors. 
*/ int error = ZOLTAN_OK; /* Error code */ double start_time, end_time; double lb_time[2] = {0.0,0.0}; char msg[256]; int comm[3],gcomm[3]; float *part_sizes = NULL, *fdummy = NULL; int wgt_dim, part_dim; int all_num_obj, i, ts, idIdx; struct Hash_Node **ht; int *export_all_procs, *export_all_to_part, *parts=NULL; ZOLTAN_ID_PTR all_global_ids=NULL, all_local_ids=NULL; ZOLTAN_ID_PTR gid; #ifdef ZOLTAN_OVIS struct OVIS_parameters ovisParameters; #endif ZOLTAN_TRACE_ENTER(zz, yo); if (zz->Proc == zz->Debug_Proc && zz->Debug_Level >= ZOLTAN_DEBUG_PARAMS){ printf("Build configuration:\n"); Zoltan_Print_Configuration(" "); printf("\n"); Zoltan_Print_Key_Params(zz); } start_time = Zoltan_Time(zz->Timer); #ifdef ZOLTAN_OVIS Zoltan_OVIS_Setup(zz, &ovisParameters); if (zz->Proc == 0) printf("OVIS PARAMETERS %s %s %d %f\n", ovisParameters.hello, ovisParameters.dll, ovisParameters.outputLevel, ovisParameters.minVersion); ovis_enabled(zz->Proc, ovisParameters.dll); #endif /* * Compute Max number of array entries per ID over all processors. * Compute Max number of return arguments for Zoltan_LB_Balance. * This is a sanity-maintaining step; we don't want different * processors to have different values for these numbers. */ comm[0] = zz->Num_GID; comm[1] = zz->Num_LID; comm[2] = zz->LB.Return_Lists; MPI_Allreduce(comm, gcomm, 3, MPI_INT, MPI_MAX, zz->Communicator); zz->Num_GID = *num_gid_entries = gcomm[0]; zz->Num_LID = *num_lid_entries = gcomm[1]; zz->LB.Return_Lists = gcomm[2]; /* assume no changes */ *changes = 0; *num_import_objs = *num_export_objs = 0; *import_global_ids = NULL; *import_local_ids = NULL; *import_procs = NULL; *import_to_part = NULL; *export_global_ids = NULL; *export_local_ids = NULL; *export_procs = NULL; *export_to_part = NULL; /* * Return if this processor is not in the Zoltan structure's * communicator. */ if (ZOLTAN_PROC_NOT_IN_COMMUNICATOR(zz)) goto End; if (zz->LB.Method == NONE) { if (zz->Proc == zz->Debug_Proc && zz->Debug_Level >= ZOLTAN_DEBUG_PARAMS) printf("%s Balancing method selected == NONE; no balancing performed\n", yo); error = ZOLTAN_WARN; goto End; } /* * Sync the random number generator across processors. */ Zoltan_Srand_Sync(Zoltan_Rand(NULL), NULL, zz->Communicator); /* Since generating a new partition, need to free old mapping vector */ zz->LB.OldRemap = zz->LB.Remap; zz->LB.Remap = NULL; error = Zoltan_LB_Build_PartDist(zz); if (error != ZOLTAN_OK && error != ZOLTAN_WARN) goto End; if (zz->Debug_Level >= ZOLTAN_DEBUG_ALL) { int i, np, fp; for (i = 0; i < zz->Num_Proc; i++) { Zoltan_LB_Proc_To_Part(zz, i, &np, &fp); printf("%d Proc_To_Part Proc %d NParts %d FPart %d\n", zz->Proc, i, np, fp); } } /* * Generate parts sizes. */ #ifdef ZOLTAN_OVIS /* set part sizes computed by OVIS, if requested. Processes set only their own value */ { float part_sizes[1]; int part_ids[1], wgt_idx[1]; wgt_idx[0] = 0; part_ids[0] = 0; ovis_getPartsize(&(part_sizes[0])); printf("Rank %d ps %f\n",zz->Proc, part_sizes[0]); /* clear out old part size info first */ Zoltan_LB_Set_Part_Sizes(zz, 0, -1, NULL, NULL, NULL); Zoltan_LB_Set_Part_Sizes(zz, 0, 1, part_ids, wgt_idx, part_sizes); } #endif wgt_dim = zz->Obj_Weight_Dim; part_dim = ((wgt_dim > 0) ? wgt_dim : 1); part_sizes = (float *) ZOLTAN_MALLOC(sizeof(float) * part_dim * zz->LB.Num_Global_Parts); if (part_sizes == NULL) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Memory error."); error = ZOLTAN_MEMERR; goto End; } /* Get part sizes. 
*/ Zoltan_LB_Get_Part_Sizes(zz, zz->LB.Num_Global_Parts, part_dim, part_sizes); #ifdef ZOLTAN_OVIS /* if (ovisParameters.outputlevel > 3) */ { int myRank = zz->Proc; if (myRank == 0){ int i, j; for (i = 0; i < zz->LB.Num_Global_Parts; i++){ for (j = 0; j < part_dim; j++){ printf("Rank %d AG: part_sizes[%d] = %f (Num_Global_Parts = %d, part_dim = %d)\n",zz->Proc, (i*part_dim+j), part_sizes[i*part_dim+j],zz->LB.Num_Global_Parts, part_dim); } } } } #endif /* * Call the actual load-balancing function. */ error = zz->LB.LB_Fn(zz, part_sizes, num_import_objs, import_global_ids, import_local_ids, import_procs, import_to_part, num_export_objs, export_global_ids, export_local_ids, export_procs, export_to_part); ZOLTAN_FREE(&part_sizes); if (error == ZOLTAN_FATAL || error == ZOLTAN_MEMERR){ sprintf(msg, "Partitioning routine returned code %d.", error); #ifdef HOST_LINUX if ((error == ZOLTAN_MEMERR) && (Zoltan_Memory_Get_Debug() > 0)){ Zoltan_write_linux_meminfo(0, "State of /proc/meminfo after malloc failure\n", 0); } #endif ZOLTAN_PRINT_ERROR(zz->Proc, yo, msg); goto End; } else if (error){ if (zz->Debug_Level >ZOLTAN_DEBUG_NONE) { sprintf(msg, "Partitioning routine returned code %d.", error); ZOLTAN_PRINT_WARN(zz->Proc, yo, msg); } } ZOLTAN_TRACE_DETAIL(zz, yo, "Done partitioning"); if (*num_import_objs >= 0) MPI_Allreduce(num_import_objs, &gmax, 1, MPI_INT, MPI_MAX, zz->Communicator); else /* use export data */ MPI_Allreduce(num_export_objs, &gmax, 1, MPI_INT, MPI_MAX, zz->Communicator); if (gmax == 0) { /* * Decomposition was not changed by the load balancing; no migration * is needed. */ if (zz->Proc == zz->Debug_Proc && zz->Debug_Level >= ZOLTAN_DEBUG_PARAMS) printf("%s No changes to the decomposition due to partitioning; " "no migration is needed.\n", yo); /* * Reset num_import_objs and num_export_objs; don't want to return * -1 for the arrays that weren't returned by ZOLTAN_LB_FN. */ *num_import_objs = *num_export_objs = 0; if (zz->LB.Return_Lists == ZOLTAN_LB_COMPLETE_EXPORT_LISTS){ /* * This parameter setting requires that all local objects * and their assignments appear in the export list. */ error= Zoltan_Get_Obj_List_Special_Malloc(zz, num_export_objs, export_global_ids, export_local_ids, wgt_dim, &fdummy, export_to_part); if (error == ZOLTAN_OK){ ZOLTAN_FREE(&fdummy); if (Zoltan_Special_Malloc(zz, (void **)export_procs, *num_export_objs, ZOLTAN_SPECIAL_MALLOC_INT)){ for (i=0; i<*num_export_objs; i++) (*export_procs)[i] = zz->Proc; } else{ error = ZOLTAN_MEMERR; } } } goto End; } /* * Check whether we know the import data, export data, or both. * * If we were given the import data, * we know what the new decomposition should look like on the * processor, but we don't know which of our local objects we have * to export to other processors to establish the new decomposition. * Reverse the argument if we were given the export data. * * Unless we were given both maps, compute the inverse map. */ if (zz->LB.Return_Lists == ZOLTAN_LB_NO_LISTS) { if (*num_import_objs >= 0) Zoltan_LB_Special_Free_Part(zz, import_global_ids, import_local_ids, import_procs, import_to_part); if (*num_export_objs >= 0) Zoltan_LB_Special_Free_Part(zz, export_global_ids, export_local_ids, export_procs, export_to_part); *num_import_objs = *num_export_objs = -1; } if (*num_import_objs >= 0){ if (*num_export_objs >= 0) { /* Both maps already available; nothing to do. 
*/; } else if (zz->LB.Return_Lists == ZOLTAN_LB_ALL_LISTS || zz->LB.Return_Lists == ZOLTAN_LB_EXPORT_LISTS || zz->LB.Return_Lists == ZOLTAN_LB_COMPLETE_EXPORT_LISTS) { /* Export lists are requested; compute export map */ error = Zoltan_Invert_Lists(zz, *num_import_objs, *import_global_ids, *import_local_ids, *import_procs, *import_to_part, num_export_objs, export_global_ids, export_local_ids, export_procs, export_to_part); if (error != ZOLTAN_OK && error != ZOLTAN_WARN) { sprintf(msg, "Error building return arguments; " "%d returned by Zoltan_Compute_Destinations\n", error); ZOLTAN_PRINT_ERROR(zz->Proc, yo, msg); goto End; } if (zz->LB.Return_Lists == ZOLTAN_LB_EXPORT_LISTS || zz->LB.Return_Lists == ZOLTAN_LB_COMPLETE_EXPORT_LISTS) { /* Method returned import lists, but only export lists were desired. */ /* Import lists not needed; free them. */ *num_import_objs = -1; Zoltan_LB_Special_Free_Part(zz, import_global_ids, import_local_ids, import_procs, import_to_part); } } } else { /* (*num_import_objs < 0) */ if (*num_export_objs >= 0) { /* Only export lists have been returned. */ if (zz->LB.Return_Lists == ZOLTAN_LB_ALL_LISTS || zz->LB.Return_Lists == ZOLTAN_LB_IMPORT_LISTS) { /* Compute import map */ error = Zoltan_Invert_Lists(zz, *num_export_objs, *export_global_ids, *export_local_ids, *export_procs, *export_to_part, num_import_objs, import_global_ids, import_local_ids, import_procs, import_to_part); if (error != ZOLTAN_OK && error != ZOLTAN_WARN) { sprintf(msg, "Error building return arguments; " "%d returned by Zoltan_Compute_Destinations\n", error); ZOLTAN_PRINT_ERROR(zz->Proc, yo, msg); goto End; } if (zz->LB.Return_Lists == ZOLTAN_LB_IMPORT_LISTS) { /* Method returned export lists, but only import lists are desired. */ /* Export lists not needed; free them. */ *num_export_objs = -1; Zoltan_LB_Special_Free_Part(zz, export_global_ids, export_local_ids, export_procs, export_to_part); } } } else { /* *num_export_objs < 0 && *num_import_objs < 0) */ if (zz->LB.Return_Lists) { /* No map at all available */ ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Load-balancing function returned " "neither import nor export data."); error = ZOLTAN_WARN; goto End; } } } if (zz->LB.Return_Lists == ZOLTAN_LB_COMPLETE_EXPORT_LISTS) { /* * Normally, Zoltan_LB returns in the export lists all local * objects that are moving off processor, or that are assigned * to a part on the local processor that is not the * default part. This setting of Return_Lists requests * that all local objects be included in the export list. 
*/ if (*num_export_objs == 0){ /* all local objects are remaining on processor */ error= Zoltan_Get_Obj_List_Special_Malloc(zz, num_export_objs, export_global_ids, export_local_ids, wgt_dim, &fdummy, export_to_part); if (error == ZOLTAN_OK){ ZOLTAN_FREE(&fdummy); if (*num_export_objs) { if (Zoltan_Special_Malloc(zz, (void **)export_procs, *num_export_objs, ZOLTAN_SPECIAL_MALLOC_INT)){ for (i=0; i<*num_export_objs; i++) (*export_procs)[i] = zz->Proc; } else{ error = ZOLTAN_MEMERR; } } } if ((error != ZOLTAN_OK) && (error != ZOLTAN_WARN)) goto End; } else{ all_num_obj = zz->Get_Num_Obj(zz->Get_Num_Obj_Data, &error); if (*num_export_objs < all_num_obj){ /* Create a lookup table for exported IDs */ ts = Zoltan_Recommended_Hash_Size(*num_export_objs); ht = create_hash_table(zz, *export_global_ids, *num_export_objs, ts); /* Create a list of all gids, lids and parts */ error= Zoltan_Get_Obj_List_Special_Malloc(zz, &all_num_obj, &all_global_ids, &all_local_ids, wgt_dim, &fdummy, &parts); if ((error == ZOLTAN_OK) || (error == ZOLTAN_WARN)){ ZOLTAN_FREE(&fdummy); if ((Zoltan_Special_Malloc(zz, (void **)(void*)&export_all_procs, all_num_obj, ZOLTAN_SPECIAL_MALLOC_INT)==0) || (Zoltan_Special_Malloc(zz, (void **)(void*)&export_all_to_part, all_num_obj, ZOLTAN_SPECIAL_MALLOC_INT)==0)){ error = ZOLTAN_MEMERR; } } if ((error != ZOLTAN_OK) && (error != ZOLTAN_WARN)){ sprintf(msg, "Error building complete export list; " "%d returned by Zoltan_Get_Obj_List\n", error); ZOLTAN_PRINT_ERROR(zz->Proc, yo, msg); goto End; } gid = all_global_ids; for (i=0; i < all_num_obj; i++, gid += zz->Num_GID){ idIdx = search_hash_table(zz, gid, ht, ts); if (idIdx >= 0){ export_all_procs[i] = (*export_procs)[idIdx]; export_all_to_part[i] = (*export_to_part)[idIdx]; } else{ export_all_procs[i] = zz->Proc; export_all_to_part[i] = parts[i]; } } free_hash_table(ht, ts); Zoltan_LB_Special_Free_Part(zz, export_global_ids, export_local_ids, export_procs, export_to_part); Zoltan_Special_Free(zz, (void **)(void*)&parts, ZOLTAN_SPECIAL_MALLOC_INT); *export_global_ids = all_global_ids; *export_local_ids = all_local_ids; *export_procs = export_all_procs; *export_to_part = export_all_to_part; *num_export_objs = all_num_obj; } } } ZOLTAN_TRACE_DETAIL(zz, yo, "Done building return arguments"); end_time = Zoltan_Time(zz->Timer); lb_time[0] = end_time - start_time; if (zz->Debug_Level >= ZOLTAN_DEBUG_LIST) { int i; Zoltan_Print_Sync_Start(zz->Communicator, TRUE); printf("ZOLTAN: Objects to be imported to Proc %d\n", zz->Proc); for (i = 0; i < *num_import_objs; i++) { printf(" Obj: "); ZOLTAN_PRINT_GID(zz, &((*import_global_ids)[i*zz->Num_GID])); printf(" To part: %4d", (*import_to_part != NULL ? (*import_to_part)[i] : zz->Proc)); printf(" From processor: %4d\n", (*import_procs)[i]); } printf("\n"); printf("ZOLTAN: Objects to be exported from Proc %d\n", zz->Proc); for (i = 0; i < *num_export_objs; i++) { printf(" Obj: "); ZOLTAN_PRINT_GID(zz, &((*export_global_ids)[i*zz->Num_GID])); printf(" To part: %4d", (*export_to_part != NULL ? (*export_to_part)[i] : (*export_procs)[i])); printf(" To processor: %4d\n", (*export_procs)[i]); } Zoltan_Print_Sync_End(zz->Communicator, TRUE); } /* * If the Help_Migrate flag is set, perform migration for the application. 
*/ if (zz->Migrate.Auto_Migrate) { ZOLTAN_TRACE_DETAIL(zz, yo, "Begin auto-migration"); start_time = Zoltan_Time(zz->Timer); error = Zoltan_Migrate(zz, *num_import_objs, *import_global_ids, *import_local_ids, *import_procs, *import_to_part, *num_export_objs, *export_global_ids, *export_local_ids, *export_procs, *export_to_part); if (error != ZOLTAN_OK && error != ZOLTAN_WARN) { sprintf(msg, "Error in auto-migration; %d returned from " "Zoltan_Help_Migrate\n", error); ZOLTAN_PRINT_ERROR(zz->Proc, yo, msg); goto End; } end_time = Zoltan_Time(zz->Timer); lb_time[1] = end_time - start_time; ZOLTAN_TRACE_DETAIL(zz, yo, "Done auto-migration"); } /* Print timing info */ if (zz->Debug_Level >= ZOLTAN_DEBUG_ZTIME) { if (zz->Proc == zz->Debug_Proc) { printf("ZOLTAN Times: \n"); } Zoltan_Print_Stats (zz->Communicator, zz->Debug_Proc, lb_time[0], "ZOLTAN Partition: "); if (zz->Migrate.Auto_Migrate) Zoltan_Print_Stats (zz->Communicator, zz->Debug_Proc, lb_time[1], "ZOLTAN Migrate: "); } *changes = 1; End: ZOLTAN_TRACE_EXIT(zz, yo); return (error); }
void read_user_pad_loc(char *pad_loc_file) { /* Reads in the locations of the IO pads from a file. */ struct s_hash **hash_table, *h_ptr; int iblk, i, j, xtmp, ytmp, bnum, k; FILE *fp; char buf[BUFSIZE], bname[BUFSIZE], *ptr; printf("\nReading locations of IO pads from %s.\n", pad_loc_file); linenum = 0; fp = my_fopen(pad_loc_file, "r"); hash_table = alloc_hash_table(); for(iblk = 0; iblk < num_blocks; iblk++) { if(block[iblk].type == IO_TYPE) { h_ptr = insert_in_hash_table(hash_table, block[iblk].name, iblk); block[iblk].x = OPEN; /* Mark as not seen yet. */ } } for(i = 0; i <= nx + 1; i++) { for(j = 0; j <= ny + 1; j++) { if(grid[i][j].type == IO_TYPE) { for(k = 0; k < IO_TYPE->capacity; k++) grid[i][j].blocks[k] = OPEN; /* Flag for err. check */ } } } ptr = my_fgets(buf, BUFSIZE, fp); while(ptr != NULL) { ptr = my_strtok(buf, TOKENS, fp, buf); if(ptr == NULL) { ptr = my_fgets(buf, BUFSIZE, fp); continue; /* Skip blank or comment lines. */ } strcpy(bname, ptr); ptr = my_strtok(NULL, TOKENS, fp, buf); if(ptr == NULL) { printf("Error: line %d is incomplete.\n", linenum); exit(1); } sscanf(ptr, "%d", &xtmp); ptr = my_strtok(NULL, TOKENS, fp, buf); if(ptr == NULL) { printf("Error: line %d is incomplete.\n", linenum); exit(1); } sscanf(ptr, "%d", &ytmp); ptr = my_strtok(NULL, TOKENS, fp, buf); if(ptr == NULL) { printf("Error: line %d is incomplete.\n", linenum); exit(1); } sscanf(ptr, "%d", &k); ptr = my_strtok(NULL, TOKENS, fp, buf); if(ptr != NULL) { printf("Error: extra characters at end of line %d.\n", linenum); exit(1); } h_ptr = get_hash_entry(hash_table, bname); if(h_ptr == NULL) { printf("Error: block %s on line %d: no such IO pad.\n", bname, linenum); exit(1); } bnum = h_ptr->index; i = xtmp; j = ytmp; if(block[bnum].x != OPEN) { printf ("Error: line %d. Block %s listed twice in pad file.\n", linenum, bname); exit(1); } if(i < 0 || i > nx + 1 || j < 0 || j > ny + 1) { printf("Error: block #%d (%s) location\n", bnum, bname); printf("(%d,%d) is out of range.\n", i, j); exit(1); } block[bnum].x = i; /* Will be reloaded by initial_placement anyway. */ block[bnum].y = j; /* I need to set .x only as a done flag. */ if(grid[i][j].type != IO_TYPE) { printf("Error: attempt to place IO block %s in \n", bname); printf("an illegal location (%d, %d).\n", i, j); exit(1); } if(k >= IO_TYPE->capacity || k < 0) { printf ("Error: Block %s subblock number (%d) on line %d is out of " "range.\n", bname, k, linenum); exit(1); } grid[i][j].blocks[k] = bnum; grid[i][j].usage++; ptr = my_fgets(buf, BUFSIZE, fp); } for(iblk = 0; iblk < num_blocks; iblk++) { if(block[iblk].type == IO_TYPE && block[iblk].x == OPEN) { printf ("Error: IO block %s location was not specified in " "the pad file.\n", block[iblk].name); exit(1); } } fclose(fp); free_hash_table(hash_table); printf("Successfully read %s.\n\n", pad_loc_file); }
/*
 * Close connections and release memory
 */
void clean_trusted(void)
{
    if (hash_table_1) free_hash_table(hash_table_1);
    if (hash_table_2) free_hash_table(hash_table_2);
    if (hash_table) shm_free(hash_table);
}
/*
 * Initialize data structures
 */
int init_trusted(void)
{
    /* Check if hash table needs to be loaded from trusted table */
    if (!db_url.s) {
        LM_INFO("db_url parameter of permissions module not set, "
                "disabling allow_trusted\n");
        return 0;
    } else {
        if (db_bind_mod(&db_url, &perm_dbf) < 0) {
            LM_ERR("load a database support module\n");
            return -1;
        }

        if (!DB_CAPABILITY(perm_dbf, DB_CAP_QUERY)) {
            LM_ERR("database module does not implement 'query' function\n");
            return -1;
        }
    }

    hash_table_1 = hash_table_2 = 0;
    hash_table = 0;

    if (db_mode == ENABLE_CACHE) {
        db_handle = perm_dbf.init(&db_url);
        if (!db_handle) {
            LM_ERR("unable to connect database\n");
            return -1;
        }

        if (db_check_table_version(&perm_dbf, db_handle, &trusted_table,
                                   TABLE_VERSION) < 0) {
            LM_ERR("error during table version check.\n");
            perm_dbf.close(db_handle);
            return -1;
        }

        hash_table_1 = new_hash_table();
        if (!hash_table_1) return -1;

        hash_table_2 = new_hash_table();
        if (!hash_table_2) goto error;

        hash_table = (struct trusted_list ***)shm_malloc
            (sizeof(struct trusted_list **));
        if (!hash_table) goto error;

        *hash_table = hash_table_1;

        if (reload_trusted_table() == -1) {
            LM_CRIT("reload of trusted table failed\n");
            goto error;
        }

        perm_dbf.close(db_handle);
        db_handle = 0;
    }
    return 0;

error:
    if (hash_table_1) {
        free_hash_table(hash_table_1);
        hash_table_1 = 0;
    }
    if (hash_table_2) {
        free_hash_table(hash_table_2);
        hash_table_2 = 0;
    }
    if (hash_table) {
        shm_free(hash_table);
        hash_table = 0;
    }
    perm_dbf.close(db_handle);
    db_handle = 0;
    return -1;
}
void bfs_bbr(int upper_bound)
/*
   1. This function uses breadth first search (BFS) branch, bound, and remember (BBR)
      to find an optimal solution for the simple assembly line balancing problem.
   2. upper_bound = upper bound on the number of stations needed.  Search for a
      solution with fewer than upper_bound stations.
   3. Written 3/3/06.
*/
{
    char     LB;
    int      count, i, j, index, LB1, level, n_eligible, status, t_sum;
    double   cpu;
    clock_t  start_time;

    start_time = clock();
    UB = upper_bound;

    initialize_hash_table();
    reinitialize_states();

    // Add the root problem to the hash table and the list of states.

    t_sum = 0;
    for (i = 1; i <= n_tasks; i++) {
        count = 0;
        t_sum += t[i];
        for (j = 1; j <= n_tasks; j++) {
            if (predecessor_matrix[j][i] == 1) count++;
        }
        degrees[i] = count;
    }

    LB1 = (int) ceil((double) t_sum / (double) cycle);
    if (LB1 < UB) {
        LB = (char) LB1;
        index = find_or_insert(0.0, degrees, 0, LB, 0, 0, -1, 0, &status);
    }
    if (bin_pack_flag == -1) bin_pack_flag = bin_pack_lb;

    // Main loop
    // Modified 5/19/09 to call gen_loads iff states[index].open = 1.

    index = get_state();
    level = 0;
    count = 0;
    while (index >= 0) {
        cpu = (double) (clock() - search_info.start_time) / CLOCKS_PER_SEC;
        if (cpu > CPU_LIMIT) {
            printf("Time limit reached\n");
            verified_optimality = 0;
            break;
        }
        if (state_space_exceeded == 1) {
            verified_optimality = 0;
            break;
        }
        if (states[index].n_stations > level) {
            level = states[index].n_stations;
            printf("%2d %10d %10d\n", level, count, last_state - first_state + 2);
            count = 0;
            //prn_states(level);
            //if(level >= 3) return;
        }
        if (states[index].open == 1) {
            states[index].open = 0;
            count++;
            search_info.n_explored++;
            station = states[index].n_stations + 1;
            idle = states[index].idle;
            hash_value = states[index].hash_value;
            previous = states[index].previous;
            for (i = 1; i <= n_tasks; i++) degrees[i] = states[index].degrees[i];

            n_eligible = 0;
            for (i = 1; i <= n_tasks; i++) {
                assert((-1 <= degrees[i]) && (degrees[i] <= n_tasks));
                if (degrees[i] == 0) {
                    eligible[++n_eligible] = i;
                }
            }

            gen_loads(1, cycle, 1, n_eligible);
        } else {
            states[index].open = 0;
        }

        index = get_state();
    }

    search_info.bfs_bbr_cpu += (double) (clock() - start_time) / CLOCKS_PER_SEC;

    free_hash_table();
}
int spai (matrix *A, matrix **spai_mat, FILE *messages_arg, /* file for warning messages */ double epsilon_arg, /* tolerance */ int nbsteps_arg, /* max number of "improvement" steps per line */ int max_arg, /* max dimensions of I, q, etc. */ int maxnew_arg, /* max number of new entries per step */ int cache_size_arg, /* one of (1,2,3,4,5,6) indicting size of cache */ /* cache_size == 0 indicates no caching */ int verbose_arg, int spar_arg, int lower_diag_arg, int upper_diag_arg, double tau_arg) { matrix *M; int col,ierr; int cache_sizes[6]; /* Only create resplot for the numprocs=1 case. */ if (debug && (A->numprocs == 1)) { resplot_fptr = fopen("resplot","w"); fprintf(resplot_fptr, "ep=%5.5lf ns=%d mn=%d bs=%d\n", epsilon_arg,nbsteps_arg,maxnew_arg,A->bs); fprintf(resplot_fptr,"\n"); fprintf(resplot_fptr,"scol: scalar column number\n"); fprintf(resplot_fptr,"srn: scalar resnorm\n"); fprintf(resplot_fptr,"bcol: block column number\n"); fprintf(resplot_fptr,"brn: block resnorm\n"); fprintf(resplot_fptr,"* indicates epsilon not attained\n"); fprintf(resplot_fptr,"\n"); fprintf(resplot_fptr," scol srn bcol brn\n"); } start_col = 0; num_bad_cols = 0; cache_sizes[0] = 101; cache_sizes[1] = 503; cache_sizes[2] = 2503; cache_sizes[3] = 12503; cache_sizes[4] = 62501; cache_sizes[5] = 104743; if (verbose_arg && !A->myid) { if (spar_arg == 0) printf("\n\nComputing SPAI: epsilon = %f\n",epsilon_arg); else if (spar_arg == 1) printf("\n\nComputing SPAI: tau = %f\n",tau_arg); else if (spar_arg == 2) printf("\n\nComputing SPAI: # diagonals = %d\n", lower_diag_arg+upper_diag_arg+1); fflush(stdout); } epsilon = epsilon_arg; message = messages_arg; maxnew = maxnew_arg; max_dim = max_arg; /* Determine maximum number of scalar nonzeros for any column of M */ if (spar_arg == 0) { nbsteps = nbsteps_arg; maxapi = A->max_block_size * (1 + maxnew*nbsteps); } else if(spar_arg == 1) { nbsteps = A->maxnz; maxapi = A->max_block_size * (1 + nbsteps); } else { nbsteps = lower_diag_arg+upper_diag_arg+1; maxapi = A->max_block_size * (1 + nbsteps); } allocate_globals(A); #ifdef MPI MPI_Barrier(A->comm); #endif if ((cache_size_arg < 0) || (cache_size_arg > 6)) { fprintf(stderr,"illegal cache size in spai\n"); exit(1); } if (cache_size_arg > 0) ht = init_hash_table(cache_sizes[cache_size_arg-1]); M = clone_matrix(A); ndone = 0; Im_done = 0; all_done = 0; next_line = 0; /* Timing of SPAI starts here. In a "real production" code everything before this could be static. */ if (verbose_arg) start_timer(ident_spai); if ((ierr = precompute_column_square_inverses(A)) != 0) return ierr; #ifdef MPI MPI_Barrier(A->comm); #endif for (;;) { col = grab_Mline(A, M, A->comm); if (debug && col >= 0) { fprintf(fptr_dbg,"col=%d of %d\n",col,A->n); fflush(fptr_dbg); } if (col < 0 ) break; if ((ierr = spai_line(A,col,spar_arg,lower_diag_arg,upper_diag_arg,tau_arg,M)) != 0) return ierr; } #ifdef MPI say_Im_done(A,M); do { com_server(A,M); } while (! all_done); MPI_Barrier(A->comm); #endif #ifdef MPI MPI_Barrier(A->comm); #endif if (verbose_arg) { stop_timer(ident_spai); report_times(ident_spai,"spai",0,A->comm); } free_globals(nbsteps); free_hash_table(ht); if (resplot_fptr) fclose(resplot_fptr); *spai_mat = M; return 0; }
int main_Tokenize(int argc,char* const argv[]) { if (argc==1) { usage(); return 0; } char alphabet[FILENAME_MAX]=""; char token_file[FILENAME_MAX]=""; Encoding encoding_output = DEFAULT_ENCODING_OUTPUT; int bom_output = DEFAULT_BOM_OUTPUT; int mask_encoding_compatibility_input = DEFAULT_MASK_ENCODING_COMPATIBILITY_INPUT; int val,index=-1; int mode=NORMAL; struct OptVars* vars=new_OptVars(); while (EOF!=(val=getopt_long_TS(argc,argv,optstring_Tokenize,lopts_Tokenize,&index,vars))) { switch(val) { case 'a': if (vars->optarg[0]=='\0') { fatal_error("You must specify a non empty alphabet file name\n"); } strcpy(alphabet,vars->optarg); break; case 'c': mode=CHAR_BY_CHAR; break; case 'w': mode=NORMAL; break; case 't': if (vars->optarg[0]=='\0') { fatal_error("You must specify a non empty token file name\n"); } strcpy(token_file,vars->optarg); break; case 'k': if (vars->optarg[0]=='\0') { fatal_error("Empty input_encoding argument\n"); } decode_reading_encoding_parameter(&mask_encoding_compatibility_input,vars->optarg); break; case 'q': if (vars->optarg[0]=='\0') { fatal_error("Empty output_encoding argument\n"); } decode_writing_encoding_parameter(&encoding_output,&bom_output,vars->optarg); break; case 'h': usage(); return 0; case ':': if (index==-1) fatal_error("Missing argument for option -%c\n",vars->optopt); else fatal_error("Missing argument for option --%s\n",lopts_Tokenize[index].name); case '?': if (index==-1) fatal_error("Invalid option -%c\n",vars->optopt); else fatal_error("Invalid option --%s\n",vars->optarg); break; } index=-1; } if (vars->optind!=argc-1) { fatal_error("Invalid arguments: rerun with --help\n"); } U_FILE* text; U_FILE* out; U_FILE* output; U_FILE* enter; char tokens_txt[FILENAME_MAX]; char text_cod[FILENAME_MAX]; char enter_pos[FILENAME_MAX]; Alphabet* alph=NULL; get_snt_path(argv[vars->optind],text_cod); strcat(text_cod,"text.cod"); get_snt_path(argv[vars->optind],tokens_txt); strcat(tokens_txt,"tokens.txt"); get_snt_path(argv[vars->optind],enter_pos); strcat(enter_pos,"enter.pos"); text=u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input,argv[vars->optind],U_READ); if (text==NULL) { fatal_error("Cannot open text file %s\n",argv[vars->optind]); } if (alphabet[0]!='\0') { alph=load_alphabet(alphabet); if (alph==NULL) { error("Cannot load alphabet file %s\n",alphabet); u_fclose(text); return 1; } } out=u_fopen(BINARY,text_cod,U_WRITE); if (out==NULL) { error("Cannot create file %s\n",text_cod); u_fclose(text); if (alph!=NULL) { free_alphabet(alph); } return 1; } enter=u_fopen(BINARY,enter_pos,U_WRITE); if (enter==NULL) { error("Cannot create file %s\n",enter_pos); u_fclose(text); u_fclose(out); if (alph!=NULL) { free_alphabet(alph); } return 1; } vector_ptr* tokens=new_vector_ptr(4096); vector_int* n_occur=new_vector_int(4096); vector_int* n_enter_pos=new_vector_int(4096); struct hash_table* hashtable=new_hash_table((HASH_FUNCTION)hash_unichar,(EQUAL_FUNCTION)u_equal, (FREE_FUNCTION)free,NULL,(KEYCOPY_FUNCTION)keycopy); if (token_file[0]!='\0') { load_token_file(token_file,mask_encoding_compatibility_input,tokens,hashtable,n_occur); } output=u_fopen_creating_versatile_encoding(encoding_output,bom_output,tokens_txt,U_WRITE); if (output==NULL) { error("Cannot create file %s\n",tokens_txt); u_fclose(text); u_fclose(out); u_fclose(enter); if (alph!=NULL) { free_alphabet(alph); } free_hash_table(hashtable); free_vector_ptr(tokens,free); free_vector_int(n_occur); free_vector_int(n_enter_pos); return 1; } u_fprintf(output,"0000000000\n"); int SENTENCES=0; 
int TOKENS_TOTAL=0; int WORDS_TOTAL=0; int DIGITS_TOTAL=0; u_printf("Tokenizing text...\n"); if (mode==NORMAL) { normal_tokenization(text,out,output,alph,tokens,hashtable,n_occur,n_enter_pos, &SENTENCES,&TOKENS_TOTAL,&WORDS_TOTAL,&DIGITS_TOTAL); } else { char_by_char_tokenization(text,out,output,alph,tokens,hashtable,n_occur,n_enter_pos, &SENTENCES,&TOKENS_TOTAL,&WORDS_TOTAL,&DIGITS_TOTAL); } u_printf("\nDone.\n"); save_new_line_positions(enter,n_enter_pos); u_fclose(enter); u_fclose(text); u_fclose(out); u_fclose(output); write_number_of_tokens(tokens_txt,encoding_output,bom_output,tokens->nbelems); // we compute some statistics get_snt_path(argv[vars->optind],tokens_txt); strcat(tokens_txt,"stats.n"); output=u_fopen_creating_versatile_encoding(encoding_output,bom_output,tokens_txt,U_WRITE); if (output==NULL) { error("Cannot write %s\n",tokens_txt); } else { compute_statistics(output,tokens,alph,SENTENCES,TOKENS_TOTAL,WORDS_TOTAL,DIGITS_TOTAL); u_fclose(output); } // we save the tokens by frequence get_snt_path(argv[vars->optind],tokens_txt); strcat(tokens_txt,"tok_by_freq.txt"); output=u_fopen_creating_versatile_encoding(encoding_output,bom_output,tokens_txt,U_WRITE); if (output==NULL) { error("Cannot write %s\n",tokens_txt); } else { sort_and_save_by_frequence(output,tokens,n_occur); u_fclose(output); } // we save the tokens by alphabetical order get_snt_path(argv[vars->optind],tokens_txt); strcat(tokens_txt,"tok_by_alph.txt"); output=u_fopen_creating_versatile_encoding(encoding_output,bom_output,tokens_txt,U_WRITE); if (output==NULL) { error("Cannot write %s\n",tokens_txt); } else { sort_and_save_by_alph_order(output,tokens,n_occur); u_fclose(output); } free_hash_table(hashtable); free_vector_ptr(tokens,free); free_vector_int(n_occur); free_vector_int(n_enter_pos); if (alph!=NULL) { free_alphabet(alph); } free_OptVars(vars); return 0; }
int main(int argc, char **argv) { int opt, r = 0; char *alt_config = NULL, *pub = NULL, *ver = NULL, *winfile = NULL; char prefix[2048]; enum { REBUILD, WINZONES, NONE } op = NONE; if ((geteuid()) == 0 && (become_cyrus(/*ismaster*/0) != 0)) { fatal("must run as the Cyrus user", EC_USAGE); } while ((opt = getopt(argc, argv, "C:r:vw:")) != EOF) { switch (opt) { case 'C': /* alt config file */ alt_config = optarg; break; case 'r': if (op == NONE) { op = REBUILD; pub = optarg; ver = strchr(optarg, ':'); if (ver) *ver++ = '\0'; else usage(); } else usage(); break; case 'v': verbose = 1; break; case 'w': if (op == NONE) { op = WINZONES; winfile = optarg; } else usage(); break; default: usage(); } } cyrus_init(alt_config, "ctl_zoneinfo", 0, 0); signals_set_shutdown(&shut_down); signals_add_handlers(0); snprintf(prefix, sizeof(prefix), "%s%s", config_dir, FNAME_ZONEINFODIR); switch (op) { case REBUILD: { struct hash_table tzentries; struct zoneinfo *info; struct txn *tid = NULL; char buf[1024]; FILE *fp; construct_hash_table(&tzentries, 500, 1); /* Add INFO record (overall lastmod and TZ DB source version) */ info = xzmalloc(sizeof(struct zoneinfo)); info->type = ZI_INFO; appendstrlist(&info->data, pub); appendstrlist(&info->data, ver); hash_insert(INFO_TZID, info, &tzentries); /* Add LEAP record (last updated and hash) */ snprintf(buf, sizeof(buf), "%s%s", prefix, FNAME_LEAPSECFILE); if (verbose) printf("Processing leap seconds file %s\n", buf); if (!(fp = fopen(buf, "r"))) { fprintf(stderr, "Could not open leap seconds file %s\n", buf); } else { struct zoneinfo *leap = xzmalloc(sizeof(struct zoneinfo)); leap->type = ZI_INFO; while(fgets(buf, sizeof(buf), fp)) { if (buf[0] == '#') { /* comment line */ if (buf[1] == '$') { /* last updated */ unsigned long last; sscanf(buf+2, "\t%lu", &last); leap->dtstamp = last - NIST_EPOCH_OFFSET; } else if (buf[1] == 'h') { /* hash */ char *p, *hash = buf+3 /* skip "#h\t" */; /* trim trailing whitespace */ for (p = hash + strlen(hash); isspace(*--p); *p = '\0'); appendstrlist(&leap->data, hash); } } } fclose(fp); hash_insert(LEAP_TZID, leap, &tzentries); info->dtstamp = leap->dtstamp; } /* Add ZONE/LINK records */ do_zonedir(prefix, &tzentries, info); zoneinfo_open(NULL); /* Store records */ hash_enumerate(&tzentries, &store_zoneinfo, &tid); zoneinfo_close(tid); free_hash_table(&tzentries, &free_zoneinfo); break; } case WINZONES: { xmlParserCtxtPtr ctxt; xmlDocPtr doc; xmlNodePtr node; struct buf tzidbuf = BUF_INITIALIZER; struct buf aliasbuf = BUF_INITIALIZER; if (verbose) printf("Processing Windows Zone file %s\n", winfile); /* Parse the XML file */ ctxt = xmlNewParserCtxt(); if (!ctxt) { fprintf(stderr, "Failed to create XML parser context\n"); break; } doc = xmlCtxtReadFile(ctxt, winfile, NULL, 0); xmlFreeParserCtxt(ctxt); if (!doc) { fprintf(stderr, "Failed to parse XML document\n"); break; } node = xmlDocGetRootElement(doc); if (!node || xmlStrcmp(node->name, BAD_CAST "supplementalData")) { fprintf(stderr, "Incorrect root node\n"); goto done; } for (node = xmlFirstElementChild(node); node && xmlStrcmp(node->name, BAD_CAST "windowsZones"); node = xmlNextElementSibling(node)); if (!node) { fprintf(stderr, "Missing windowsZones node\n"); goto done; } node = xmlFirstElementChild(node); if (!node || xmlStrcmp(node->name, BAD_CAST "mapTimezones")) { fprintf(stderr, "Missing mapTimezones node\n"); goto done; } if (chdir(prefix)) { fprintf(stderr, "chdir(%s) failed\n", prefix); goto done; } for (node = xmlFirstElementChild(node); node; node = 
xmlNextElementSibling(node)) { if (!xmlStrcmp(node->name, BAD_CAST "mapZone") && !xmlStrcmp(xmlGetProp(node, BAD_CAST "territory"), BAD_CAST "001")) { const char *tzid, *alias; buf_setcstr(&tzidbuf, (const char *) xmlGetProp(node, BAD_CAST "type")); buf_appendcstr(&tzidbuf, ".ics"); tzid = buf_cstring(&tzidbuf); buf_setcstr(&aliasbuf, (const char *) xmlGetProp(node, BAD_CAST "other")); buf_appendcstr(&aliasbuf, ".ics"); alias = buf_cstring(&aliasbuf); if (verbose) printf("\tLINK: %s -> %s\n", alias, tzid); if (symlink(tzid, alias)) { if (errno == EEXIST) { struct stat sbuf; if (stat(alias, &sbuf)) { fprintf(stderr, "stat(%s) failed: %s\n", alias, strerror(errno)); errno = EEXIST; } else if (sbuf.st_mode & S_IFLNK) { char link[MAX_MAILBOX_PATH+1]; int n = readlink(alias, link, MAX_MAILBOX_PATH); if (n == -1) { fprintf(stderr, "readlink(%s) failed: %s\n", alias, strerror(errno)); errno = EEXIST; } else if (n == (int) strlen(tzid) && !strncmp(tzid, link, n)) { errno = 0; } } } if (errno) { fprintf(stderr, "symlink(%s, %s) failed: %s\n", tzid, alias, strerror(errno)); } } } } done: buf_free(&aliasbuf); buf_free(&tzidbuf); xmlFreeDoc(doc); break; } case NONE: r = 2; usage(); break; } cyrus_done(); return r; }
static int __init test_init(void)
{
    unsigned mask = 0x7FFFFFFF;
    printk(KERN_INFO "test_init() starts\n");

    /* kernel code: use kmalloc/kfree rather than userspace malloc/free */
    struct hash_table* table = kmalloc(sizeof(struct hash_table), GFP_KERNEL);
    if (!table) {
        printk(KERN_ERR "Error: out of memory!\n");
        return -ENOMEM;
    }
    if (init_hash_table(table, 20) < 0) {
        printk(KERN_ERR "Error!\n");
        kfree(table);
        return -ENOMEM;
    }

    unsigned i;
    unsigned keys[30];

    // add 30 random key value pairs
    printk(KERN_INFO "Adding 30 random key value pairs...\n");
    for (i = 0; i < 30; i++) {
        unsigned key = 0;
        get_random_bytes(&key, sizeof(unsigned));
        key = key & mask;
        keys[i] = key;
        int value = -key;
        put(table, key, value);
    }

    // print them out
    printk(KERN_INFO "Printing those 30 newly added key value pairs...\n");
    for (i = 0; i < 30; i++) {
        int* map = get(table, keys[i]);
        if (!map) {
            printk(KERN_INFO "key %u does not exist\n", keys[i]);
            continue;
        }
        printk(KERN_INFO "%u : %d\n", keys[i], *map);
    }

    // print 10 random keys
    printk(KERN_INFO "Printing 10 random key value pairs...\n");
    for (i = 0; i < 10; i++) {
        unsigned key = 0;
        get_random_bytes(&key, sizeof(unsigned));
        key = key & mask;
        int* map = get(table, key);
        if (!map) {
            printk(KERN_INFO "key %u does not exist\n", key);
            continue;
        }
        printk(KERN_INFO "%u : %d\n", key, *map);
    }

    // remove the first 10 keys
    printk(KERN_INFO "Removing the first 10 keys...\n");
    for (i = 0; i < 10; i++) {
        if (erase(table, keys[i]) < 0) {
            printk(KERN_ERR "erase(): key %u does not exist\n", keys[i]);
        }
    }

    // print all out
    printk(KERN_INFO "Printing all key value pairs...\n");
    for (i = 0; i < 30; i++) {
        int* map = get(table, keys[i]);
        if (!map) {
            printk(KERN_INFO "key %u does not exist\n", keys[i]);
            continue;
        }
        printk(KERN_INFO "%u : %d\n", keys[i], *map);
    }

    // remove 10 random keys
    printk(KERN_INFO "Removing 10 random key value pairs...\n");
    for (i = 0; i < 10; i++) {
        unsigned key = 0;
        get_random_bytes(&key, sizeof(unsigned));
        key = key & mask;
        if (erase(table, key) < 0) {
            printk(KERN_ERR "erase(): key %u does not exist\n", key);
        }
    }

    // print all out
    printk(KERN_INFO "Printing all key value pairs...\n");
    for (i = 0; i < 30; i++) {
        int* map = get(table, keys[i]);
        if (!map) {
            printk(KERN_INFO "key %u does not exist\n", keys[i]);
            continue;
        }
        printk(KERN_INFO "%u : %d\n", keys[i], *map);
    }

    free_hash_table(table);
    kfree(table);
    printk(KERN_INFO "test_init() ends\n");
    return 0;
}
/* * Construct an iCalendar property value from XML content. */ static icalvalue *xml_element_to_icalvalue(xmlNodePtr xtype, icalvalue_kind kind) { icalvalue *value = NULL; xmlNodePtr node; xmlChar *content = NULL; switch (kind) { case ICAL_GEO_VALUE: { struct icalgeotype geo; node = xmlFirstElementChild(xtype); if (!node) { syslog(LOG_WARNING, "Missing <latitude> XML element"); break; } else if (xmlStrcmp(node->name, BAD_CAST "latitude")) { syslog(LOG_WARNING, "Expected <latitude> XML element, received %s", node->name); break; } content = xmlNodeGetContent(node); geo.lat = atof((const char *) content); node = xmlNextElementSibling(node); if (!node) { syslog(LOG_WARNING, "Missing <longitude> XML element"); break; } else if (xmlStrcmp(node->name, BAD_CAST "longitude")) { syslog(LOG_WARNING, "Expected <longitude> XML element, received %s", node->name); break; } xmlFree(content); content = xmlNodeGetContent(node); geo.lon = atof((const char *) content); value = icalvalue_new_geo(geo); break; } case ICAL_PERIOD_VALUE: { struct icalperiodtype p; p.start = p.end = icaltime_null_time(); p.duration = icaldurationtype_from_int(0); node = xmlFirstElementChild(xtype); if (!node) { syslog(LOG_WARNING, "Missing <start> XML element"); break; } else if (xmlStrcmp(node->name, BAD_CAST "start")) { syslog(LOG_WARNING, "Expected <start> XML element, received %s", node->name); break; } content = xmlNodeGetContent(node); p.start = icaltime_from_string((const char *) content); if (icaltime_is_null_time(p.start)) break; node = xmlNextElementSibling(node); if (!node) { syslog(LOG_WARNING, "Missing <end> / <duration> XML element"); break; } else if (!xmlStrcmp(node->name, BAD_CAST "end")) { xmlFree(content); content = xmlNodeGetContent(node); p.end = icaltime_from_string((const char *) content); if (icaltime_is_null_time(p.end)) break; } else if (!xmlStrcmp(node->name, BAD_CAST "duration")) { xmlFree(content); content = xmlNodeGetContent(node); p.duration = icaldurationtype_from_string((const char *) content); if (icaldurationtype_as_int(p.duration) == 0) break; } else { syslog(LOG_WARNING, "Expected <end> / <duration> XML element, received %s", node->name); break; } value = icalvalue_new_period(p); break; } case ICAL_RECUR_VALUE: { struct buf rrule = BUF_INITIALIZER; struct hash_table byrules; struct icalrecurrencetype rt; char *sep = ""; construct_hash_table(&byrules, 10, 1); /* create an iCal RRULE string from xCal <recur> sub-elements */ for (node = xmlFirstElementChild(xtype); node; node = xmlNextElementSibling(node)) { content = xmlNodeGetContent(node); if (!xmlStrncmp(node->name, BAD_CAST "by", 2)) { /* BY* rules can have a list of values - assemble them using a hash table */ struct buf *vals = hash_lookup((const char *) node->name, &byrules); if (vals) { /* append this value to existing list */ buf_printf(vals, ",%s", (char *) content); } else { /* create new list with this valiue */ vals = xzmalloc(sizeof(struct buf)); buf_setcstr(vals, (char *) content); hash_insert((char *) node->name, vals, &byrules); } } else { /* single value rpart */ buf_printf(&rrule, "%s%s=%s", sep, ucase((char *) node->name), (char *) content); sep = ";"; } xmlFree(content); content = NULL; } /* append the BY* rules to RRULE buffer */ hash_enumerate(&byrules, (void (*)(const char*, void*, void*)) &append_byrule, &rrule); free_hash_table(&byrules, NULL); /* parse our iCal RRULE string */ rt = icalrecurrencetype_from_string(buf_cstring(&rrule)); buf_free(&rrule); if (rt.freq != ICAL_NO_RECURRENCE) value = icalvalue_new_recur(rt); 
break; } case ICAL_REQUESTSTATUS_VALUE: { struct icalreqstattype rst = { ICAL_UNKNOWN_STATUS, NULL, NULL }; short maj, min; node = xmlFirstElementChild(xtype); if (!node) { syslog(LOG_WARNING, "Missing <code> XML element"); break; } else if (xmlStrcmp(node->name, BAD_CAST "code")) { syslog(LOG_WARNING, "Expected <code> XML element, received %s", node->name); break; } content = xmlNodeGetContent(node); if (sscanf((const char *) content, "%hd.%hd", &maj, &min) == 2) { rst.code = icalenum_num_to_reqstat(maj, min); } if (rst.code == ICAL_UNKNOWN_STATUS) { syslog(LOG_WARNING, "Unknown request-status code"); break; } node = xmlNextElementSibling(node); if (!node) { syslog(LOG_WARNING, "Missing <description> XML element"); break; } else if (xmlStrcmp(node->name, BAD_CAST "description")) { syslog(LOG_WARNING, "Expected <description> XML element, received %s", node->name); break; } xmlFree(content); content = xmlNodeGetContent(node); rst.desc = (const char *) content; node = xmlNextElementSibling(node); if (node) { if (xmlStrcmp(node->name, BAD_CAST "data")) { syslog(LOG_WARNING, "Expected <data> XML element, received %s", node->name); break; } xmlFree(content); content = xmlNodeGetContent(node); rst.debug = (const char *) content; } value = icalvalue_new_requeststatus(rst); break; } case ICAL_UTCOFFSET_VALUE: { int n, utcoffset, hours, minutes, seconds = 0; char sign; content = xmlNodeGetContent(xtype); n = sscanf((const char *) content, "%c%02d:%02d:%02d", &sign, &hours, &minutes, &seconds); if (n < 3) { syslog(LOG_WARNING, "Unexpected utc-offset format"); break; } utcoffset = hours*3600 + minutes*60 + seconds; if (sign == '-') utcoffset = -utcoffset; value = icalvalue_new_utcoffset(utcoffset); break; } default: content = xmlNodeGetContent(xtype); value = icalvalue_new_from_string(kind, (const char *) content); break; } if (content) xmlFree(content); return value; }
int main_RebuildTfst(int argc,char* const argv[]) { if (argc==1) { usage(); return SUCCESS_RETURN_CODE; } VersatileEncodingConfig vec=VEC_DEFAULT; int val, index=-1; bool only_verify_arguments = false; UnitexGetOpt options; int save_statistics=1; while (EOF!=(val=options.parse_long(argc,argv,optstring_RebuildTfst,lopts_RebuildTfst,&index))) { switch (val) { case 'k': if (options.vars()->optarg[0]=='\0') { error("Empty input_encoding argument\n"); return USAGE_ERROR_CODE; } decode_reading_encoding_parameter(&(vec.mask_encoding_compatibility_input),options.vars()->optarg); break; case 'q': if (options.vars()->optarg[0]=='\0') { error("Empty output_encoding argument\n"); return USAGE_ERROR_CODE; } decode_writing_encoding_parameter(&(vec.encoding_output),&(vec.bom_output),options.vars()->optarg); break; case 'S': save_statistics = 0; break; case 'V': only_verify_arguments = true; break; case 'h': usage(); return SUCCESS_RETURN_CODE; case ':': index==-1 ? error("Missing argument for option -%c\n", options.vars()->optopt) : error("Missing argument for option --%s\n", lopts_RebuildTfst[index].name); return USAGE_ERROR_CODE; case '?': index==-1 ? error("Invalid option -%c\n", options.vars()->optopt) : error("Invalid option --%s\n", options.vars()->optarg); return USAGE_ERROR_CODE; } index=-1; } if (options.vars()->optind!=argc-1) { error("Invalid arguments: rerun with --help\n"); return USAGE_ERROR_CODE; } if (only_verify_arguments) { // freeing all allocated memory return SUCCESS_RETURN_CODE; } char input_tfst[FILENAME_MAX]; char input_tind[FILENAME_MAX]; strcpy(input_tfst,argv[options.vars()->optind]); remove_extension(input_tfst,input_tind); strcat(input_tind,".tind"); u_printf("Loading %s...\n",input_tfst); Tfst* tfst = open_text_automaton(&vec,input_tfst); if (tfst==NULL) { error("Unable to load %s automaton\n",input_tfst); return DEFAULT_ERROR_CODE; } char basedir[FILENAME_MAX]; get_path(input_tfst,basedir); char output_tfst[FILENAME_MAX]; sprintf(output_tfst, "%s.new.tfst",input_tfst); char output_tind[FILENAME_MAX]; sprintf(output_tind, "%s.new.tind",input_tfst); U_FILE* f_tfst; if ((f_tfst = u_fopen(&vec,output_tfst,U_WRITE)) == NULL) { error("Unable to open %s for writing\n", output_tfst); close_text_automaton(tfst); return DEFAULT_ERROR_CODE; } U_FILE* f_tind; if ((f_tind = u_fopen(BINARY,output_tind,U_WRITE)) == NULL) { u_fclose(f_tfst); close_text_automaton(tfst); error("Unable to open %s for writing\n", output_tind); return DEFAULT_ERROR_CODE; } /* We use this hash table to rebuild files tfst_tags_by_freq/alph.txt */ struct hash_table* form_frequencies=new_hash_table((HASH_FUNCTION)hash_unichar,(EQUAL_FUNCTION)u_equal, (FREE_FUNCTION)free,NULL,(KEYCOPY_FUNCTION)keycopy); u_fprintf(f_tfst,"%010d\n",tfst->N); for (int i = 1; i <= tfst->N; i++) { if ((i % 100) == 0) { u_printf("%d/%d sentences rebuilt...\n", i, tfst->N); } load_sentence(tfst,i); char grfname[FILENAME_MAX]; sprintf(grfname, "%ssentence%d.grf", basedir, i); unichar** tags=NULL; int n_tags=-1; if (fexists(grfname)) { /* If there is a .grf for the current sentence, then we must * take it into account */ if (0==pseudo_main_Grf2Fst2(&vec,grfname,0,NULL,1,1,NULL,NULL,0)) { /* We proceed only if the graph compilation was a success */ char fst2name[FILENAME_MAX]; sprintf(fst2name, "%ssentence%d.fst2", basedir, i); struct FST2_free_info fst2_free; Fst2* fst2=load_abstract_fst2(&vec,fst2name,0,&fst2_free); af_remove(fst2name); free_SingleGraph(tfst->automaton,NULL); tfst->automaton=create_copy_of_fst2_subgraph(fst2,1); 
tags=create_tfst_tags(fst2,&n_tags); free_abstract_Fst2(fst2,&fst2_free); } else { error("Error: %s is not a valid sentence automaton\n",grfname); } } save_current_sentence(tfst,f_tfst,f_tind,tags,n_tags,form_frequencies); if (tags!=NULL) { /* If necessary, we free the tags we created */ for (int count_tags=0;count_tags<n_tags;count_tags++) { free(tags[count_tags]); } free(tags); } } u_printf("Text automaton rebuilt.\n"); u_fclose(f_tind); u_fclose(f_tfst); close_text_automaton(tfst); /* Finally, we save statistics */ if (save_statistics) { char tfst_tags_by_freq[FILENAME_MAX]; char tfst_tags_by_alph[FILENAME_MAX]; strcpy(tfst_tags_by_freq, basedir); strcat(tfst_tags_by_freq, "tfst_tags_by_freq.txt"); strcpy(tfst_tags_by_alph, basedir); strcat(tfst_tags_by_alph, "tfst_tags_by_alph.txt"); U_FILE* f_tfst_tags_by_freq = u_fopen(&vec, tfst_tags_by_freq, U_WRITE); if (f_tfst_tags_by_freq == NULL) { error("Cannot open %s\n", tfst_tags_by_freq); } U_FILE* f_tfst_tags_by_alph = u_fopen(&vec, tfst_tags_by_alph, U_WRITE); if (f_tfst_tags_by_alph == NULL) { error("Cannot open %s\n", tfst_tags_by_alph); } sort_and_save_tfst_stats(form_frequencies, f_tfst_tags_by_freq, f_tfst_tags_by_alph); u_fclose(f_tfst_tags_by_freq); u_fclose(f_tfst_tags_by_alph); } free_hash_table(form_frequencies); /* make a backup and replace old automaton with new */ char backup_tfst[FILENAME_MAX]; char backup_tind[FILENAME_MAX]; sprintf(backup_tfst,"%s.bck",input_tfst); sprintf(backup_tind,"%s.bck",input_tind); /* We remove the existing backup files, if any */ af_remove(backup_tfst); af_remove(backup_tind); af_rename(input_tfst,backup_tfst); af_rename(input_tind,backup_tind); af_rename(output_tfst,input_tfst); af_rename(output_tind,input_tind); u_printf("\nYou can find a backup of the original files in:\n %s\nand %s\n", backup_tfst,backup_tind); return SUCCESS_RETURN_CODE; }