/*
 * Back a container with a hash map.
 * Allocates the map with the container's allocator and the requested
 * lock type, storing it in ct->priv.hmap.
 * Returns 1 on success, 0 if the map could not be created.
 */
static inline int __container_hash_map_create(container_t *ct, uint8_t lock_type)
{
    dbg_str(DBG_CONTAINER_DETAIL, "__container_hash_map_create");
    ct->priv.hmap = hash_map_create(ct->allocator, lock_type);
    /* the != comparison yields exactly the 0/1 the original if/else returned */
    return ct->priv.hmap != NULL;
}
/*
 * Allocate and initialize the global `root` mount: parse the FUSE command
 * line, mount the kernel channel, and create the lowlevel session.
 * Returns 0 on success, -ENOMEM or -1 on failure.
 *
 * NOTE(review): on the later failure paths the partially-initialized `root`
 * (and its strdup'd path / parents map) is not freed — presumably the caller
 * treats any error as fatal and exits; confirm before reusing this helper.
 */
static int mount_root(int argc, char ** argv)
{
	Dprintf("%s()\n", __FUNCTION__);
	if (!(root = calloc(1, sizeof(*root))))
		return -ENOMEM;
	// We can't use FUSE_ARGS_INIT() here so assert we are initing the
	// whole structure
	static_assert(sizeof(root->args) == sizeof(argc) + sizeof(argv) + sizeof(int));
	root->args.argc = argc;
	root->args.argv = argv;
	root->args.allocated = 0;          /* argv is borrowed, not owned by FUSE */
	if (!(root->fstitch_path = strdup("")))
		return -ENOMEM;
	if (!(root->parents = hash_map_create()))
		return -ENOMEM;
	root->cfs = NULL; // set later via fuse_serve_mount_set_root()
	if (fuse_parse_cmdline(&root->args, &root->mountpoint, NULL, NULL) == -1)
	{
		fprintf(stderr, "%s(): fuse_parse_cmdline() failed\n", __FUNCTION__);
		return -1;
	}
	/* mount first, then build the session on the resulting channel fd */
	if ((root->channel_fd = fuse_mount(root->mountpoint, &root->args)) == -1)
	{
		fprintf(stderr, "%s():%d: fuse_mount(\"%s\") failed\n", __FUNCTION__, __LINE__, root->mountpoint);
		return -1;
	}
	if (!(root->session = fuse_lowlevel_new(&root->args, ops, ops_len, root)))
	{
		fprintf(stderr, "%s(): fuse_lowlevel_new() failed\n", __FUNCTION__);
		return -1;
	}
	if (!(root->channel = fuse_kern_chan_new(root->channel_fd)))
	{
		fprintf(stderr, "%s(): fuse_kern_chan_new() failed\n", __FUNCTION__);
		return -1;
	}
	fuse_session_add_chan(root->session, root->channel);
	/* publish the mount and mark it live only after everything succeeded */
	mounts_insert(root);
	root->mounted = 1;
	return 0;
}
void set_tls_data(int32_t key,void *data) { hash_map_t h = (hash_map_t)pthread_getspecific(thread_key); if(!h) { h = hash_map_create(128,sizeof(key),sizeof(void*),tls_hash_func,tls_hash_key_eq,0); } hash_map_iter it = hash_map_find(h,(void*)&key); if(0 == hash_map_is_vaild_iter(it)) HASH_MAP_INSERT(int32_t,void*,h,key,data); else hash_map_iter_set_val(it,data); }
void set_tls_data(int32_t key,void *data) { hash_map_t h = (hash_map_t)pthread_getspecific(thread_key); if(!h) { h = hash_map_create(128,sizeof(key),sizeof(void*),tls_hash_func,tls_hash_key_eq); } hash_map_iter it = hash_map_find(h,(void*)&key); hash_map_iter end = hash_map_end(h); if(IT_EQ(it,end)) HASH_MAP_INSERT(int32_t,void*,h,key,data); else IT_SET_VAL(void*,it,data); }
void *get_tls_data(int32_t key) { hash_map_t h = (hash_map_t)pthread_getspecific(thread_key); if(!h) { h = hash_map_create(128,sizeof(key),sizeof(void*),tls_hash_func,tls_hash_key_eq); return NULL; } hash_map_iter it = hash_map_find(h,(void*)&key); hash_map_iter end = hash_map_end(h); if(!IT_EQ(it,end)) return IT_GET_VAL(void*,it); return NULL; }
/*
 * Stack a crash-simulator block device on top of `disk`.
 * The simulator tracks written blocks in a hash map and "crashes" once
 * `threshold` is reached.  Returns the new BD, or NULL on any failure
 * (allocation, graph depth, or modman registration).
 */
BD_t * crashsim_bd(BD_t * disk, uint32_t threshold)
{
	struct crashsim_info * sim = malloc(sizeof(*sim));
	BD_t * dev;

	if(!sim)
		return NULL;
	dev = &sim->my_bd;

	sim->blocks = hash_map_create();
	if(!sim->blocks)
	{
		free(sim);
		return NULL;
	}

	BD_INIT(dev, crashsim_bd);

	/* simulator bookkeeping starts in the "not yet crashed" state */
	sim->bd = disk;
	sim->threshold = threshold;
	sim->crashed = 0;
	sim->absorbed = 0;
	sim->total = 0;

	/* mirror the underlying disk's geometry; we sit one level deeper */
	dev->blocksize = disk->blocksize;
	dev->numblocks = disk->numblocks;
	dev->atomicsize = disk->atomicsize;
	dev->level = disk->level;
	dev->graph_index = disk->graph_index + 1;
	if(dev->graph_index >= NBDINDEX)
	{
		DESTROY(dev);
		return NULL;
	}

	if(modman_add_anon_bd(dev, __FUNCTION__))
	{
		DESTROY(dev);
		return NULL;
	}
	if(modman_inc_bd(disk, dev, NULL) < 0)
	{
		modman_rem_bd(dev);
		DESTROY(dev);
		return NULL;
	}

	printf("Crash simulator block device initialized (threshold %u)\n", threshold);
	return dev;
}
void *get_tls_data(int32_t key) { hash_map_t h = (hash_map_t)pthread_getspecific(thread_key); if(!h) { h = hash_map_create(128,sizeof(key),sizeof(void*),tls_hash_func,tls_hash_key_eq,0); return 0; } hash_map_iter it = hash_map_find(h,(void*)&key); if(0 == hash_map_is_vaild_iter(it)) { return hash_map_iter_get_val(it); } return 0; }
/*
 * Initialize the container storage (lazy, idempotent).
 * This method must be called at the start of every routine that works
 * with the storage.
 */
static void init_container_map()
{
	if (is_init)
		return;  /* already initialized — nothing to do */
	_container.max_id = 0;
	struct hash_map_params params;
	params.key_type = KEY_INT;
	/* the map owns both keys and values and frees them on removal/destroy */
	params.flags = HASH_MAP_OWNER_KEY | HASH_MAP_OWNER_VALUE;
	_container.map_id = hash_map_create(params);
	_container.free_ids = 0;
	_container.count = 0;
	/* NOTE(review): no lock around this check-then-set — assumes callers
	 * are single-threaded during first use; confirm. */
	is_init = 1;
}
var_t new_var(uint8_t tt,...){ va_list vl; va_start(vl,tt); var_t v = calloc(1,sizeof(*v)); switch(tt){ case VAR_8: case VAR_16: case VAR_32: case VAR_64: { v->n = va_arg(vl,uint64_t); break; } case VAR_DOUBLE: { v->d = va_arg(vl,double); break; } case VAR_STR: { const char *str = va_arg(vl,const char *); v->s = kn_new_string(str); break; } case VAR_TABLE: { v->t = hash_map_create(16,var_hash_func,var_key_cmp,var_snd_hash_func); break; } default:{ free(v); v = NULL; break; } } return v; }
/*
 * Set up the kernel patchgroup scope tracking: allocate the scope map and
 * register both the module and its shutdown hook.
 * Returns 0 on success or a negative errno; on partial failure the
 * shutdown routine is invoked to undo what succeeded.
 */
int kernel_patchgroup_scopes_init(void)
{
	int r;

	scope_map = hash_map_create();
	if(!scope_map)
		return -ENOMEM;

	if((r = fstitch_register_module(&ops)) < 0)
		goto fail;
	if((r = fstitchd_register_shutdown_module(kernel_patchgroup_scopes_shutdown, NULL, SHUTDOWN_PREMODULES)) < 0)
		goto fail;
	return 0;

fail:
	/* roll back whatever was set up before the failing step */
	kernel_patchgroup_scopes_shutdown(NULL);
	return r;
}
/*
 * Smoke test for the int->int hash map API: insert 1..9, look up a present
 * and an absent key, remove one entry, then walk what remains.
 */
int main()
{
	hash_map_t h = hash_map_create(4096, sizeof(int32_t), sizeof(int32_t), _hash_func_, _hash_key_eq_);
	int32_t i;

	/* insert keys 1..9 (value == key), echoing each stored value */
	for(i = 1; i < 10; ++i)
	{
		hash_map_iter it = HASH_MAP_INSERT(int32_t, int32_t, h, i, i);
		printf("%d\n", IT_GET_VAL(int32_t, it));
	}
	printf("----------------------\n");

	/* a key that exists */
	{
		hash_map_iter it = HASH_MAP_FIND(int32_t, h, 5);
		hash_map_iter end = hash_map_end(h);
		if(!IT_EQ(it, end))
			printf("%d\n", IT_GET_VAL(int32_t, it));
	}
	printf("----------------------\n");

	/* a key that does not exist */
	{
		hash_map_iter it = HASH_MAP_FIND(int32_t, h, 100);
		hash_map_iter end = hash_map_end(h);
		if(IT_EQ(it, end))
			printf("can't find 100\n");
		else
			printf("%d\n", IT_GET_VAL(int32_t, it));
	}
	printf("----------------------\n");

	/* drop key 5, then iterate everything left */
	HASH_MAP_REMOVE(int32_t, h, 5);
	{
		hash_map_iter it = hash_map_begin(h);
		hash_map_iter end = hash_map_end(h);
		while(!IT_EQ(it, end))
		{
			printf("%d\n", IT_GET_VAL(int32_t, it));
			IT_NEXT(it);
		}
	}
	return 0;
}
/*
 * Stack a write-back cache of `blocks` slots on top of `disk`.
 * Slot 0 is a sentinel that anchors the free list and the LRU queue;
 * slots 1..blocks form the free list.  Returns the new BD or NULL on
 * failure (allocation, graph depth, scheduler, or modman registration).
 */
BD_t * wb_cache_bd(BD_t * disk, uint32_t blocks)
{
	uint32_t i;
	BD_t *bd;
	struct cache_info * info = malloc(sizeof(*info));
	if(!info)
		return NULL;
	bd = &info->my_bd;

	/* allocate an extra cache slot: hash maps return NULL on failure, so we
	 * can't have 0 be a valid index... besides, we need pointers to the
	 * head and tail of the LRU block queue */
	info->blocks = smalloc((blocks + 1) * sizeof(*info->blocks));
	if(!info->blocks)
	{
		free(info);
		return NULL;
	}
	/* set up the block cache pointers... this could all be in
	 * the loop, but it is unwound a bit for clarity here */
	/* sentinel slot 0: free list starts at slot 1, LRU queue is empty
	 * (both lru and mru point back at the sentinel) */
	info->blocks[0].free_index = 1;
	info->blocks[0].lru = &info->blocks[0];
	info->blocks[0].mru = &info->blocks[0];
	info->blocks[1].block = NULL;
	if(blocks > 1)
	{
		/* chain slot 1 forward; the last slot terminates the free list */
		info->blocks[1].next_index = 2;
		info->blocks[1].next = &info->blocks[2];
		info->blocks[blocks].block = NULL;
		info->blocks[blocks].next_index = 0;
		info->blocks[blocks].next = NULL;
	}
	else
	{
		/* single-slot cache: slot 1 is also the end of the free list */
		info->blocks[1].next_index = 0;
		info->blocks[1].next = NULL;
	}
	/* link the middle slots 2..blocks-1 into the free list */
	for(i = 2; i < blocks; i++)
	{
		info->blocks[i].block = NULL;
		info->blocks[i].next_index = i + 1;
		info->blocks[i].next = &info->blocks[i + 1];
	}

	info->block_map = hash_map_create();
	if(!info->block_map)
	{
		sfree(info->blocks, (blocks + 1) * sizeof(*info->blocks));
		free(info);
		return NULL;
	}

	BD_INIT(bd, wb_cache_bd);
	OBJMAGIC(bd) = WB_CACHE_MAGIC;

	info->bd = disk;
	info->size = blocks;
	bd->numblocks = disk->numblocks;
	bd->blocksize = disk->blocksize;
	bd->atomicsize = disk->atomicsize;
	/* we generally delay blocks, so our level goes up */
	bd->level = disk->level + 1;
	bd->graph_index = disk->graph_index + 1;
	if(bd->graph_index >= NBDINDEX)
	{
		DESTROY(bd);
		return NULL;
	}

	/* set up the callback */
	if(sched_register(wb_cache_bd_callback, bd, FLUSH_PERIOD) < 0)
	{
		DESTROY(bd);
		return NULL;
	}

	if(modman_add_anon_bd(bd, __FUNCTION__))
	{
		DESTROY(bd);
		return NULL;
	}
	if(modman_inc_bd(disk, bd, NULL) < 0)
	{
		modman_rem_bd(bd);
		DESTROY(bd);
		return NULL;
	}

	return bd;
}
/*
 * Register a new CFS mount at `path` (relative to the root mountpoint) and
 * queue a mount request for the helper thread, which completes the FUSE
 * channel/session setup.  Returns 0 on success or a negative errno; on
 * failure every resource acquired so far is released via the goto ladder.
 */
int fuse_serve_mount_add(CFS_t * cfs, const char * path)
{
	mount_t * m;
	queue_entry_t * qe;
	int r;
	Dprintf("%s(%s, \"%s\")\n", __FUNCTION__, modman_name_cfs(cfs), path);

	if (shutdown_has_started())
		return -EBUSY; // We might be able to allow this; but at least for now, reject

	if (!(m = calloc(1, sizeof(*m))))
		return -ENOMEM;
	if (!(qe = calloc(1, sizeof(*qe))))
	{
		r = -ENOMEM;
		goto error_m;
	}
	qe->mount = m;
	qe->action = QEMOUNT;

	m->mounted = 0;

	if (!(m->fstitch_path = strdup(path)))
	{
		r = -ENOMEM;
		goto error_qe;
	}

	if (!(m->parents = hash_map_create()))
	{
		r = -ENOMEM;
		goto error_path;
	}

	m->cfs = cfs;
	if ((r = CALL(cfs, get_root, &m->root_ino)) < 0)
		goto error_parents;
	/* the root is its own parent in the parents map */
	if ((r = hash_map_insert(m->parents, (void *) m->root_ino, (void *) m->root_ino)) < 0)
		goto error_parents;

	/* reuse the root mount's FUSE arguments for this sub-mount */
	if ((r = fuse_args_copy(&root->args, &m->args)) < 0)
		goto error_parents;

	/* mountpoint = root mountpoint with `path` appended */
	m->mountpoint = malloc(strlen(root->mountpoint) + strlen(path) + 1);
	if (!m->mountpoint)
	{
		r = -ENOMEM;
		goto error_args;
	}
	strcpy(m->mountpoint, root->mountpoint);
	strcpy(m->mountpoint + strlen(root->mountpoint), path);

	// add to mounts list
	mounts_insert(m);

	// helper_thread takes care of the channel_fd field and on down
	if (enqueue_helper_request(qe))
		goto error_insert;
	if (ensure_helper_is_running() < 0)
	{
		// As it is not expected that ensure_helper_is_running() will error
		// and as recovering would require a single-use dequeue function,
		// for now we just error and let things go as they will.
		fprintf(stderr, "%s: ensure_helper_is_running failed. WARNING: request remains in the queue.\n", __FUNCTION__);
		goto error_insert;
	}

	return 0;

  error_insert:
	mounts_remove(m);
	free(m->mountpoint);
  error_args:
	fuse_opt_free_args(&m->args);
  error_parents:
	hash_map_destroy(m->parents);
  error_path:
	free(m->fstitch_path);
  error_qe:
	/* zero before free to catch any stale use of the queue entry */
	memset(qe, 0, sizeof(*qe));
	free(qe);
  error_m:
	memset(m, 0, sizeof(*m));
	free(m);
	return r;
}
void run() { unsigned i, time; gasnett_tick_t start, end; hash_map_create(params[HASHMAP_SIZE], (grt_bool_t) params[ON_PTHREAD]); grt_barrier(); #ifdef LOCKS grt_lock_state_t state; #endif for (i = 0; i < MY_NUM_OPS; ++i) { grt_word_t key = keys[i], val = values[i]; #ifdef LOCKS hash_t hash = compute_hash(key); hash_map_lock(hash.proc, hash.offset, WRITE, &state); #endif hash_map_insert(key, val); #ifdef LOCKS hash_map_unlock(hash.proc, hash.offset); #endif } BARRIER(); start = gasnett_ticks_now(); #ifdef LOCKS grt_lock_state_t state1, state2; #endif for (i = 0; i < MY_NUM_OPS; ++i) { unsigned idx = grt_random_next() * MY_NUM_OPS; grt_word_t key1 = keys[i]; unsigned second_idx = grt_random_next() * MY_NUM_OPS; grt_word_t key2 = keys[second_idx]; #ifdef LOCKS lock(key1, key2, &state1, &state2); #endif grt_word_t val1, val2; #ifndef LOCKS #ifndef NOLOCKS stm_start(grt_id); #endif #endif grt_bool_t found1 = hash_map_find(key1, &val1); grt_bool_t found2 = hash_map_find(key2, &val2); hash_map_insert(key1, val2); hash_map_insert(key2, val1); #ifndef LOCKS #ifndef NOLOCKS stm_commit(grt_id); #endif #endif #if LOCKS unlock(key1, key2); #endif } end = gasnett_ticks_now(); time = ((unsigned) gasnett_ticks_to_us(end - start)); printf("processor %u: execution time=%f us\n", grt_id, (double) time); fflush(stdout); grt_write(0, time, ×[grt_id]); BARRIER(); if (grt_id == 0) { time = 0, max_time = 0; for (i = 0; i < grt_num_procs; ++i) { gasnett_tick_t this_time = times[i]; time += this_time; if (this_time >= max_time) max_time = this_time; } time_per_op = ((float) time) / params[NUM_OPS]; printf("total CPU time=%f us\n", (double) time); printf("time per operation=%f us\n", time_per_op); printf("max time=%f us\n", (double) max_time); } BARRIER(); hash_map_destroy(); BARRIER(); }
/*
 * Generic A* search over caller-supplied state objects.
 *
 * `state` is the start, `state_world` the problem context; the function
 * pointers supply goal testing, successor generation, parent linking,
 * backtrace, transition cost, heuristic, f-cost tagging, hashing,
 * comparison, copy, free, and heap ordering.  Returns the path list from
 * state_back_func on success, or NULL if the open heap empties first.
 *
 * Ownership: g/f cost values are malloc'd ints owned by their maps;
 * states_heap_index maps states to heap slots without owning either side
 * (hash_map_dissolve at the end).  states_overflow catches states the
 * closed set refused so they are still freed via list_kill.
 */
list* search_a_star(void* state, void* state_world,
                    search_is_goal state_goal_func,
                    search_gen_successors state_gen_func,
                    search_link_parent state_link_func,
                    search_goal_backtrace state_back_func,
                    search_trans_cost state_trans_func,
                    search_heuristic state_heur_func,
                    search_set_f_cost state_f_cost_set_func,
                    hash_func state_hash_alg,
                    generic_comp state_comp_func,
                    generic_cpy state_copy_func,
                    generic_op state_free_func,
                    heap_comp state_heap_func) {
    int* g_cost_ptr, *f_cost_ptr, f_cost, tmp_f, g_cost, found;
    void* current_state, *successor_state, *heap_memory_location;
    list* states_overflow, *successor_list, *path;
    hash_table* states_closed_set, *states_open_set;
    hash_map* states_g_cost, *states_f_cost, *states_heap_index;
    heap* states_heap;
    /* auxiliary structures (89 buckets, 0.75 load factor throughout) */
    states_overflow = list_create(NULL, NULL, state_free_func);
    states_closed_set = hash_table_create(89, .75, state_hash_alg, state_comp_func, state_copy_func, state_free_func);
    states_open_set = hash_table_create(89, .75, state_hash_alg, state_comp_func, state_copy_func, state_free_func);
    /* cost maps own their malloc'd int values (freed with free) */
    states_g_cost = hash_map_create(89, .75, state_hash_alg, state_comp_func, NULL, NULL, NULL, state_free_func, (generic_op)free);
    states_f_cost = hash_map_create(89, .75, state_hash_alg, state_comp_func, NULL, NULL, NULL, state_free_func, (generic_op)free);
    /* heap-index map owns nothing — torn down with hash_map_dissolve */
    states_heap_index = hash_map_create(89, .75, state_hash_alg, state_comp_func, NULL, NULL, NULL, NULL, NULL);
    states_heap = heap_create(89, state_heap_func, state_comp_func, state_copy_func, state_free_func);
    /* seed the frontier with the start state: g=0, f=h(start) */
    current_state = state;
    f_cost = state_heur_func(current_state, NULL);
    state_f_cost_set_func(current_state, f_cost);
    g_cost = 0;
    g_cost_ptr = malloc(sizeof(int));
    *g_cost_ptr = g_cost;
    f_cost_ptr = malloc(sizeof(int));
    *f_cost_ptr = f_cost;
    hash_map_insert(states_g_cost, current_state, g_cost_ptr, 0);
    heap_memory_location = heap_add(states_heap, state_copy_func(current_state));
    hash_table_insert(states_open_set, state_copy_func(current_state), 0);
    hash_map_insert(states_f_cost, state_copy_func(current_state), f_cost_ptr, 0);
    hash_map_insert(states_heap_index, current_state, heap_memory_location, 1);
    path = NULL;
    found = 0;
    while(!heap_is_empty(states_heap) && !found) {
        /* pop the lowest-f state off the frontier */
        current_state = state_copy_func(heap_peek(states_heap));
        heap_remove(states_heap);
        hash_table_remove(states_open_set, current_state);
        hash_map_remove(states_heap_index, current_state);
        if(state_goal_func(current_state, state_world)) {
            path = state_back_func(current_state);
            found = 1;
        }
        else {
            /* move to the closed set; on insert failure, park the copy in
             * the overflow list so it is still freed at the end */
            if(!hash_table_insert(states_closed_set, current_state, 0)) {
                list_push_front(states_overflow, current_state);
            }
            successor_list = state_gen_func(current_state, state_world);
            while(!list_is_empty(successor_list)) {
                successor_state = list_front(successor_list);
                g_cost = *(int*)hash_map_get(states_g_cost, current_state) + state_trans_func(current_state, successor_state, state_world);
                f_cost = g_cost + state_heur_func(successor_state, state_world);
                /* previously recorded f, or "infinity" if never seen.
                 * NOTE(review): tmp_f is int but assigned UINT_MAX — the
                 * implementation-defined conversion is typically -1, which
                 * would invert the comparisons below; confirm intent. */
                tmp_f = hash_map_contains_key(states_f_cost, successor_state) ? *(int*)hash_map_get(states_f_cost, successor_state) : UINT_MAX;
                /* already expanded with a no-better f: skip */
                if(hash_table_contains(states_closed_set, successor_state) && f_cost > tmp_f) {
                    list_remove_front(successor_list);
                    continue;
                }
                /* new state, or a strictly better route to a known state */
                if(!hash_table_contains(states_open_set, successor_state) || f_cost < tmp_f) {
                    state_f_cost_set_func(successor_state, f_cost);
                    state_link_func(successor_state, current_state);
                    g_cost_ptr = malloc(sizeof(int));
                    f_cost_ptr = malloc(sizeof(int));
                    *g_cost_ptr = g_cost;
                    *f_cost_ptr = f_cost;
                    if(!hash_table_contains(states_open_set, successor_state)) {
                        /* brand new frontier entry */
                        hash_table_insert(states_open_set, successor_state, 0);
                        heap_memory_location = heap_add(states_heap, state_copy_func(successor_state));
                        hash_map_insert(states_heap_index, successor_state, heap_memory_location, 1);
                    }
                    else {
                        /* better route: sift the existing heap entry up */
                        heap_memory_location = hash_map_get(states_heap_index, successor_state);
                        heap_up_mod_data(states_heap, heap_memory_location, successor_state);
                    }
                    /* update costs in place, inserting on first sight */
                    if(!hash_map_set(states_g_cost, successor_state, g_cost_ptr)) {
                        hash_map_insert(states_g_cost, state_copy_func(successor_state), g_cost_ptr, 0);
                    }
                    if(!hash_map_set(states_f_cost, successor_state, f_cost_ptr)) {
                        hash_map_insert(states_f_cost, state_copy_func(successor_state), f_cost_ptr, 0);
                    }
                    list_pop(successor_list);
                }
                else {
                    list_remove_front(successor_list);
                }
            }
            list_kill(successor_list);
        }
    }
    /* tear down everything except the returned path */
    heap_kill(states_heap);
    list_kill(states_overflow);
    hash_map_kill(states_g_cost);
    hash_map_kill(states_f_cost);
    hash_table_kill(states_open_set);
    hash_table_kill(states_closed_set);
    hash_map_dissolve(states_heap_index);
    return path;
}