/* Initialize a task processor: backing memory pool, task-queue map,
 * event group and mutex, in that order.  On any failure everything
 * created so far is destroyed in reverse order and FALSE is returned;
 * on success all four resources are live and TRUE is returned. */
boolean task_processor_initialize( task_processor* processor )
{
    memory_pool_initialize( &processor->mempool, 512, 4 );

    if ( !hash_map_initialize( &processor->task_queues,
                               sizeof(event_group_element_type),
                               sizeof(task_queue*),
                               &processor->mempool ) )
        goto fail_pool;

    processor->event_group = synchronize_create_event_group();
    if ( NULL == processor->event_group )
        goto fail_map;

    processor->mutex = synchronize_create_mutex();
    if ( NULL == processor->mutex )
        goto fail_group;

    return TRUE;

    /* Unwind ladder: each label releases one more resource. */
fail_group:
    synchronize_destroy_event_group( processor->event_group );
fail_map:
    hash_map_destroy( &processor->task_queues );
fail_pool:
    memory_pool_destroy( &processor->mempool );
    return FALSE;
}
/* Module shutdown hook: unregister this module from fstitch and
 * release the global scope map.  The callback argument is unused. */
static void kernel_patchgroup_scopes_shutdown(void * ignore)
{
    (void) ignore;
    /* NOTE(review): the return value of fstitch_unregister_module()
     * is deliberately ignored, as in the original ("check return
     * value?") — nothing useful can be done with it at shutdown. */
    fstitch_unregister_module(&ops);
    hash_map_destroy(scope_map);
    scope_map = NULL;
}
/* Unmount the root fstitch mount and free all of its state.
 * Returns 0 on success, -EINVAL if root is not currently mounted.
 * Teardown order matters: the mount is removed from the global mounts
 * list, the FUSE session and channel are shut down, and only then is
 * the mount struct itself freed and the global `root` cleared. */
static int unmount_root(void)
{
    int r;
    if (!root->mounted)
        return -EINVAL;
    mounts_remove(root);
    if (root->session)
        fuse_session_destroy(root->session); // also destroys root->channel
    if (root->channel_fd >= 0)
        (void) close(root->channel_fd);
    // only use fuse_unmount if there are no nested mounts
    if (nmounts == 0)
        fuse_unmount(root->mountpoint);
    fuse_opt_free_args(&root->args);
    free(root->mountpoint);
    free(root->fstitch_path);
    hash_map_destroy(root->parents);
    memset(root, 0, sizeof(*root)); // poison before free to catch stale use
    free(root);
    root = NULL;
    /* helper_shutdown() failure is logged but not fatal — we are
     * already past the point of no return. */
    if ((r = helper_shutdown()) < 0)
        fprintf(stderr, "%s(): helper_shutdown() failed (%d), continuing anyway\n", __FUNCTION__, r);
    destroy_locals();
    return 0;
}
/* Release a variant value and free its container.
 * - Scalar variants (8/16/32/64-bit integers, double) own no extra
 *   storage, so only the container is freed.
 * - VAR_STR releases the owned string; VAR_TABLE destroys the table,
 *   recursively releasing contained values via release_var itself.
 * - An unknown type tag returns WITHOUT freeing v, preserving the
 *   original behavior (the tag may indicate a value unsafe to free).
 * Fix: a NULL v is now accepted as a no-op, matching free()-style
 * semantics; previously it would dereference NULL. */
void release_var(var_t v)
{
    if (NULL == v)  /* tolerate NULL like free() does */
        return;
    switch (v->tt) {
    case VAR_8:
    case VAR_16:
    case VAR_32:
    case VAR_64:
    case VAR_DOUBLE:
        /* scalars: nothing extra to release */
        break;
    case VAR_STR:
        kn_release_string(v->s);
        v->s = NULL;
        break;
    case VAR_TABLE:
        hash_map_destroy(v->t, (hash_destroy)release_var);
        v->t = NULL;
        break;
    default:
        /* unknown tag: do not free the container (original behavior) */
        return;
    }
    free(v);
}
/* Begin an orderly shutdown of the FUSE mount subsystem.
 * Waits (bounded) for the helper thread to finish in-flight work,
 * purges any mounts that never completed, then either unmounts root
 * directly (if it is the only mount) or signals the unmount pipe to
 * start stepwise shutdown.  Returns 0 on success, -1 on failure or if
 * shutdown has already started. */
int fuse_serve_mount_start_shutdown(void)
{
    char b = 1;
    int i = 0;
    bool failed_found;
    Dprintf("%s()\n", __FUNCTION__);
    if (shutdown_has_started())
        return -1;
    helper.shutdown_started = 1;
    // NOTE: we can probably update this and helper_thread's code
    // so that calling this function shortly after an add or remove is
    // safe.
    /* Poll 4 times per second, giving the helper thread up to
     * MAX_START_SHUTDOWN_WAIT seconds to go idle before giving up. */
    while (helper.alive)
    {
        if (++i > 4*MAX_START_SHUTDOWN_WAIT)
        {
            fprintf(stderr, "%s(): Mounts or unmounts still in progress. Good luck with the shutdown!\n", __FUNCTION__);
            break;
        }
        jsleep(HZ / 4);
    }
    // Purge failed mounts
    /* Restart the scan after each removal because mounts_remove()
     * mutates the array being iterated. */
    do {
        mount_t ** mp;
        failed_found = 0;
        for (mp = mounts; mp && *mp; mp++)
            if (!(*mp)->mounted)
            {
                mount_t * m = *mp;
                failed_found = 1;
                mounts_remove(m);
                free(m->fstitch_path);
                fuse_opt_free_args(&m->args);
                free(m->mountpoint);
                hash_map_destroy(m->parents);
                memset(m, 0, sizeof(*m)); /* poison before free */
                free(m);
                break;
            }
    } while (failed_found);
    // If only root is mounted unmount it and return shutdown
    if (nmounts == 1)
        return unmount_root();
    // Start the calling of fuse_serve_mount_step_shutdown()
    if (write(unmount_pipe[1], &b, 1) != 1)
    {
        perror("fuse_serve_mount_start_shutdown(): write");
        helper.shutdown_started = 0; /* roll back so a later retry can work */
        return -1;
    }
    return 0;
}
/* Tear down a task processor created by task_processor_initialize().
 * Pending tasks are drained first; the remaining resources are then
 * destroyed in the exact reverse of their initialization order
 * (mutex, event group, queue map, backing memory pool). */
void task_processor_destroy( task_processor* processor )
{
    task_processor_clear_task_queue( processor );
    synchronize_destroy_mutex( processor->mutex );
    synchronize_destroy_event_group( processor->event_group );
    hash_map_destroy( &processor->task_queues );
    memory_pool_destroy( &processor->mempool );
}
// Do an unmount for helper_thread() static void helper_thread_unmount(mount_t * m) { Dprintf("%s(\"%s\")\n", __FUNCTION__, m->fstitch_path); fuse_unmount(m->mountpoint); free(m->mountpoint); free(m->fstitch_path); hash_map_destroy(m->parents); memset(m, 0, sizeof(*m)); free(m); }
/* Tear down thread-local-storage bookkeeping exactly once.
 * The compare-and-swap on is_init guarantees that only one caller
 * performs the cleanup even if clear_tls() races with itself: the
 * first CAS flips is_init 1->0 and returns 1; any concurrent caller
 * sees 0 and does nothing. */
void clear_tls()
{
    if(is_init)
    {
        if(COMPARE_AND_SWAP(&is_init,1,0) == 1)
        {
            pthread_key_delete(thread_key);
            mutex_lock(tls_mtx);
            /* Destroy every per-thread hash map still registered in
             * the global list. */
            list_iter it = list_begin(tls_list);
            list_iter end = list_end(tls_list);
            for( ; !IT_LIST_EQUAL(it,end); IT_LIST_NEXT(it))
            {
                hash_map_t h = IT_LIST_GET(hash_map_t,it);
                hash_map_destroy(&h);
            }
            mutex_unlock(tls_mtx);
            /* NOTE(review): lock/unlock take tls_mtx by value while
             * mutex_destroy takes &tls_mtx — presumably the project
             * API's convention; confirm against its declarations. */
            mutex_destroy(&tls_mtx);
            list_destroy(&tls_list);
        }
    }
}
/* Destroy a crash-simulator block device.
 * Fails (returns r < 0) only if the module manager refuses removal;
 * otherwise every cached block reference is released, the block map
 * is destroyed, absorbed-write statistics are reported, and the
 * device struct is freed.  Returns 0 on success. */
static int crashsim_bd_destroy(BD_t * bd)
{
    struct crashsim_info * info = (struct crashsim_info *) bd;
    int r = modman_rem_bd(bd);
    hash_map_it2_t it;
    if(r < 0)
        return r;
    modman_dec_bd(info->bd, bd);
    /* Drop our reference on every block still held in the cache. */
    it = hash_map_it2_create(info->blocks);
    while(hash_map_it2_next(&it))
    {
        bdesc_t * block = (bdesc_t *) it.val;
        bdesc_release(&block);
    }
    hash_map_destroy(info->blocks);
    printf("Crash simulator absorbed %u/%u block writes\n", info->absorbed, info->total);
    memset(info, 0, sizeof(*info)); /* poison before free */
    free(info);
    return 0;
}
/* Destroy a write-back cache block device.
 * Dirty blocks are flushed first; if the flush fails the destroy is
 * refused with -EBUSY so no dirty data is lost.  After module-manager
 * removal, the scheduler callback is unregistered and all (now clean)
 * cached blocks are released.  Returns 0 on success, negative errno
 * on failure. */
static int wb_cache_bd_destroy(BD_t * bd)
{
    struct cache_info * info = (struct cache_info *) bd;
    uint32_t block;
    int r;
    if(wb_cache_dirty_count(bd) != 0)
    {
        r = CALL(bd, flush, FLUSH_DEVICE, NULL);
        if(r < 0)
            return -EBUSY; /* refuse to destroy with dirty data */
    }
    assert(!wb_cache_dirty_count(bd));
    r = modman_rem_bd(bd);
    if(r < 0)
        return r;
    modman_dec_bd(info->bd, bd);
    sched_unregister(wb_cache_bd_callback, bd);
    hash_map_destroy(info->block_map);
    /* the blocks are all clean, because we checked above - just release them */
    /* NOTE(review): the loop starts at 1 and runs through info->size,
     * and the array holds size+1 slots — presumably slot 0 is unused;
     * confirm against the allocation of info->blocks. */
    for(block = 1; block <= info->size; block++)
        if(info->blocks[block].block)
            bdesc_release(&info->blocks[block].block);
    sfree(info->blocks, (info->size + 1) * sizeof(*info->blocks));
    memset(info, 0, sizeof(*info)); /* poison before free */
    free(info);
    TIMING_DUMP(wait, "wb_cache wait", "waits");
    return 0;
}
/* Register a new CFS mount at `path` (relative to the root mount) and
 * queue it for the helper thread to complete asynchronously.
 * Allocates the mount_t, its queue entry, paths, parents map, and a
 * copy of root's fuse args; on any failure the goto ladder unwinds
 * exactly what has been built so far.  Returns 0 on success or a
 * negative errno. */
int fuse_serve_mount_add(CFS_t * cfs, const char * path)
{
    mount_t * m;
    queue_entry_t * qe;
    int r;
    Dprintf("%s(%s, \"%s\")\n", __FUNCTION__, modman_name_cfs(cfs), path);
    if (shutdown_has_started())
        return -EBUSY; // We might be able to allow this; but at least for now, reject
    if (!(m = calloc(1, sizeof(*m))))
        return -ENOMEM;
    if (!(qe = calloc(1, sizeof(*qe))))
    {
        r = -ENOMEM;
        goto error_m;
    }
    qe->mount = m;
    qe->action = QEMOUNT;
    m->mounted = 0;
    if (!(m->fstitch_path = strdup(path)))
    {
        r = -ENOMEM;
        goto error_qe;
    }
    if (!(m->parents = hash_map_create()))
    {
        r = -ENOMEM;
        goto error_path;
    }
    m->cfs = cfs;
    if ((r = CALL(cfs, get_root, &m->root_ino)) < 0)
        goto error_parents;
    /* Root inode is its own parent in the parents map. */
    if ((r = hash_map_insert(m->parents, (void *) m->root_ino, (void *) m->root_ino)) < 0)
        goto error_parents;
    if ((r = fuse_args_copy(&root->args, &m->args)) < 0)
        goto error_parents;
    /* Full mountpoint = root's mountpoint + fstitch-relative path. */
    m->mountpoint = malloc(strlen(root->mountpoint) + strlen(path) + 1);
    if (!m->mountpoint)
    {
        r = -ENOMEM;
        goto error_args;
    }
    strcpy(m->mountpoint, root->mountpoint);
    strcpy(m->mountpoint + strlen(root->mountpoint), path);
    // add to mounts list
    mounts_insert(m);
    // helper_thread takes care of the channel_fd field and on down
    if (enqueue_helper_request(qe))
        goto error_insert;
    if (ensure_helper_is_running() < 0)
    {
        // As it is not expected that ensure_helper_is_running() will error
        // and as recovering would require a single-use dequeue function,
        // for now we just error and let things go as they will.
        /* NOTE(review): at this point qe was successfully enqueued, yet
         * the ladder below frees both qe and m — if the helper later
         * dequeues this entry it would touch freed memory.  Verify
         * against enqueue/dequeue semantics before relying on this path. */
        fprintf(stderr, "%s: ensure_helper_is_running failed. WARNING: request remains in the queue.\n", __FUNCTION__);
        goto error_insert;
    }
    return 0;

error_insert:
    mounts_remove(m);
    free(m->mountpoint);
error_args:
    fuse_opt_free_args(&m->args);
error_parents:
    hash_map_destroy(m->parents);
error_path:
    free(m->fstitch_path);
error_qe:
    memset(qe, 0, sizeof(*qe));
    free(qe);
error_m:
    memset(m, 0, sizeof(*m));
    free(m);
    return r;
}
void run() { unsigned i, time; gasnett_tick_t start, end; hash_map_create(params[HASHMAP_SIZE], (grt_bool_t) params[ON_PTHREAD]); grt_barrier(); #ifdef LOCKS grt_lock_state_t state; #endif for (i = 0; i < MY_NUM_OPS; ++i) { grt_word_t key = keys[i], val = values[i]; #ifdef LOCKS hash_t hash = compute_hash(key); hash_map_lock(hash.proc, hash.offset, WRITE, &state); #endif hash_map_insert(key, val); #ifdef LOCKS hash_map_unlock(hash.proc, hash.offset); #endif } BARRIER(); start = gasnett_ticks_now(); #ifdef LOCKS grt_lock_state_t state1, state2; #endif for (i = 0; i < MY_NUM_OPS; ++i) { unsigned idx = grt_random_next() * MY_NUM_OPS; grt_word_t key1 = keys[i]; unsigned second_idx = grt_random_next() * MY_NUM_OPS; grt_word_t key2 = keys[second_idx]; #ifdef LOCKS lock(key1, key2, &state1, &state2); #endif grt_word_t val1, val2; #ifndef LOCKS #ifndef NOLOCKS stm_start(grt_id); #endif #endif grt_bool_t found1 = hash_map_find(key1, &val1); grt_bool_t found2 = hash_map_find(key2, &val2); hash_map_insert(key1, val2); hash_map_insert(key2, val1); #ifndef LOCKS #ifndef NOLOCKS stm_commit(grt_id); #endif #endif #if LOCKS unlock(key1, key2); #endif } end = gasnett_ticks_now(); time = ((unsigned) gasnett_ticks_to_us(end - start)); printf("processor %u: execution time=%f us\n", grt_id, (double) time); fflush(stdout); grt_write(0, time, ×[grt_id]); BARRIER(); if (grt_id == 0) { time = 0, max_time = 0; for (i = 0; i < grt_num_procs; ++i) { gasnett_tick_t this_time = times[i]; time += this_time; if (this_time >= max_time) max_time = this_time; } time_per_op = ((float) time) / params[NUM_OPS]; printf("total CPU time=%f us\n", (double) time); printf("time per operation=%f us\n", time_per_op); printf("max time=%f us\n", (double) max_time); } BARRIER(); hash_map_destroy(); BARRIER(); }
/* Container vtable adapter: forwards destruction to hash_map_destroy()
 * on the container's private hash-map handle and propagates its
 * return value. */
static inline int __container_hash_map_destroy(container_t *ct)
{
    return hash_map_destroy(ct->priv.hmap);
}
/* Destroy an explore-node map: the node hash map is torn down first,
 * then the object pool that backs the node storage is released
 * (the map must go before the pool its entries live in). */
void explore_node_map_destroy( explore_node_map* map )
{
    hash_map_destroy( &map->nodes );
    object_pool_destroy( &map->node_pool );
}