static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
    bp_data_time_item_t *item = NULL;
    Uint size = sizeof(bp_data_time_item_t)*n;
    Uint ix;
    Uint hval;

    item = (bp_data_time_item_t *)Alloc(size);
    sys_memzero(item, size);

    for( ix = 0; ix < n; ++ix) {
        item[ix].pid = NIL;
    }

    /* rehash, old hash -> new hash */

    for( ix = 0; ix < hash->n; ix++) {
        if (hash->item[ix].pid != NIL) {
            /* shift out the low tag bits of the pid for a better spread,
             * then probe linearly for a free slot (open addressing) */
            hval = ((hash->item[ix].pid) >> 4) % n; /* new n */

            while (item[hval].pid != NIL) {
                hval = (hval + 1) % n;
            }
            item[hval].pid     = hash->item[ix].pid;
            item[hval].count   = hash->item[ix].count;
            item[hval].s_time  = hash->item[ix].s_time;
            item[hval].us_time = hash->item[ix].us_time;
        }
    }

    /* free the old table and install the new one */
    Free(hash->item);
    hash->n = n;
    hash->item = item;
}
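/*
** The rehash above relies on open addressing with linear probing: a slot
** is derived from the pid (an Eterm, shifted right by 4 to drop the tag
** bits) and collisions step to the next slot modulo the table size. A
** minimal sketch of the matching lookup, assuming the same probing
** scheme (illustrative; name and shape are assumptions, not the source):
*/
static bp_data_time_item_t *bp_hash_get_sketch(bp_time_hash_t *hash, Eterm pid) {
    Uint hval = (pid >> 4) % hash->n;
    bp_data_time_item_t *item = hash->item;

    while (item[hval].pid != pid) {
        if (item[hval].pid == NIL)
            return NULL;              /* empty slot: pid is not in the table */
        hval = (hval + 1) % hash->n;  /* linear probe to the next slot */
    }
    return &item[hval];
}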
/*
** init a pre allocated or static hash structure
** and allocate buckets.
*/
Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
{
    int sz;
    int ix = 0;

    h->type = type;

    while (h_size_table[ix] != -1 && h_size_table[ix] < size)
        ix++;
    if (h_size_table[ix] == -1)
        erl_exit(1, "panic: too large hash table size (%d)\n", size);

    size = h_size_table[ix];
    sz = size*sizeof(HashBucket*);

    h->bucket = (HashBucket**) erts_alloc(h->type, sz);

    sys_memzero(h->bucket, sz);
    h->is_allocated = 0;
    h->name = name;
    h->fun = fun;
    h->size = size;
    h->size20percent = h->size/5;
    h->size80percent = (4*h->size)/5;
    h->ix = ix;
    h->used = 0;
    return h;
}
/*
** init a pre allocated or static hash structure
** and allocate buckets.
*/
Hash* hash_init(int type, Hash* h, char* name, int size, HashFunctions fun)
{
    int sz;
    int ix = 0;

    h->meta_alloc_type = type;

    while (h_size_table[ix] != -1 && h_size_table[ix] < size)
        ix++;
    if (h_size_table[ix] == -1)
        return NULL;

    size = h_size_table[ix];
    sz = size*sizeof(HashBucket*);

    h->bucket = (HashBucket**) fun.meta_alloc(h->meta_alloc_type, sz);

    sys_memzero(h->bucket, sz);
    h->is_allocated = 0;
    h->name = name;
    h->fun = fun;
    h->size = size;
    h->size_ix = ix;
    h->min_size_ix = ix;
    h->nobjs = 0;
    set_thresholds(h);
    return h;
}
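/*
** set_thresholds() is called above but not shown here. A plausible sketch,
** assuming the same 80%/20% load factors that the older hash_init variant
** stores in size80percent/size20percent, and never shrinking below the
** initial size (the grow_threshold/shrink_threshold names are assumptions):
*/
static void set_thresholds(Hash* h)
{
    h->grow_threshold = (4*h->size)/5;       /* grow at ~80% load */
    if (h->size_ix > h->min_size_ix)
        h->shrink_threshold = h->size/5;     /* shrink at ~20% load */
    else
        h->shrink_threshold = -1;            /* never shrink below initial size */
}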
static lc_thread_t *
create_thread_data(char *thread_name)
{
    lc_thread_t *thr = malloc(sizeof(lc_thread_t));
    if (!thr)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!thr->thread_name)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->emu_thread = 0;
    thr->tid = erts_thr_self();
    thr->required.first = NULL;
    thr->required.last = NULL;
    thr->locked.first = NULL;
    thr->locked.last = NULL;
    thr->prev = NULL;
    thr->free_blocks = NULL;
    thr->chunks = NULL;
    sys_memzero(&thr->matrix, sizeof(thr->matrix));

    lc_lock_threads();
    thr->next = lc_threads;
    if (lc_threads)
        lc_threads->prev = thr;
    lc_threads = thr;
    lc_unlock_threads();

    erts_tsd_set(locks_key, (void *) thr);
    return thr;
}
void erts_lcnt_proc_lock_init(Process *p) {
    if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
        if (p->common.id != ERTS_INVALID_PID) {
            erts_lcnt_init_lock_x(&(p->lock.lcnt_main),   "proc_main",   ERTS_LCNT_LT_PROCLOCK, p->common.id);
            erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq),   "proc_msgq",   ERTS_LCNT_LT_PROCLOCK, p->common.id);
            erts_lcnt_init_lock_x(&(p->lock.lcnt_link),   "proc_link",   ERTS_LCNT_LT_PROCLOCK, p->common.id);
            erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
        } else {
            erts_lcnt_init_lock(&(p->lock.lcnt_main),   "proc_main",   ERTS_LCNT_LT_PROCLOCK);
            erts_lcnt_init_lock(&(p->lock.lcnt_msgq),   "proc_msgq",   ERTS_LCNT_LT_PROCLOCK);
            erts_lcnt_init_lock(&(p->lock.lcnt_link),   "proc_link",   ERTS_LCNT_LT_PROCLOCK);
            erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK);
        }
    } else {
        sys_memzero(&(p->lock.lcnt_main),   sizeof(p->lock.lcnt_main));
        sys_memzero(&(p->lock.lcnt_msgq),   sizeof(p->lock.lcnt_msgq));
        sys_memzero(&(p->lock.lcnt_link),   sizeof(p->lock.lcnt_link));
        sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status));
    }
}
static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
    Uint size = sizeof(bp_data_time_item_t)*n;
    Uint i;

    hash->n    = n;
    hash->used = 0;

    hash->item = (bp_data_time_item_t *)Alloc(size);
    sys_memzero(hash->item, size);

    for(i = 0; i < n; ++i) {
        hash->item[i].pid = NIL;
    }
}
/*
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
*/
SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
{
    int i, bytes;

    size = align_up_pow2(size);
    bytes = size * sizeof(SafeHashBucket*);
    h->type = type;
    h->tab = (SafeHashBucket**) erts_alloc(h->type, bytes);
    sys_memzero(h->tab, bytes);
    h->name = name;
    h->fun = fun;
    set_size(h,size);
    erts_smp_atomic_init(&h->is_rehashing, 0);
    erts_smp_atomic_init(&h->nitems, 0);
    for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
        erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
    }
    return h;
}
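/*
** align_up_pow2() rounds the requested size up to a power of two so that
** "hvalue & size_mask" (see rehash below) can stand in for a modulo. A
** minimal sketch of such a helper, assuming a positive size (illustrative;
** the real definition lives elsewhere in the source):
*/
static int align_up_pow2(int size)
{
    int p = 1;
    while (p < size)
        p <<= 1;    /* double until we reach or pass the requested size */
    return p;
}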
/*
** Rehash all objects
*/
static void rehash(SafeHash* h, int grow_limit)
{
    if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {
        return; /* already in progress */
    }
    if (h->grow_limit == grow_limit) {
        int i, size, bytes;
        SafeHashBucket** new_tab;
        SafeHashBucket** old_tab = h->tab;
        int old_size = h->size_mask + 1;

        size = old_size * 2; /* double table size */
        bytes = size * sizeof(SafeHashBucket*);
        new_tab = (SafeHashBucket **) erts_alloc(h->type, bytes);
        sys_memzero(new_tab, bytes);

        for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
            erts_smp_mtx_lock(&h->lock_vec[i].mtx);
        }

        h->tab = new_tab;
        set_size(h, size);

        for (i = 0; i < old_size; i++) {
            SafeHashBucket* b = old_tab[i];
            while (b != NULL) {
                SafeHashBucket* b_next = b->next;
                int ix = b->hvalue & h->size_mask;
                b->next = new_tab[ix];
                new_tab[ix] = b;
                b = b_next;
            }
        }

        for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
            erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
        }
        erts_free(h->type, (void *) old_tab);
    }
    /*else already done */
    erts_smp_atomic_set(&h->is_rehashing, 0);
}
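/*
** How a caller reaches this rehash: the table is striped over
** SAFE_HASH_LOCK_CNT mutexes, so an ordinary insert only takes one stripe.
** Passing the grow_limit observed at insert time lets rehash() detect that
** another thread already grew the table (the limit changed) and back out.
** A hedged sketch of the insert-side trigger (the function name, the exact
** locking, and the erts_smp_atomic_inctest call are assumptions):
*/
void safe_hash_put_sketch(SafeHash* h, SafeHashBucket* b)
{
    int ix = b->hvalue & h->size_mask;
    erts_smp_mtx_t* lock = &h->lock_vec[ix % SAFE_HASH_LOCK_CNT].mtx;
    int grow_limit;

    erts_smp_mtx_lock(lock);          /* only one stripe is locked */
    b->next = h->tab[ix];
    h->tab[ix] = b;
    grow_limit = h->grow_limit;
    erts_smp_mtx_unlock(lock);

    /* grow outside the stripe lock; rehash() re-checks grow_limit */
    if (erts_smp_atomic_inctest(&h->nitems) > grow_limit)
        rehash(h, grow_limit);
}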
/*
** Rehash all objects
*/
static void rehash(Hash* h, int grow)
{
    int sz;
    int old_size = h->size;
    HashBucket** new_bucket;
    int i;

    if (grow) {
        if ((h_size_table[h->ix+1]) == -1)
            return;
        h->ix++;
    }
    else {
        if (h->ix == 0)
            return;
        h->ix--;
    }
    h->size = h_size_table[h->ix];
    h->size20percent = h->size/5;
    h->size80percent = (4*h->size)/5;
    sz = h->size*sizeof(HashBucket*);

    new_bucket = (HashBucket **) erts_alloc(h->type, sz);
    sys_memzero(new_bucket, sz);

    h->used = 0;

    for (i = 0; i < old_size; i++) {
        HashBucket* b = h->bucket[i];
        while (b != (HashBucket*) 0) {
            HashBucket* b_next = b->next;
            int ix = b->hvalue % h->size;
            if (new_bucket[ix] == NULL)
                h->used++;
            b->next = new_bucket[ix];
            new_bucket[ix] = b;
            b = b_next;
        }
    }
    erts_free(h->type, (void *) h->bucket);
    h->bucket = new_bucket;
}
/*
** Rehash all objects
*/
static void rehash(Hash* h, int grow)
{
    int sz;
    int old_size = h->size;
    HashBucket** new_bucket;
    int i;

    if (grow) {
        if ((h_size_table[h->size_ix+1]) == -1)
            return;
        h->size_ix++;
    }
    else {
        if (h->size_ix == 0)
            return;
        h->size_ix--;
    }
    h->size = h_size_table[h->size_ix];
    sz = h->size*sizeof(HashBucket*);

    new_bucket = (HashBucket **) h->fun.meta_alloc(h->meta_alloc_type, sz);
    sys_memzero(new_bucket, sz);

    for (i = 0; i < old_size; i++) {
        HashBucket* b = h->bucket[i];
        while (b != (HashBucket*) 0) {
            HashBucket* b_next = b->next;
            int ix = b->hvalue % h->size;
            b->next = new_bucket[ix];
            new_bucket[ix] = b;
            b = b_next;
        }
    }
    h->fun.meta_free(h->meta_alloc_type, (void *) h->bucket);
    h->bucket = new_bucket;
    set_thresholds(h);
}
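/*
** What drives rehash(): an insert that pushes the object count past the
** grow threshold grows the table, a removal that drops it below the
** shrink threshold shrinks it. A minimal sketch of the insert side
** (the function name and the threshold field names are assumptions,
** consistent with the set_thresholds() sketch above):
*/
void* hash_put_sketch(Hash* h, void* tmpl)
{
    HashValue hval = h->fun.hash(tmpl);
    int ix = hval % h->size;
    HashBucket* b;

    /* return an existing object with the same key, if any */
    for (b = h->bucket[ix]; b; b = b->next)
        if (b->hvalue == hval && h->fun.cmp(tmpl, (void*)b) == 0)
            return (void*) b;

    /* allocate and link in a new bucket at the head of the chain */
    b = (HashBucket*) h->fun.alloc(tmpl);
    b->hvalue = hval;
    b->next = h->bucket[ix];
    h->bucket[ix] = b;

    if (++h->nobjs > h->grow_threshold)
        rehash(h, 1);   /* grow */
    return (void*) b;
}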
Uint
erts_instr_init(int stat, int map_stat)
{
    Uint extra_sz;
    int i;

    am_tot = NULL;
    am_n = NULL;
    am_c = NULL;
    am_a = NULL;

    erts_instr_memory_map = 0;
    erts_instr_stat = 0;
    atoms_initialized = 0;

    if (!stat && !map_stat)
        return 0;

    stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));

    erts_mtx_init(&instr_mutex, "instr");

    mem_anchor = NULL;

    /* Install instrumentation functions */
    ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));

    sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));

    sys_memzero((void *) &stats->tot, sizeof(Stat_t));
    sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1));
    sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1));
    sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));

    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        if (erts_allctrs_info[i].enabled)
            stats->ap[i] = &stats->a[i];
        else
            stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM];
    }

    if (map_stat) {
        erts_mtx_init(&instr_x_mutex, "instr_x");

        erts_instr_memory_map = 1;
        erts_instr_stat = 1;
        for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
            erts_allctrs[i].alloc = map_stat_alloc;
            erts_allctrs[i].realloc = map_stat_realloc;
            erts_allctrs[i].free = map_stat_free;
            erts_allctrs[i].extra = (void *) &real_allctrs[i];
        }
        instr_wrapper.lock = map_stat_pre_lock;
        instr_wrapper.unlock = map_stat_pre_unlock;
        extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
    }
    else {
        erts_instr_stat = 1;
        for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
            erts_allctrs[i].alloc = stat_alloc;
            erts_allctrs[i].realloc = stat_realloc;
            erts_allctrs[i].free = stat_free;
            erts_allctrs[i].extra = (void *) &real_allctrs[i];
        }
        instr_wrapper.lock = stat_pre_lock;
        instr_wrapper.unlock = stat_pre_unlock;
        extra_sz = STAT_BLOCK_HEADER_SIZE;
    }
    erts_allctr_wrapper_prelock_init(&instr_wrapper);
    return extra_sz;
}
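/*
** Why erts_instr_init() returns extra_sz: each installed wrapper prepends
** a small header to every block so the matching free/realloc can find the
** real allocator and update the per-type counters. A hedged sketch of the
** wrapper shape (the function name, StatBlock_t, and the header layout are
** assumptions; only the extra/real_allctrs wiring is taken from above):
*/
typedef struct {
    ErtsAlcType_t type;     /* which allocation type the block belongs to */
} StatBlock_t;

static void *stat_alloc_sketch(ErtsAlcType_t n, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    StatBlock_t *sb;

    /* allocate room for the header in front of the user data */
    sb = (StatBlock_t *) (*real_af->alloc)(n, real_af->extra,
                                           size + sizeof(StatBlock_t));
    if (!sb)
        return NULL;
    sb->type = n;               /* remembered for the free/realloc wrappers */
    /* ...update stats->tot / stats->a / stats->c / stats->n here... */
    return (void *) (sb + 1);   /* hand the caller the bytes after the header */
}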