/*
 * Lock every dist-entry mutex and both global tables.
 *
 * Lock order: dist entry mutexes in ascending index order first, then
 * the node table mutex, then the dist table mutex.  Callers must
 * release in the reverse order.
 */
void erts_lock_node_tables_and_entries(void)
{
    int ix;

    for (ix = 0; ix < ERTS_NO_OF_DIST_ENTRY_MUTEXES; ix++)
        erts_smp_mtx_lock(&dist_entry_mutexes[ix]);
    erts_smp_mtx_lock(&erts_node_table_mtx);
    erts_smp_mtx_lock(&erts_dist_table_mtx);
}
/*
 * Print information about all nodes in the node table.
 * Out-params (if non-NULL) receive the per-sysname and total counts
 * accumulated by the print_node() callback.
 * The table lock is skipped while crash dumping.
 */
void erts_print_node_info(int to,
                          void *to_arg,
                          Eterm sysname,
                          int *no_sysname,
                          int *no_total)
{
    struct pn_data pnd;
    int need_lock = !ERTS_IS_CRASH_DUMPING;

    pnd.to = to;
    pnd.to_arg = to_arg;
    pnd.sysname = sysname;
    pnd.no_sysname = 0;
    pnd.no_total = 0;

    if (need_lock)
        erts_smp_mtx_lock(&erts_node_table_mtx);
    hash_foreach(&erts_node_table, print_node, (void *) &pnd);
    if (pnd.no_sysname != 0)
        erts_print(to, to_arg, "\n");
    if (need_lock)
        erts_smp_mtx_unlock(&erts_node_table_mtx);

    if (no_sysname)
        *no_sysname = pnd.no_sysname;
    if (no_total)
        *no_total = pnd.no_total;
}
/*
 * get a timestamp
 *
 * Returns the current time as {MegaSecs, Secs, MicroSecs}, adjusted so
 * that consecutive calls are strictly increasing.  The file-scope
 * 'then' holds the last value handed out and is protected by
 * erts_timeofday_mtx.
 */
void get_now(Uint* megasec, Uint* sec, Uint* microsec)
{
    SysTimeval now;

    erts_smp_mtx_lock(&erts_timeofday_mtx);

    get_tolerant_timeofday(&now);
    do_erts_deliver_time(&now);

    /* Make sure time is later than last */
    if (then.tv_sec > now.tv_sec ||
        (then.tv_sec == now.tv_sec && then.tv_usec >= now.tv_usec)) {
        /* Clock did not advance: hand out last value + 1 microsecond */
        now = then;
        now.tv_usec++;
    }
    /* Check for carry from above + general reasonability */
    if (now.tv_usec >= 1000000) {
        now.tv_usec = 0;
        now.tv_sec++;
    }
    then = now;

    erts_smp_mtx_unlock(&erts_timeofday_mtx);

    *megasec = (Uint) (now.tv_sec / 1000000);
    *sec = (Uint) (now.tv_sec % 1000000);
    *microsec = (Uint) (now.tv_usec);
}
/*
 * Report accumulated user/system CPU time in milliseconds, and the
 * delta since the previous call.  Any out-param may be NULL if the
 * caller is not interested in it.  The snapshot 't_start' is shared
 * and therefore updated under erts_timeofday_mtx.
 */
void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
                       UWord *ms_user_diff, UWord *ms_sys_diff)
{
    SysTimes tms;
    UWord user_now, sys_now;
    UWord user_prev, sys_prev;

    sys_times(&tms);
    user_now = (tms.tms_utime * 1000) / SYS_CLK_TCK;
    sys_now = (tms.tms_stime * 1000) / SYS_CLK_TCK;

    if (ms_user)
        *ms_user = user_now;
    if (ms_sys)
        *ms_sys = sys_now;

    erts_smp_mtx_lock(&erts_timeofday_mtx);
    user_prev = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
    sys_prev = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
    t_start = tms;   /* new baseline for the next call */
    erts_smp_mtx_unlock(&erts_timeofday_mtx);

    if (ms_user_diff)
        *ms_user_diff = user_now - user_prev;
    if (ms_sys_diff)
        *ms_sys_diff = sys_now - sys_prev;
}
/*
** Get info about hash: total object count, maximum chain depth,
** table size and name.  Each lock stripe is taken in turn, so the
** numbers are consistent per-stripe but only approximate globally.
*/
void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
{
    int nobjs = 0;
    int deepest = 0;
    int tab_size = 0;
    int lock_ix;

    for (lock_ix = 0; lock_ix < SAFE_HASH_LOCK_CNT; lock_ix++) {
        int ix;

        erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx);
        tab_size = h->size_mask + 1;
        /* This stripe owns every SAFE_HASH_LOCK_CNT'th chain */
        for (ix = lock_ix; ix < tab_size; ix += SAFE_HASH_LOCK_CNT) {
            SafeHashBucket *b;
            int depth = 0;

            for (b = h->tab[ix]; b != NULL; b = b->next) {
                nobjs++;
                depth++;
            }
            if (depth > deepest)
                deepest = depth;
        }
        erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx);
    }

    hi->name = h->name;
    hi->size = tab_size;
    hi->objs = nobjs;
    hi->depth = deepest;
}
/*
 * Try to seize exclusive permission to modify loaded code.
 *
 * Returns non-zero on success.  Caller _must_ yield if we return 0:
 * the process has been queued and suspended, and will be resumed when
 * the current holder releases the permission.
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int success;
#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif
    ASSERT(c_p != NULL);
    erts_smp_mtx_lock(&code_write_permission_mtx);
    success = (code_writing_process == NULL);
    if (success) {
        /* We are now the exclusive writer */
        code_writing_process = c_p;
#ifdef ERTS_ENABLE_LOCK_CHECK
        erts_tsd_set(has_code_write_permission, (void *) 1);
#endif
    }
    else { /* Already locked */
        struct code_write_queue_item* qitem;
        ASSERT(code_writing_process != c_p);
        qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p); /* keep process alive while queued */
        qitem->next = code_write_queue; /* LIFO push onto wait queue */
        code_write_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_smp_mtx_unlock(&code_write_permission_mtx);
    return success;
}
Export *
erts_suspend_process_on_pending_purge_lambda(Process *c_p)
{
    erts_smp_mtx_lock(&purge_state.mtx);
    if (is_value(purge_state.module)) {
        /*
         * The process c_p is about to call a fun in the code
         * that we are trying to purge. Suspend it and call
         * erts_code_purger:pending_purge_lambda/3. The process
         * will be resumed when the purge completes or aborts,
         * and will then try to do the call again.
         */
        if (purge_state.sp_ix >= purge_state.sp_size) {
            /* Suspended-process array full: grow it by 100 slots.
             * NOTE(review): element size uses sizeof(ErlFunEntry *)
             * although sprocs holds Eterm pids; presumably both are
             * word-sized everywhere — confirm. */
            Eterm *sprocs;
            purge_state.sp_size += 100;
            sprocs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
                                (sizeof(ErlFunEntry *)
                                 * purge_state.sp_size));
            sys_memcpy((void *) sprocs, (void *) purge_state.sprocs,
                       purge_state.sp_ix*sizeof(ErlFunEntry *));
            if (purge_state.sprocs != &purge_state.def_sprocs[0])
                erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
            purge_state.sprocs = sprocs;
        }
        purge_state.sprocs[purge_state.sp_ix++] = c_p->common.id;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        ERTS_VBUMP_ALL_REDS(c_p); /* force the caller off the scheduler */
    }
    erts_smp_mtx_unlock(&purge_state.mtx);
    /* NOTE(review): read after unlock — presumably pending_purge_lambda
     * is set once at init and never changes; confirm. */
    return purge_state.pending_purge_lambda;
}
/*
 * Returns the amount of time left in ms until the timer 'p' is
 * triggered.  0 is returned if 'p' isn't active.  0 is returned also
 * if the timer is overdue (i.e., would have triggered immediately if
 * it hadn't been cancelled).
 */
Uint erts_time_left(ErlTimer *p)
{
    Uint left;
    erts_short_time_t dt;

    erts_smp_mtx_lock(&tiw_lock);

    if (!p->active) {
        erts_smp_mtx_unlock(&tiw_lock);
        return 0;
    }

    /* Wheel ticks until p's slot is reached; 'count' is the number of
     * remaining full revolutions. */
    if (p->slot < tiw_pos)
        left = (p->count + 1) * TIW_SIZE + p->slot - tiw_pos;
    else
        left = p->count * TIW_SIZE + p->slot - tiw_pos;
    dt = do_time_read();
    /* NOTE(review): 'left < dt' compares unsigned Uint against signed
     * erts_short_time_t; presumably dt is never negative here — confirm. */
    if (left < dt)
        left = 0;
    else
        left -= dt;

    erts_smp_mtx_unlock(&tiw_lock);

    /* Convert wheel ticks to milliseconds */
    return (Uint) left * TIW_ITIME;
}
void erts_p_slpq(void) { int i; ErlTimer* p; erts_smp_mtx_lock(&tiw_lock); /* print the whole wheel, starting at the current position */ erts_printf("\ntiw_pos = %d tiw_nto %d\n", tiw_pos, tiw_nto); i = tiw_pos; if (tiw[i] != NULL) { erts_printf("%d:\n", i); for(p = tiw[i]; p != NULL; p = p->next) { erts_printf(" (count %d, slot %d)\n", p->count, p->slot); } } for(i = (i+1)%TIW_SIZE; i != tiw_pos; i = (i+1)%TIW_SIZE) { if (tiw[i] != NULL) { erts_printf("%d:\n", i); for(p = tiw[i]; p != NULL; p = p->next) { erts_printf(" (count %d, slot %d)\n", p->count, p->slot); } } } erts_smp_mtx_unlock(&tiw_lock); }
/*
 * Cancel an active timer.  Safe to call on an already-cancelled timer.
 * If the timer has a cancel callback it is invoked AFTER the wheel
 * lock is released, so the callback may itself use timer functions.
 */
void erts_cancel_timer(ErlTimer* p)
{
    erts_smp_mtx_lock(&tiw_lock);
    if (!p->active) { /* allow repeated cancel (drivers) */
        erts_smp_mtx_unlock(&tiw_lock);
        return;
    }

    /* is it the 'min' timer, remove min */
    if (p == tiw_min_ptr) {
        tiw_min_ptr = NULL;
        tiw_min = 0;
    }

    remove_timer(p);
    p->slot = p->count = 0;

    if (p->cancel != NULL) {
        /* run the callback outside the lock — order matters here */
        erts_smp_mtx_unlock(&tiw_lock);
        (*p->cancel)(p->arg);
        return;
    }
    erts_smp_mtx_unlock(&tiw_lock);
}
/*
** Find or insert an object in the hash table.
** Returns the existing bucket if an equal object is already present,
** otherwise allocates a new bucket via fun.alloc and links it first
** in its chain.
*/
void* safe_hash_put(SafeHash* h, void* tmpl)
{
    int grow_limit;
    SafeHashValue hval = h->fun.hash(tmpl);
    SafeHashBucket* b;
    SafeHashBucket** head;
    /* one mutex per lock stripe; chains are striped across them */
    erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
    erts_smp_mtx_lock(lock);
    head = &h->tab[hval & h->size_mask];
    b = *head;
    while (b != NULL) {
        if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
            erts_smp_mtx_unlock(lock);
            return b;
        }
        b = b->next;
    }
    /* Not found: allocate and link first in the chain */
    b = (SafeHashBucket*) h->fun.alloc(tmpl);
    b->hvalue = hval;
    b->next = *head;
    *head = b;
    /* snapshot grow_limit while still holding the chain lock */
    grow_limit = h->grow_limit;
    erts_smp_mtx_unlock(lock);
    /* nitems is atomic and bumped outside the chain lock; growth is
     * triggered lock-free against the snapshotted limit */
    if (erts_smp_atomic_inctest(&h->nitems) > grow_limit) {
        rehash(h, grow_limit);
    }
    return (void*) b;
}
/* Deliver elapsed wall-clock time to the runtime, serialized by the
 * time-of-day mutex. */
void erts_deliver_time(void)
{
    SysTimeval tod;

    erts_smp_mtx_lock(&erts_timeofday_mtx);
    get_tolerant_timeofday(&tod);
    do_erts_deliver_time(&tod);
    erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
/* Private export to erl_time_sup.c */
erts_short_time_t erts_next_time(void)
{
    erts_short_time_t next;

    erts_smp_mtx_lock(&tiw_lock);
    (void) do_time_update(); /* bring the wheel position up to date first */
    next = next_time_internal();
    erts_smp_mtx_unlock(&tiw_lock);
    return next;
}
/*
** Returns size of table in bytes. Stored objects not included.
**/
int safe_hash_table_sz(SafeHash *h)
{
    int name_len, slot_bytes;

    /* length of the name string including the terminating NUL */
    name_len = 0;
    while (h->name[name_len])
        name_len++;
    name_len++;

    erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
    slot_bytes = (h->size_mask + 1) * sizeof(SafeHashBucket*);
    erts_smp_mtx_unlock(&h->lock_vec[0].mtx);

    return sizeof(SafeHash) + slot_bytes + name_len;
}
/* Print hash statistics for the node table.  The lock is skipped while
 * crash dumping (no other threads are running then). */
void erts_node_table_info(int to, void *to_arg)
{
    int need_lock = !ERTS_IS_CRASH_DUMPING;

    if (need_lock)
        erts_smp_mtx_lock(&erts_node_table_mtx);
    hash_info(to, to_arg, &erts_node_table);
    if (need_lock)
        erts_smp_mtx_unlock(&erts_node_table_mtx);
}
/*
 * Change the identity of the local node (e.g. when the distribution
 * name is set).  Both tables hash on sysname/creation, so each entry
 * must be removed, mutated, and re-inserted under its new key.
 */
void erts_set_this_node(Eterm sysname, Uint creation)
{
    /* lock order: node table before dist table */
    erts_smp_mtx_lock(&erts_node_table_mtx);
    erts_smp_mtx_lock(&erts_dist_table_mtx);

    /* re-key the local dist entry */
    (void) hash_erase(&erts_dist_table, (void *) erts_this_dist_entry);
    erts_this_dist_entry->sysname = sysname;
    erts_this_dist_entry->creation = creation;
    (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);

    /* re-key the local node entry */
    (void) hash_erase(&erts_node_table, (void *) erts_this_node);
    erts_this_node->sysname = sysname;
    erts_this_node->creation = creation;
    (void) hash_put(&erts_node_table, (void *) erts_this_node);

    erts_smp_mtx_unlock(&erts_dist_table_mtx);
    erts_smp_mtx_unlock(&erts_node_table_mtx);
}
/*
 * Move a connected dist entry to the "not connected" list.
 * Unlinks it from the visible or hidden list (depending on
 * DFLAG_PUBLISHED), clears its connection state, and pushes it onto
 * the front of erts_not_connected_dist_entries.
 * Caller must hold the dist entry's own lock.
 */
void erts_set_dist_entry_not_connected(DistEntry *dep)
{
    ERTS_SMP_LC_ASSERT(erts_lc_is_dist_entry_locked(dep));
    erts_smp_mtx_lock(&erts_dist_table_mtx);

    ASSERT(dep != erts_this_dist_entry);
    ASSERT(is_internal_port(dep->cid));

    if(dep->flags & DFLAG_PUBLISHED) {
        /* unlink from the visible (published) list */
        if(dep->prev) {
            ASSERT(is_in_de_list(dep, erts_visible_dist_entries));
            dep->prev->next = dep->next;
        }
        else {
            ASSERT(erts_visible_dist_entries == dep);
            erts_visible_dist_entries = dep->next;
        }
        ASSERT(erts_no_of_visible_dist_entries > 0);
        erts_no_of_visible_dist_entries--;
    }
    else {
        /* unlink from the hidden list */
        if(dep->prev) {
            ASSERT(is_in_de_list(dep, erts_hidden_dist_entries));
            dep->prev->next = dep->next;
        }
        else {
            ASSERT(erts_hidden_dist_entries == dep);
            erts_hidden_dist_entries = dep->next;
        }
        ASSERT(erts_no_of_hidden_dist_entries > 0);
        erts_no_of_hidden_dist_entries--;
    }

    if(dep->next)
        dep->next->prev = dep->prev;

    /* reset connection state */
    dep->status &= ~ERTS_DE_SFLG_CONNECTED;
    dep->flags = 0;
    dep->prev = NULL;
    dep->cid = NIL;

    /* push onto the front of the not-connected list */
    dep->next = erts_not_connected_dist_entries;
    if(erts_not_connected_dist_entries) {
        ASSERT(erts_not_connected_dist_entries->prev == NULL);
        erts_not_connected_dist_entries->prev = dep;
    }
    erts_not_connected_dist_entries = dep;
    erts_no_of_not_connected_dist_entries++;

    erts_smp_mtx_unlock(&erts_dist_table_mtx);
}
/*
 * Return the breakpoint-data index for the scheduler running c_p.
 * Normal schedulers use their own slot (no - 1); all dirty schedulers
 * share the extra slot at index erts_no_schedulers.
 *
 * NOTE(review): for dirty schedulers the mutex is intentionally left
 * locked on return — presumably released by a matching
 * release_bp_sched_ix(); confirm against that function.
 */
static ERTS_INLINE Uint32 acquire_bp_sched_ix(Process *c_p)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ASSERT(esdp);
#ifdef ERTS_DIRTY_SCHEDULERS
    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
        /* serialize all dirty schedulers on the shared slot */
        erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
        return (Uint32) erts_no_schedulers;
    }
#endif
    return (Uint32) esdp->no - 1;
}
/* Current (tolerant) wall-clock time in whole seconds. */
erts_time_t erts_get_time(void)
{
    SysTimeval tod;

    erts_smp_mtx_lock(&erts_timeofday_mtx);
    get_tolerant_timeofday(&tod);
    erts_smp_mtx_unlock(&erts_timeofday_mtx);

    return tod.tv_sec;
}
/*
 * Look up the dist entry for 'sysname', creating it if necessary.
 * The returned entry carries a reference owned by the caller.
 */
DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
    DistEntry tmpl;
    DistEntry *dep;
    long rc;

    tmpl.sysname = sysname;

    erts_smp_mtx_lock(&erts_dist_table_mtx);
    dep = hash_put(&erts_dist_table, (void *) &tmpl);
    rc = erts_refc_inctest(&dep->refc, 0);
    if (rc < 2) /* New or pending delete */
        erts_refc_inc(&dep->refc, 1);
    erts_smp_mtx_unlock(&erts_dist_table_mtx);

    return dep;
}
/*
 * Finish (successfully or not) a module purge: clear the in-progress
 * marker, resume every process that was suspended on a pending purge
 * lambda, and shrink the scratch arrays back to their embedded
 * defaults.
 * NOTE(review): the 'succeded' parameter is unused in this function.
 */
static void
finalize_purge_operation(Process *c_p, int succeded)
{
    Uint ix;

    /* drop the caller's main lock while we take other locks below;
     * it is reacquired before returning */
    if (c_p)
        erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);

    erts_smp_mtx_lock(&purge_state.mtx);

    ASSERT(purge_state.module != THE_NON_VALUE);
    purge_state.module = THE_NON_VALUE;

    /*
     * Resume all processes that have tried to call
     * funs in this code.
     */
    for (ix = 0; ix < purge_state.sp_ix; ix++) {
        Process *rp = erts_pid2proc(NULL, 0,
                                    purge_state.sprocs[ix],
                                    ERTS_PROC_LOCK_STATUS);
        if (rp) {
            erts_resume(rp, ERTS_PROC_LOCK_STATUS);
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
        }
    }

    erts_smp_mtx_unlock(&purge_state.mtx);

    if (c_p)
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);

    /* Reset scratch arrays to the embedded defaults.
     * NOTE(review): done after releasing purge_state.mtx — presumably
     * safe because purge operations are serialized; confirm. */
    if (purge_state.sprocs != &purge_state.def_sprocs[0]) {
        erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
        purge_state.sprocs = &purge_state.def_sprocs[0];
        purge_state.sp_size = sizeof(purge_state.def_sprocs);
        purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]);
    }
    purge_state.sp_ix = 0;

    if (purge_state.funs != &purge_state.def_funs[0]) {
        erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
        purge_state.funs = &purge_state.def_funs[0];
        purge_state.fe_size = sizeof(purge_state.def_funs);
        purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
    }
    purge_state.fe_ix = 0;
}
/* Raw system time (no monotonicity correction) as the Erlang
 * {MegaSecs, Secs, MicroSecs} triple. */
void get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
{
    SysTimeval tv;

    erts_smp_mtx_lock(&erts_timeofday_mtx);
    sys_gettimeofday(&tv);
    erts_smp_mtx_unlock(&erts_timeofday_mtx);

    *megasec = (Uint) (tv.tv_sec / 1000000);
    *sec = (Uint) (tv.tv_sec % 1000000);
    *microsec = (Uint) (tv.tv_usec);
}
/*
 * Look up the dist entry for 'sysname' and return it only if it is
 * currently connected (cid set); otherwise NULL.  The returned entry
 * carries a reference owned by the caller.
 */
DistEntry *
erts_sysname_to_connected_dist_entry(Eterm sysname)
{
    DistEntry de;
    DistEntry *res_dep;
    de.sysname = sysname;

    /* fast path: the local node — bump refc by 2 (caller's reference
     * plus the one the generic path below would have taken) */
    if(erts_this_dist_entry->sysname == sysname) {
        erts_refc_inc(&erts_this_dist_entry->refc, 2);
        return erts_this_dist_entry;
    }

    erts_smp_mtx_lock(&erts_dist_table_mtx);
    res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
    if (res_dep) {
        long refc = erts_refc_inctest(&res_dep->refc, 1);
        if (refc < 2) /* Pending delete */
            erts_refc_inc(&res_dep->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
    if (res_dep) {
        erts_smp_mtx_t *mtxp;
#ifdef ERTS_SMP
        mtxp = res_dep->mtxp;
#else
        /* non-SMP: erts_smp_mtx_lock is a no-op, NULL is fine */
        mtxp = NULL;
#endif
        erts_smp_mtx_lock(mtxp);
        if (is_nil(res_dep->cid)) {
            /* not connected after all — drop our reference */
            erts_deref_dist_entry(res_dep);
            res_dep = NULL;
        }
        erts_smp_mtx_unlock(mtxp);
    }
    return res_dep;
}
/*
 * Look up the dist entry for 'sysname' without creating one.
 * Returns NULL when not found; otherwise the entry with a reference
 * owned by the caller.
 */
DistEntry *erts_find_dist_entry(Eterm sysname)
{
    DistEntry tmpl;
    DistEntry *dep;

    tmpl.sysname = sysname;

    erts_smp_mtx_lock(&erts_dist_table_mtx);
    dep = hash_get(&erts_dist_table, (void *) &tmpl);
    if (dep) {
        long rc = erts_refc_inctest(&dep->refc, 1);
        if (rc < 2) /* Pending delete */
            erts_refc_inc(&dep->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_dist_table_mtx);

    return dep;
}
/*
 * Read the monotonic clock and clamp it so it never runs backwards:
 * 'last_delivered' remembers the highest value handed out so far and
 * is protected by its own mutex.
 */
static ErtsMonotonicTime clock_gettime_monotonic_verified(void)
{
    ErtsMonotonicTime raw;

    raw = (ErtsMonotonicTime) posix_clock_gettime(MONOTONIC_CLOCK_ID,
                                                  MONOTONIC_CLOCK_ID_STR);

    erts_smp_mtx_lock(&internal_state.w.f.mtx);
    if (raw >= internal_state.w.f.last_delivered)
        internal_state.w.f.last_delivered = raw;
    else
        raw = internal_state.w.f.last_delivered; /* clamp: never go back */
    erts_smp_mtx_unlock(&internal_state.w.f.mtx);

    return raw;
}
/*
** Find an object in the hash table.  Returns NULL when no entry
** matches both the hash value and the user-supplied compare function.
*/
void* safe_hash_get(SafeHash* h, void* tmpl)
{
    SafeHashValue hval = h->fun.hash(tmpl);
    SafeHashBucket* b;
    erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;

    erts_smp_mtx_lock(lock);
    for (b = h->tab[hval & h->size_mask]; b != NULL; b = b->next) {
        if (b->hvalue == hval && h->fun.cmp(tmpl, (void*)b) == 0)
            break;
    }
    erts_smp_mtx_unlock(lock);

    return (void*) b;
}
void erts_release_code_write_permission(void) { erts_smp_mtx_lock(&the_code_ix_queue_lock); while (the_code_ix_queue != NULL) { /* unleash the entire herd */ struct code_ix_queue_item* qitem = the_code_ix_queue; erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(qitem->p)) { erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS); } erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); the_code_ix_queue = qitem->next; erts_smp_proc_dec_refc(qitem->p); erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); } the_code_ix_lock = 0; erts_smp_mtx_unlock(&the_code_ix_queue_lock); }
/*
 * Delete a dist entry whose reference count reached zero.
 * The local node's entry is never deleted.
 */
void erts_delete_dist_entry(DistEntry *dep)
{
    ASSERT(dep != erts_this_dist_entry);
    if (dep == erts_this_dist_entry)
        return;

    erts_smp_mtx_lock(&erts_dist_table_mtx);
    /*
     * Another thread might have looked up this dist entry after
     * we decided to delete it (refc became zero). If so, the other
     * thread incremented refc twice. Once for the new reference
     * and once for this thread. Therefore, delete dist entry if
     * refc is 0 or -1 after a decrement.
     */
    if (erts_refc_dectest(&dep->refc, -1) <= 0)
        (void) hash_erase(&erts_dist_table, (void *) dep);
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
}
/*
 * Delete a node entry whose reference count reached zero.
 * The local node's entry is never deleted.
 */
void erts_delete_node(ErlNode *enp)
{
    ASSERT(enp != erts_this_node);
    if (enp == erts_this_node)
        return;

    erts_smp_mtx_lock(&erts_node_table_mtx);
    /*
     * Another thread may have looked this node up after its refc
     * dropped to zero; in that case it incremented refc twice — once
     * for the new reference and once on our behalf.  So only erase
     * when the decrement below leaves refc at 0 or -1.
     */
    if (erts_refc_dectest(&enp->refc, -1) <= 0)
        (void) hash_erase(&erts_node_table, (void *) enp);
    erts_smp_mtx_unlock(&erts_node_table_mtx);
}
/*
 * Look up the node entry for {sysname, creation}, creating it if
 * necessary.  A reference is taken for the caller except for the
 * local node, which is not reference counted here.
 */
ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
{
    ErlNode tmpl;
    ErlNode *np;

    tmpl.sysname = sysname;
    tmpl.creation = creation;

    erts_smp_mtx_lock(&erts_node_table_mtx);
    np = hash_put(&erts_node_table, (void *) &tmpl);
    ASSERT(np);
    if (np != erts_this_node) {
        long rc = erts_refc_inctest(&np->refc, 0);
        if (rc < 2) /* New or pending delete */
            erts_refc_inc(&np->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_node_table_mtx);

    return np;
}