/*
 * Initialize memory-trace delivery over a TCP socket.
 *
 * receiver: "a.b.c.d:port" dotted-quad address of the trace receiver,
 *           or NULL to leave memory tracing disabled.
 * nodename: node name written into the trace header ("" if NULL).
 *
 * On any failure (socket open, address parse, connect) tracing is shut
 * down via disable_trace() and the function returns early.
 */
void erts_mtrace_init(char *receiver, char *nodename) {
    char hostname[MAXHOSTNAMELEN + 1];
    char pid[21]; /* enough for a 64 bit number */
    socket_desc = ERTS_SOCK_INVALID_SOCKET;
    /* Tracing is enabled iff a receiver address was supplied. */
    erts_mtrace_enabled = receiver != NULL;
    if (erts_mtrace_enabled) {
        unsigned a, b, c, d, p;
        byte ip_addr[4];
        Uint16 port;
        erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
                      ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
        erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
                      ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
        socket_desc = erts_sock_open();
        if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
            disable_trace(1, "Failed to open socket", erts_sock_errno());
            return;
        }
        /* Parse "a.b.c.d:p" and range-check each octet and the port. */
        if (5 != sscanf(receiver, "%u.%u.%u.%u:%u", &a, &b, &c, &d, &p)
            || a >= (1 << 8) || b >= (1 << 8) || c >= (1 << 8) || d >= (1 << 8)
            || p >= (1 << 16)) {
            disable_trace(1, "Invalid receiver address", 0);
            return;
        }
        ip_addr[0] = (byte) a;
        ip_addr[1] = (byte) b;
        ip_addr[2] = (byte) c;
        ip_addr[3] = (byte) d;
        port = (Uint16) p;
        if (!erts_sock_connect(socket_desc, ip_addr, 4, port)) {
            disable_trace(1, "Failed to connect to receiver", erts_sock_errno());
            return;
        }
        /* Reset the trace buffer cursor and end pointer. */
        tracep = trace_buffer;
        endp = trace_buffer + TRACE_BUF_SZ;
        /* gethostname requires that the len is max(hostname) + 1 */
        if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN + 1) != 0)
            hostname[0] = '\0';
        /* Defensive: guarantee NUL termination even on a truncated name. */
        hostname[MAXHOSTNAMELEN] = '\0';
        sys_get_pid(pid, sizeof(pid));
        write_trace_header(nodename ? nodename : "", pid, hostname);
        erts_mtrace_update_heap_size();
    }
}
void erts_init_bif_persistent_term(void) { HashTable* hash_table; /* * Initialize the mutex protecting updates. */ erts_mtx_init(&update_table_permission_mtx, "update_persistent_term_permission", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); /* * Initialize delete queue. */ erts_mtx_init(&delete_queue_mtx, "persistent_term_delete_permission", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); /* * Allocate a small initial hash table. */ hash_table = create_initial_table(); erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table); /* * Initialize export entry for traps */ erts_init_trap_export(&persistent_term_get_all_export, am_persistent_term, am_get_all_trap, 2, &persistent_term_get_all_trap); erts_init_trap_export(&persistent_term_info_export, am_persistent_term, am_info_trap, 1, &persistent_term_info_trap); }
int init_async(int hndl) { erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; AsyncQueue* q; int i; thr_opts.detached = 0; thr_opts.suggested_stack_size = erts_async_thread_suggested_stack_size; #ifndef ERTS_SMP callbacks = NULL; async_handle = hndl; erts_mtx_init(&async_ready_mtx, "async_ready"); async_ready_list = NULL; #endif async_id = 0; erts_smp_spinlock_init(&async_id_lock, "async_id"); async_q = q = (AsyncQueue*) (erts_async_max_threads ? erts_alloc(ERTS_ALC_T_ASYNC_Q, erts_async_max_threads * sizeof(AsyncQueue)) : NULL); for (i = 0; i < erts_async_max_threads; i++) { q->head = NULL; q->tail = NULL; q->len = 0; #ifndef ERTS_SMP q->hndl = hndl; #endif #ifdef ERTS_ENABLE_LOCK_CHECK q->no = i; #endif erts_mtx_init(&q->mtx, "asyncq"); erts_cnd_init(&q->cv); erts_thr_create(&q->thr, async_main, (void*)q, &thr_opts); q++; } return 0; }
void erts_sys_pre_init(void) { erts_printf_add_cr_to_stdout = 1; erts_printf_add_cr_to_stderr = 1; #ifdef USE_THREADS { erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER; eid.thread_create_child_func = thr_create_prepare_child; /* Before creation in parent */ eid.thread_create_prepare_func = thr_create_prepare; /* After creation in parent */ eid.thread_create_parent_func = thr_create_cleanup, erts_thr_init(&eid); report_exit_list = NULL; #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init(); #endif #if defined(ERTS_SMP) erts_mtx_init(&chld_stat_mtx, "child_status"); #endif } #ifdef ERTS_SMP erts_smp_atomic32_init_nob(&erts_break_requested, 0); erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0); #else erts_break_requested = 0; have_prepared_crash_dump = 0; #endif #if !defined(ERTS_SMP) children_died = 0; #endif #endif /* USE_THREADS */ erts_printf_stdout_func = erts_sys_ramlog_printf; erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0); }
void erts_init_proc_lock(int cpus) { int i; for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) { #ifdef ERTS_ENABLE_LOCK_COUNT erts_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i)); #else erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock"); #endif } #if ERTS_PROC_LOCK_OWN_IMPL erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc"); queue_free_list = NULL; erts_thr_install_exit_handler(cleanup_tse); if (cpus > 1) { proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE; proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC * ((int) erts_no_schedulers)); aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT; } else if (cpus == 1) { proc_lock_spin_count = 0; aux_thr_proc_lock_spin_count = 0; } else { /* No of cpus unknown. Assume multi proc, but be conservative. */ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2; aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2; } if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX) proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX; #endif #ifdef ERTS_ENABLE_LOCK_CHECK lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main"); lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link"); lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq"); lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status"); #endif }
void erts_msacc_init_thread(char *type, int id, int managed) { ErtsMsAcc *msacc; msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc) + sizeof(ErtsMsAccPerfCntr) * ERTS_MSACC_STATE_COUNT); msacc->type = strdup(type); msacc->id = make_small(id); msacc->unmanaged = !managed; msacc->tid = erts_thr_self(); msacc->perf_counter = 0; #ifdef USE_THREADS erts_rwmtx_rwlock(&msacc_mutex); if (!managed) { erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex"); msacc->next = msacc_unmanaged; msacc_unmanaged = msacc; msacc_unmanaged_count++; ERTS_MSACC_TSD_SET(msacc); } else { msacc->next = msacc_managed; msacc_managed = msacc; } erts_rwmtx_rwunlock(&msacc_mutex); #else msacc_managed = msacc; #endif erts_msacc_reset(msacc); #ifdef ERTS_MSACC_ALWAYS_ON ERTS_MSACC_TSD_SET(msacc); msacc->perf_counter = erts_sys_perf_counter(); msacc->state = ERTS_MSACC_STATE_OTHER; #endif }
void erl_drv_thr_init(void) { int i; #ifdef USE_THREADS int res = ethr_tsd_key_create(&tid_key,"erts_tid_key"); if (res == 0) res = ethr_install_exit_handler(thread_exit_handler); if (res != 0) fatal_error(res, "erl_drv_thr_init()"); #else tsd_len = 0; tsd = NULL; #endif no_name = "unknown"; next_tsd_key = 0; max_used_tsd_key = -1; used_tsd_keys_len = ERL_DRV_TSD_KEYS_INC; used_tsd_keys = erts_alloc(ERTS_ALC_T_DRV_TSD, sizeof(char *)*ERL_DRV_TSD_KEYS_INC); for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++) used_tsd_keys[i] = NULL; erts_mtx_init(&tsd_mtx, "drv_tsd"); }
/*
 * Initialize allocator instrumentation.
 *
 * stat:     enable per-type statistics wrappers.
 * map_stat: enable memory-map tracking (implies statistics).
 *
 * Returns the per-block header overhead (in bytes) the chosen wrapper
 * adds to every allocation, or 0 when instrumentation is disabled.
 *
 * NOTE(review): must run before any wrapped allocator is used, since it
 * swaps the function pointers in erts_allctrs in place — presumably
 * called during early boot; confirm against the caller.
 */
Uint erts_instr_init(int stat, int map_stat)
{
    Uint extra_sz;
    int i;
    am_tot = NULL;
    am_n = NULL;
    am_c = NULL;
    am_a = NULL;
    erts_instr_memory_map = 0;
    erts_instr_stat = 0;
    atoms_initialized = 0;
    if (!stat && !map_stat)
        return 0;
    stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
    erts_mtx_init(&instr_mutex, "instr");
    mem_anchor = NULL;
    /* Install instrumentation functions */
    ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
    /* Save the real allocator table so the wrappers can forward to it. */
    sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
    sys_memzero((void *) &stats->tot, sizeof(Stat_t));
    sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1));
    sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1));
    sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
    /* Disabled allocators account against the system allocator's slot. */
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        if (erts_allctrs_info[i].enabled)
            stats->ap[i] = &stats->a[i];
        else
            stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM];
    }
    if (map_stat) {
        /* Memory-map mode: heavier wrappers, larger block header. */
        erts_mtx_init(&instr_x_mutex, "instr_x");
        erts_instr_memory_map = 1;
        erts_instr_stat = 1;
        for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
            erts_allctrs[i].alloc = map_stat_alloc;
            erts_allctrs[i].realloc = map_stat_realloc;
            erts_allctrs[i].free = map_stat_free;
            erts_allctrs[i].extra = (void *) &real_allctrs[i];
        }
        instr_wrapper.lock = map_stat_pre_lock;
        instr_wrapper.unlock = map_stat_pre_unlock;
        extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
    }
    else {
        /* Statistics-only mode. */
        erts_instr_stat = 1;
        for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
            erts_allctrs[i].alloc = stat_alloc;
            erts_allctrs[i].realloc = stat_realloc;
            erts_allctrs[i].free = stat_free;
            erts_allctrs[i].extra = (void *) &real_allctrs[i];
        }
        instr_wrapper.lock = stat_pre_lock;
        instr_wrapper.unlock = stat_pre_unlock;
        extra_sz = STAT_BLOCK_HEADER_SIZE;
    }
    erts_allctr_wrapper_prelock_init(&instr_wrapper);
    return extra_sz;
}
static void poll_debug_init(void) { erts_mtx_init(&save_ops_mtx, "save_ops_lock"); }
/*
 * Initialize the async-thread subsystem (no-op when the pool size is 0).
 *
 * Carves one cache-aligned allocation into three regions: the shared
 * ErtsAsyncData header, one ErtsAlgndAsyncQ per async thread, and
 * (when ERTS_USE_ASYNC_READY_Q) one ready queue per scheduler. Then
 * spawns the worker threads and blocks until all of them have reported
 * in, after which the init mutex/condvar are torn down.
 */
void erts_init_async(void)
{
    async = NULL;
    if (erts_async_max_threads > 0) {
#if ERTS_USE_ASYNC_READY_Q
        ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
#endif
        erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
        char *ptr, thr_name[16];
        size_t tot_size = 0;
        int i;
        /* Total size of the single backing allocation (see layout above). */
        tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
        tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
#if ERTS_USE_ASYNC_READY_Q
        tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
#endif
        ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
                                                 tot_size);
        async = (ErtsAsyncData *) ptr;
        ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
        /* Mutex/condvar pair used only during thread startup below. */
        async->init.data.no_initialized = 0;
        erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
                      ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
        erts_cnd_init(&async->init.data.cnd);
        erts_atomic_init_nob(&async->init.data.id, 0);
        async->queue = (ErtsAlgndAsyncQ *) ptr;
        ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
#if ERTS_USE_ASYNC_READY_Q
        qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
        qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
        qinit.notify = erts_notify_check_async_ready_queue;
        async->ready_queue = (ErtsAlgndAsyncReadyQ *) ptr;
        ptr += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
        /* Ready queues are indexed 1..erts_no_schedulers. */
        for (i = 1; i <= erts_no_schedulers; i++) {
            ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
            erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx",
                          make_small(i),
                          ERTS_LOCK_FLAGS_PROPERTY_STATIC
                          | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
            erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
            qinit.arg = (void *) (SWord) i;
            erts_thr_q_initialize(&arq->thr_q, &qinit);
        }
#endif
        /* Create async threads... */
        thr_opts.detached = 0;
        thr_opts.suggested_stack_size
            = erts_async_thread_suggested_stack_size;
        thr_opts.name = thr_name;
        for (i = 0; i < erts_async_max_threads; i++) {
            ErtsAsyncQ *aq = async_q(i);
            erts_snprintf(thr_opts.name, 16, "async_%d", i+1);
            erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
        }
        /* Wait for async threads to initialize... */
        erts_mtx_lock(&async->init.data.mtx);
        while (async->init.data.no_initialized != erts_async_max_threads)
            erts_cnd_wait(&async->init.data.cnd, &async->init.data.mtx);
        erts_mtx_unlock(&async->init.data.mtx);
        /* Startup synchronization objects are no longer needed. */
        erts_mtx_destroy(&async->init.data.mtx);
        erts_cnd_destroy(&async->init.data.cnd);
    }
}
/* Initialize the mutex guarding the saved poll-operations debug log. */
static void poll_debug_init(void)
{
    erts_mtx_init(&save_ops_mtx, "save_ops_lock", NIL,
                  ERTS_LOCK_FLAGS_PROPERTY_STATIC
                  | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
}