void
erts_proc_lock_init(Process *p)
{
    /* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_smp_atomic32_init_nob(&p->lock.flags,
                               (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
    p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
    p->lock.queues = NULL;
    p->lock.refc = 1;
#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_proc_lock_init(p);
    erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
    erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL,
                               __FILE__, __LINE__);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
    {
        int i;
        for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
            erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
    }
#endif
}
void
erts_bp_init(void)
{
    erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
    erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
#ifdef ERTS_DIRTY_SCHEDULERS
    erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
                      ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#endif
}
void
erts_code_ix_init(void)
{
    /* We start emulator by initializing preloaded modules
     * single threaded with active and staging set both to zero.
     * Preloading is finished by a commit that will set things straight.
     */
    erts_smp_atomic32_init_nob(&the_active_code_index, 0);
    erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
    erts_smp_mtx_init(&the_code_ix_queue_lock, "code_ix_queue");
    CIX_TRACE("init");
}
void
erts_proc_lock_init(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    /* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_smp_atomic32_init_nob(&p->lock.flags,
                               (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
    p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
    p->lock.queues = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    erts_mtx_init_x(&p->lock.main, "proc_main", p->id);
    ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.main.lc);
#endif
    erts_mtx_init_x(&p->lock.link, "proc_link", p->id);
    ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.link.lc);
#endif
    erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id);
    ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
    erts_mtx_init_x(&p->lock.status, "proc_status", p->id);
    ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.status.lc);
#endif
#endif
    erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
    {
        int i;
        for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
            erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
    }
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_proc_lock_init(p);
    erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
    erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL,
                               __FILE__, __LINE__);
#endif
}
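/*
 * Both erts_proc_lock_init versions above build on the same idea: when
 * ERTS_PROC_LOCK_ATOMIC_IMPL is in effect, the per-process locks (main,
 * link, msgq, status) are bits in a single atomic flags word, and a new
 * process starts with every bit set (ERTS_PROC_LOCKS_ALL) because its
 * creator already holds all of them. The sketch below shows only that
 * bit-flag try-lock idea with C11 atomics; the names and the 4-bit layout
 * are assumptions for illustration, and the real implementation also
 * queues waiters instead of simply failing.
 */
#include <stdatomic.h>

#define LOCK_MAIN   (1u << 0)
#define LOCK_LINK   (1u << 1)
#define LOCK_MSGQ   (1u << 2)
#define LOCK_STATUS (1u << 3)
#define LOCKS_ALL   (LOCK_MAIN | LOCK_LINK | LOCK_MSGQ | LOCK_STATUS)

typedef struct { atomic_uint flags; } proc_lock_sketch;

/* A freshly created process starts with all of its locks held by the creator. */
static void proc_lock_init_sketch(proc_lock_sketch *l)
{
    atomic_init(&l->flags, LOCKS_ALL);
}

/* Try to take the requested lock bits; fail if any of them is already set. */
static int proc_trylock_sketch(proc_lock_sketch *l, unsigned locks)
{
    unsigned old = atomic_load_explicit(&l->flags, memory_order_relaxed);
    do {
        if (old & locks)
            return 0;                 /* at least one requested lock is taken */
    } while (!atomic_compare_exchange_weak_explicit(&l->flags, &old,
                                                    old | locks,
                                                    memory_order_acquire,
                                                    memory_order_relaxed));
    return 1;
}

static void proc_unlock_sketch(proc_lock_sketch *l, unsigned locks)
{
    atomic_fetch_and_explicit(&l->flags, ~locks, memory_order_release);
}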
void
erts_code_ix_init(void)
{
    /* We start emulator by initializing preloaded modules
     * single threaded with active and staging set both to zero.
     * Preloading is finished by a commit that will set things straight.
     */
    erts_smp_atomic32_init_nob(&the_active_code_index, 0);
    erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
    erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_key_create(&has_code_write_permission,
                        "erts_has_code_write_permission");
#endif
    CIX_TRACE("init");
}
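/*
 * Both erts_code_ix_init variants above (and the breakpoint indices set up
 * in erts_bp_init) use the same double-index scheme: loading happens into
 * the staging generation while readers keep using the active one, and, as
 * the comment says, "a commit will set things straight". The sketch below
 * illustrates that flip with C11 atomics; the names, the generation count
 * and the absence of locking are assumptions made for the example only -
 * the real commit in OTP also handles code write permission and
 * thread-progress barriers.
 */
#include <stdatomic.h>

#define NUM_IX 2                      /* assumed generation count for the sketch */

static atomic_int active_ix;          /* what readers use */
static atomic_int staging_ix;         /* what the loader fills in */

static void code_ix_init_sketch(void)
{
    /* Single threaded at boot: both indices start at generation 0. */
    atomic_init(&active_ix, 0);
    atomic_init(&staging_ix, 0);
}

static void code_ix_commit_sketch(void)
{
    int staged = atomic_load_explicit(&staging_ix, memory_order_relaxed);

    /* Publish the staged generation to readers ... */
    atomic_store_explicit(&active_ix, staged, memory_order_release);

    /* ... and move staging on to the next slot for the next load. */
    atomic_store_explicit(&staging_ix, (staged + 1) % NUM_IX,
                          memory_order_relaxed);
}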
void
erts_sys_pre_init(void)
{
    erts_printf_add_cr_to_stdout = 1;
    erts_printf_add_cr_to_stderr = 1;
#ifdef USE_THREADS
    {
        erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;

        eid.thread_create_child_func = thr_create_prepare_child;
        /* Before creation in parent */
        eid.thread_create_prepare_func = thr_create_prepare;
        /* After creation in parent */
        eid.thread_create_parent_func = thr_create_cleanup;

        erts_thr_init(&eid);

        report_exit_list = NULL;

#ifdef ERTS_ENABLE_LOCK_COUNT
        erts_lcnt_init();
#endif

#if defined(ERTS_SMP)
        erts_mtx_init(&chld_stat_mtx, "child_status");
#endif
    }
#ifdef ERTS_SMP
    erts_smp_atomic32_init_nob(&erts_break_requested, 0);
    erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
#else
    erts_break_requested = 0;
    have_prepared_crash_dump = 0;
#endif
#if !defined(ERTS_SMP)
    children_died = 0;
#endif
#endif /* USE_THREADS */
    erts_printf_stdout_func = erts_sys_ramlog_printf;
    erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
static int
early_init(int *argc, char **argv) /*
                                    * Only put things here which are
                                    * really important to initialize
                                    * early!
                                    */
{
    ErtsAllocInitOpts alloc_opts = ERTS_ALLOC_INIT_DEF_OPTS_INITER;
    int ncpu;
    int ncpuonln;
    int ncpuavail;
    int schdlrs;
    int schdlrs_onln;
    int max_main_threads;
    int max_reader_groups;
    int reader_groups;
    char envbuf[21]; /* enough for any 64-bit integer */
    size_t envbufsz;

    erts_sched_compact_load = 1;
    erts_printf_eterm_func = erts_printf_term;
    erts_disable_tolerant_timeofday = 0;
    display_items = 200;
    erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES;
    erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
    erts_async_max_threads = 0;
    erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
    H_MIN_SIZE = H_DEFAULT_SIZE;
    BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;

    erts_initialized = 0;

    erts_use_sender_punish = 1;

    erts_pre_early_init_cpu_topology(&max_reader_groups,
                                     &ncpu,
                                     &ncpuonln,
                                     &ncpuavail);
#ifndef ERTS_SMP
    ncpu = 1;
    ncpuonln = 1;
    ncpuavail = 1;
#endif

    ignore_break = 0;
    replace_intr = 0;
    program = argv[0];

    erts_modified_timing_level = -1;

    erts_compat_rel = this_rel_num();

    erts_use_r9_pids_ports = 0;

    erts_sys_pre_init();
    erts_atomic_init_nob(&exiting, 0);
#ifdef ERTS_SMP
    erts_thr_progress_pre_init();
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_init();
#endif
#ifdef ERTS_SMP
    erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
    erts_tsd_key_create(&erts_is_crash_dumping_key);
#else
    erts_writing_erl_crash_dump = 0;
#endif

    erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
                               (erts_aint32_t) ((Uint16) -1));

    erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
    main_thread = erts_thr_self();
#endif

    /*
     * We need to know the number of schedulers to use before we
     * can initialize the allocators.
     */
    no_schedulers = (Uint) (ncpu > 0 ? ncpu : 1);
    no_schedulers_online = (ncpuavail > 0
                            ? ncpuavail
                            : (ncpuonln > 0 ? ncpuonln : no_schedulers));

    schdlrs = no_schedulers;
    schdlrs_onln = no_schedulers_online;

    envbufsz = sizeof(envbuf);

    /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
    if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
        erts_async_max_threads = atoi(envbuf);
    else
        erts_async_max_threads = 0;
    if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
        erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;

    if (argc && argv) {
        int i = 1;
        while (i < *argc) {
            if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
                i++;
                break;
            }
            if (argv[i][0] == '-') {
                switch (argv[i][1]) {
                case 'r': {
                    char *sub_param = argv[i]+2;
                    if (has_prefix("g", sub_param)) {
                        char *arg = get_arg(sub_param+1, argv[i+1], &i);
                        if (sscanf(arg, "%d", &max_reader_groups) != 1) {
                            erts_fprintf(stderr,
                                         "bad reader groups limit: %s\n", arg);
                            erts_usage();
                        }
                        if (max_reader_groups < 0) {
                            erts_fprintf(stderr,
                                         "bad reader groups limit: %d\n",
                                         max_reader_groups);
                            erts_usage();
                        }
                    }
                    break;
                }
                case 'A': {
                    /* set number of threads in thread pool */
                    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
                    if (((erts_async_max_threads = atoi(arg)) < 0) ||
                        (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
                        erts_fprintf(stderr,
                                     "bad number of async threads %s\n",
                                     arg);
                        erts_usage();
                    }
                    VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
                                           erts_async_max_threads));
                    break;
                }
                case 'S': {
                    int tot, onln;
                    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
                    switch (sscanf(arg, "%d:%d", &tot, &onln)) {
                    case 0:
                        switch (sscanf(arg, ":%d", &onln)) {
                        case 1:
                            tot = no_schedulers;
                            goto chk_S;
                        default:
                            goto bad_S;
                        }
                    case 1:
                        onln = tot < schdlrs_onln ? tot : schdlrs_onln;
                        /* fall through */
                    case 2:
                    chk_S:
                        if (tot > 0)
                            schdlrs = tot;
                        else
                            schdlrs = no_schedulers + tot;
                        if (onln > 0)
                            schdlrs_onln = onln;
                        else
                            schdlrs_onln = no_schedulers_online + onln;
                        if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
                            erts_fprintf(stderr,
                                         "bad amount of schedulers %d\n",
                                         tot);
                            erts_usage();
                        }
                        if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
                            erts_fprintf(stderr,
                                         "bad amount of schedulers online %d "
                                         "(total amount of schedulers %d)\n",
                                         schdlrs_onln, schdlrs);
                            erts_usage();
                        }
                        break;
                    default:
                    bad_S:
                        erts_fprintf(stderr,
                                     "bad amount of schedulers %s\n",
                                     arg);
                        erts_usage();
                        break;
                    }

                    VERBOSE(DEBUG_SYSTEM,
                            ("using %d:%d scheduler(s)\n", tot, onln));
                    break;
                }
                default:
                    break;
                }
            }
            i++;
        }
    }

#ifndef USE_THREADS
    erts_async_max_threads = 0;
#endif

#ifdef ERTS_SMP
    no_schedulers = schdlrs;
    no_schedulers_online = schdlrs_onln;

    erts_no_schedulers = (Uint) no_schedulers;
#endif
    erts_early_init_scheduling(no_schedulers);

    alloc_opts.ncpu = ncpu;
    erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
                                                 -M flags. */
    /* Require allocators */
#ifdef ERTS_SMP
    /*
     * Thread progress management:
     *
     * * Managed threads:
     * ** Scheduler threads (see erl_process.c)
     * ** Aux thread (see erl_process.c)
     * ** Sys message dispatcher thread (see erl_trace.c)
     *
     * * Unmanaged threads that need to register:
     * ** Async threads (see erl_async.c)
     */
    erts_thr_progress_init(no_schedulers,
                           no_schedulers+2,
                           erts_async_max_threads);
#endif
    erts_thr_q_init();
    erts_init_utils();
    erts_early_init_cpu_topology(no_schedulers,
                                 &max_main_threads,
                                 max_reader_groups,
                                 &reader_groups);

#ifdef USE_THREADS
    {
        erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
        elid.mem.std.alloc = ethr_std_alloc;
        elid.mem.std.realloc = ethr_std_realloc;
        elid.mem.std.free = ethr_std_free;
        elid.mem.sl.alloc = ethr_sl_alloc;
        elid.mem.sl.realloc = ethr_sl_realloc;
        elid.mem.sl.free = ethr_sl_free;
        elid.mem.ll.alloc = ethr_ll_alloc;
        elid.mem.ll.realloc = ethr_ll_realloc;
        elid.mem.ll.free = ethr_ll_free;
        elid.main_threads = max_main_threads;
        elid.reader_groups = reader_groups;

        erts_thr_late_init(&elid);
    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_late_init();
#endif

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_late_init();
#endif

#if defined(HIPE)
    hipe_signal_init();	/* must be done very early */
#endif

    erl_sys_args(argc, argv);

    /* Creates threads on Windows that depend on the arguments,
       so has to be after erl_sys_args */
    erl_sys_init();

    erts_ets_realloc_always_moves = 0;
    erts_ets_always_compress = 0;
    erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;

    return ncpu;
}
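/*
 * The -S handling in early_init() above accepts three forms: "Tot",
 * "Tot:Onln" and ":Onln", where values <= 0 are taken as offsets from the
 * detected defaults. The standalone sketch below mirrors just that sscanf
 * logic; parse_S(), the hard-coded defaults and the example values are
 * assumptions made for illustration (the real code also enforces
 * ERTS_MAX_NO_OF_SCHEDULERS as an upper bound).
 */
#include <stdio.h>

static int parse_S(const char *arg, int def_tot, int def_onln,
                   int *tot_out, int *onln_out)
{
    int tot, onln;
    switch (sscanf(arg, "%d:%d", &tot, &onln)) {
    case 0:                            /* ":Onln" keeps the default total */
        if (sscanf(arg, ":%d", &onln) != 1)
            return -1;
        tot = def_tot;
        break;
    case 1:                            /* "Tot" caps online at the new total */
        onln = tot < def_onln ? tot : def_onln;
        break;
    case 2:                            /* "Tot:Onln" sets both */
        break;
    default:
        return -1;
    }
    /* Values <= 0 are relative to the detected defaults. */
    *tot_out  = tot  > 0 ? tot  : def_tot  + tot;
    *onln_out = onln > 0 ? onln : def_onln + onln;
    return (*tot_out >= 1 && *onln_out >= 1 && *onln_out <= *tot_out) ? 0 : -1;
}

int main(void)
{
    const char *examples[] = { "8", "8:4", ":2", "-1:-1" };
    int i, tot, onln;

    for (i = 0; i < 4; i++) {          /* assume 4 detected cores, 4 online */
        if (parse_S(examples[i], 4, 4, &tot, &onln) == 0)
            printf("-S %-5s -> %d schedulers, %d online\n",
                   examples[i], tot, onln);
        else
            printf("-S %-5s -> rejected\n", examples[i]);
    }
    return 0;
}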
void
erts_sys_pre_init(void)
{
#ifdef USE_THREADS
    erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
#endif

    erts_printf_add_cr_to_stdout = 1;
    erts_printf_add_cr_to_stderr = 1;

#ifdef USE_THREADS
    eid.thread_create_child_func = thr_create_prepare_child;
    /* Before creation in parent */
    eid.thread_create_prepare_func = thr_create_prepare;
    /* After creation in parent */
    eid.thread_create_parent_func = thr_create_cleanup;

#ifdef ERTS_THR_HAVE_SIG_FUNCS
    sigemptyset(&thr_create_sigmask);
    sigaddset(&thr_create_sigmask, SIGINT);   /* block interrupt */
    sigaddset(&thr_create_sigmask, SIGUSR1);  /* block user defined signal */
#endif

    erts_thr_init(&eid);

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_init();
#endif

#endif /* USE_THREADS */

    erts_init_sys_time_sup();

#ifdef USE_THREADS
#ifdef ERTS_SMP
    erts_smp_atomic32_init_nob(&erts_break_requested, 0);
    erts_smp_atomic32_init_nob(&erts_got_sigusr1, 0);
    erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
#else
    erts_break_requested = 0;
    erts_got_sigusr1 = 0;
    have_prepared_crash_dump = 0;
#endif
#endif /* USE_THREADS */

    erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);

    {
        /*
         * Unfortunately we depend on fd 0,1,2 in the old shell code.
         * So if for some reason we do not have those open when we start
         * we have to open them here. Not doing this can cause the emulator
         * to deadlock when reaping the fd_driver ports :(
         */
        int fd;
        /* Make sure fd 0 is open */
        if ((fd = open("/dev/null", O_RDONLY)) != 0)
            close(fd);
        /* Make sure fds 1 and 2 are open */
        while (fd < 3) {
            fd = open("/dev/null", O_WRONLY);
        }
        close(fd);
    }

    /* We need a file descriptor to close in the crashdump creation.
     * We close this one to be sure we can get a fd for our real file ...
     * so, we create one here ... a stone to carry all the way home.
     */
    crashdump_companion_cube_fd = open("/dev/null", O_RDONLY);
    /* don't lose it, there will be cake */
}
void
erts_bp_init(void)
{
    erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
    erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
}
static ERTS_INLINE void
do_time_init(void)
{
    erts_smp_atomic32_init_nob(&do_time, 0);
}
/*
 * This function is responsible for enabling, disabling, resetting and
 * gathering data related to microstate accounting.
 *
 * Managed threads and unmanaged threads are handled differently.
 *   - managed threads get a misc_aux job telling them to switch on msacc
 *   - unmanaged have some fields protected by a mutex that has to be taken
 *     before any values can be updated
 *
 * For performance reasons there is also a global value erts_msacc_enabled
 * that controls the state of all threads. Statistics gathering is only on
 * if erts_msacc_enabled && msacc is true.
 */
Eterm
erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
    ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    Eterm ref;
    ErtsMSAccReq *msaccrp;
    Eterm *hp;

#ifdef ERTS_MSACC_ALWAYS_ON
    if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
        return THE_NON_VALUE;
#else
    /* take care of double enable, and double disable here */
    if (msacc && action == ERTS_MSACC_ENABLE) {
        return THE_NON_VALUE;
    } else if (!msacc && action == ERTS_MSACC_DISABLE) {
        return THE_NON_VALUE;
    }
#endif

    ref = erts_make_ref(c_p);

    msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
    hp = &msaccrp->ref_heap[0];

    msaccrp->action = action;
    msaccrp->proc = c_p;
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
#else
    *threads = 1;
#endif

    erts_smp_atomic32_init_nob(&msaccrp->refc, (erts_aint32_t) *threads);

    erts_proc_add_refc(c_p, *threads);

    if (erts_no_schedulers > 1)
        erts_schedule_multi_misc_aux_work(1,
                                          erts_no_schedulers,
                                          reply_msacc,
                                          (void *) msaccrp);
#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
#endif

#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
        Uint unmanaged_count;
        ErtsMsAcc *msacc, **unmanaged;
        int i = 0;

        /* we copy a list of pointers here so that we do not have
           to have the msacc_mutex when sending messages */
        erts_rwmtx_rlock(&msacc_mutex);
        unmanaged_count = msacc_unmanaged_count;
        unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
                               sizeof(ErtsMsAcc*)*unmanaged_count);

        for (i = 0, msacc = msacc_unmanaged;
             i < unmanaged_count;
             i++, msacc = msacc->next) {
            unmanaged[i] = msacc;
        }
        erts_rwmtx_runlock(&msacc_mutex);

        for (i = 0; i < unmanaged_count; i++) {
            erts_mtx_lock(&unmanaged[i]->mtx);
            if (unmanaged[i]->perf_counter) {
                ErtsSysPerfCounter perf_counter;
                /* if enabled update stats */
                perf_counter = erts_sys_perf_counter();
                unmanaged[i]->perf_counters[unmanaged[i]->state] +=
                    perf_counter - unmanaged[i]->perf_counter;
                unmanaged[i]->perf_counter = perf_counter;
            }
            erts_mtx_unlock(&unmanaged[i]->mtx);
            send_reply(unmanaged[i], msaccrp);
        }
        erts_free(ERTS_ALC_T_MSACC, unmanaged);
        /* We have just sent unmanaged_count messages, so bump no of threads */
        *threads += unmanaged_count;
        break;
    }
    case ERTS_MSACC_RESET: {
        ErtsMsAcc *msacc;
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
            erts_msacc_reset(msacc);
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_ENABLE: {
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            msacc->perf_counter = erts_sys_perf_counter();
            /* we assume the unmanaged thread is sleeping */
            msacc->state = ERTS_MSACC_STATE_SLEEP;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_DISABLE: {
        ErtsSysPerfCounter perf_counter;
        erts_rwmtx_rlock(&msacc_mutex);
        /* make sure to update stats with latest results */
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            perf_counter = erts_sys_perf_counter();
            msacc->perf_counters[msacc->state] +=
                perf_counter - msacc->perf_counter;
            msacc->perf_counter = 0;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    default: {
        ASSERT(0);
    }
    }
#endif

    *threads = make_small(*threads);

    reply_msacc((void *) msaccrp);

#ifndef ERTS_MSACC_ALWAYS_ON
    /* enable/disable the global value */
    if (action == ERTS_MSACC_ENABLE) {
        erts_msacc_enabled = 1;
    } else if (action == ERTS_MSACC_DISABLE) {
        erts_msacc_enabled = 0;
    }
#endif

    return ref;
#else
    return THE_NON_VALUE;
#endif
}
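/*
 * erts_msacc_request() above follows a recurring ERTS request pattern: one
 * request block is allocated, its refc is initialized to the number of
 * threads expected to answer, the request is fanned out, and every reply
 * drops one reference so that the last responder can free the block. The
 * pthread sketch below shows only that counting pattern; all names are
 * invented for the example, and the real code dispatches misc_aux work to
 * schedulers and sends Erlang messages back to the requesting process.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_int refc;        /* number of outstanding replies */
    int action;             /* what each thread should do */
} request;

static void reply(request *req)
{
    /* Each responder does its part, then drops its reference;
     * the last one to answer frees the request block. */
    if (atomic_fetch_sub_explicit(&req->refc, 1, memory_order_acq_rel) == 1) {
        printf("all threads have replied to action %d\n", req->action);
        free(req);
    }
}

static void *worker(void *arg)
{
    reply((request *) arg);
    return NULL;
}

int main(void)
{
    enum { NTHREADS = 4 };
    pthread_t tid[NTHREADS];
    request *req = malloc(sizeof *req);
    int i;

    req->action = 1;
    /* One reference per responding thread, plus one for the requester itself,
     * mirroring how erts_msacc_request() counts its own scheduler. */
    atomic_init(&req->refc, NTHREADS + 1);

    for (i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, worker, req);
    for (i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);

    reply(req);             /* the requester answers for itself as well */
    return 0;
}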