static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        lc_abort();

    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        lc_abort();

    l_lcks->emu_thread = 0;
    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;

    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();

    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}
static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    l_lcks->emu_thread = 0;
    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;

    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();

    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}
static lc_thread_t *
create_thread_data(char *thread_name)
{
    lc_thread_t *thr = malloc(sizeof(lc_thread_t));
    if (!thr)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!thr->thread_name)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->emu_thread = 0;
    thr->tid = erts_thr_self();
    thr->required.first = NULL;
    thr->required.last = NULL;
    thr->locked.first = NULL;
    thr->locked.last = NULL;
    thr->prev = NULL;
    thr->free_blocks = NULL;
    thr->chunks = NULL;
    sys_memzero(&thr->matrix, sizeof(thr->matrix));

    lc_lock_threads();
    thr->next = lc_threads;
    if (lc_threads)
        lc_threads->prev = thr;
    lc_threads = thr;
    lc_unlock_threads();

    erts_tsd_set(locks_key, (void *) thr);
    return thr;
}
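/*
 * Illustrative sketch, not part of the source above: both variants of this
 * function follow the same pattern -- allocate a per-thread record, push it
 * onto a global doubly linked list under a lock, and publish it through
 * thread-specific data so later lookups are O(1). A minimal standalone
 * analogue using plain pthreads; all names here are hypothetical, and
 * rec_key is assumed to have been created once with pthread_key_create().
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct thr_rec {
    char *name;
    struct thr_rec *prev, *next;
} thr_rec_t;

static pthread_key_t rec_key;           /* created once at startup */
static pthread_mutex_t rec_lock = PTHREAD_MUTEX_INITIALIZER;
static thr_rec_t *all_recs = NULL;      /* global list head, like lc_threads */

static thr_rec_t *get_or_create_rec(const char *name)
{
    thr_rec_t *rec = pthread_getspecific(rec_key);
    if (rec)
        return rec;                     /* fast path: already registered */
    rec = malloc(sizeof(*rec));
    if (!rec)
        abort();                        /* mirrors the lc_abort()/internal-error path */
    rec->name = strdup(name ? name : "unknown");
    rec->prev = NULL;
    pthread_mutex_lock(&rec_lock);      /* corresponds to lc_lock_threads() */
    rec->next = all_recs;
    if (all_recs)
        all_recs->prev = rec;
    all_recs = rec;
    pthread_mutex_unlock(&rec_lock);
    pthread_setspecific(rec_key, rec);  /* corresponds to erts_tsd_set() */
    return rec;
}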
static ErtsMsAcc *
get_msacc(void)
{
    ErtsMsAcc *msacc;

    erts_rwmtx_rlock(&msacc_mutex);
    msacc = msacc_managed;
    while (!erts_equal_tids(msacc->tid, erts_thr_self())) {
        msacc = msacc->next;
        ASSERT(msacc != NULL);
    }
    erts_rwmtx_runlock(&msacc_mutex);

    return msacc;
}
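/*
 * Sketch of the same reader-side pattern with POSIX primitives: a linear
 * search of a shared singly linked list under a read lock, so concurrent
 * lookups never block each other. Hypothetical names throughout; like the
 * original, it assumes the calling thread is guaranteed to be on the list.
 */
#include <pthread.h>
#include <assert.h>

typedef struct node {
    pthread_t tid;
    struct node *next;
} node_t;

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static node_t *managed = NULL;          /* plays the role of msacc_managed */

static node_t *find_own_node(void)
{
    node_t *n;
    pthread_rwlock_rdlock(&list_lock);  /* read lock: lookups run in parallel */
    n = managed;
    while (!pthread_equal(n->tid, pthread_self())) {
        n = n->next;
        assert(n != NULL);              /* every registered thread must be present */
    }
    pthread_rwlock_unlock(&list_lock);
    return n;
}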
static void
system_cleanup(int flush_async)
{
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
        /*
         * Another thread is currently exiting the system;
         * wait for it to do its job.
         */
#ifdef ERTS_SMP
        if (erts_thr_progress_is_managed_thread()) {
            /*
             * The exiting thread might be waiting for
             * us to block; need to update status...
             */
            erts_thr_progress_active(NULL, 0);
            erts_thr_progress_prepare_wait(NULL);
        }
#endif
        /* Wait forever... */
        while (1)
            erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded non-SMP case).
     */
    if (!flush_async
        || !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
        || !erts_equal_tids(main_thread, erts_thr_self())
#endif
        )
        return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
#endif

    erts_exit_flush_async();
}
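/*
 * Sketch of the "only one thread may exit" gate above, using C11 atomics
 * (hypothetical names). erts_atomic_inc_read_nob() returns the value *after*
 * the increment, so "!= 1" means "not first"; atomic_fetch_add() returns the
 * value *before*, so the equivalent test is "!= 0".
 */
#include <stdatomic.h>
#include <unistd.h>

static atomic_int exiting_cnt;          /* zero-initialized, like 'exiting' */

static void exit_once_gate(void)
{
    if (atomic_fetch_add(&exiting_cnt, 1) != 0) {
        for (;;)                        /* losers park forever; the winner exits */
            sleep(3600);
    }
    /* the winning thread continues with the actual cleanup */
}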
void erts_msacc_init_thread(char *type, int id, int managed)
{
    ErtsMsAcc *msacc;

    msacc = erts_alloc(ERTS_ALC_T_MSACC,
                       sizeof(ErtsMsAcc) +
                       sizeof(ErtsMsAccPerfCntr) * ERTS_MSACC_STATE_COUNT);

    msacc->type = strdup(type);
    msacc->id = make_small(id);
    msacc->unmanaged = !managed;
    msacc->tid = erts_thr_self();
    msacc->perf_counter = 0;

#ifdef USE_THREADS
    erts_rwmtx_rwlock(&msacc_mutex);
    if (!managed) {
        erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex");
        msacc->next = msacc_unmanaged;
        msacc_unmanaged = msacc;
        msacc_unmanaged_count++;
        ERTS_MSACC_TSD_SET(msacc);
    } else {
        msacc->next = msacc_managed;
        msacc_managed = msacc;
    }
    erts_rwmtx_rwunlock(&msacc_mutex);
#else
    msacc_managed = msacc;
#endif

    erts_msacc_reset(msacc);

#ifdef ERTS_MSACC_ALWAYS_ON
    ERTS_MSACC_TSD_SET(msacc);
    msacc->perf_counter = erts_sys_perf_counter();
    msacc->state = ERTS_MSACC_STATE_OTHER;
#endif
}
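/*
 * Sketch, hypothetical names: the single-allocation layout used above --
 * sizeof(ErtsMsAcc) plus one performance counter per state -- is the classic
 * trailing-array idiom. With C99 it is usually spelled as a flexible array
 * member, which keeps the header and all per-state counters in one block:
 */
#include <stdlib.h>
#include <string.h>

#define STATE_COUNT 8                   /* stand-in for ERTS_MSACC_STATE_COUNT */

typedef unsigned long long perf_cntr_t; /* stand-in for ErtsMsAccPerfCntr */

typedef struct {
    int id;
    perf_cntr_t counters[];             /* flexible array member */
} msacc_t;

static msacc_t *alloc_msacc(int id)
{
    /* one malloc covers the header and all STATE_COUNT counters */
    msacc_t *m = malloc(sizeof(msacc_t) + sizeof(perf_cntr_t) * STATE_COUNT);
    if (!m)
        return NULL;
    m->id = id;
    memset(m->counters, 0, sizeof(perf_cntr_t) * STATE_COUNT);
    return m;
}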
static int
early_init(int *argc, char **argv) /*
                                    * Only put things here which really
                                    * need to be initialized early!
                                    */
{
    ErtsAllocInitOpts alloc_opts = ERTS_ALLOC_INIT_DEF_OPTS_INITER;
    int ncpu;
    int ncpuonln;
    int ncpuavail;
    int schdlrs;
    int schdlrs_onln;
    int max_main_threads;
    int max_reader_groups;
    int reader_groups;
    char envbuf[21]; /* enough for any 64-bit integer */
    size_t envbufsz;

    erts_sched_compact_load = 1;
    erts_printf_eterm_func = erts_printf_term;
    erts_disable_tolerant_timeofday = 0;
    display_items = 200;
    erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES;
    erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
    erts_async_max_threads = 0;
    erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
    H_MIN_SIZE = H_DEFAULT_SIZE;
    BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;

    erts_initialized = 0;
    erts_use_sender_punish = 1;

    erts_pre_early_init_cpu_topology(&max_reader_groups,
                                     &ncpu,
                                     &ncpuonln,
                                     &ncpuavail);
#ifndef ERTS_SMP
    ncpu = 1;
    ncpuonln = 1;
    ncpuavail = 1;
#endif

    ignore_break = 0;
    replace_intr = 0;
    program = argv[0];

    erts_modified_timing_level = -1;
    erts_compat_rel = this_rel_num();
    erts_use_r9_pids_ports = 0;

    erts_sys_pre_init();
    erts_atomic_init_nob(&exiting, 0);
#ifdef ERTS_SMP
    erts_thr_progress_pre_init();
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_init();
#endif
#ifdef ERTS_SMP
    erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
    erts_tsd_key_create(&erts_is_crash_dumping_key);
#else
    erts_writing_erl_crash_dump = 0;
#endif

    erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
                               (erts_aint32_t) ((Uint16) -1));

    erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
    main_thread = erts_thr_self();
#endif

    /*
     * We need to know the number of schedulers to use before we
     * can initialize the allocators.
     */
    no_schedulers = (Uint) (ncpu > 0 ? ncpu : 1);
    no_schedulers_online = (ncpuavail > 0
                            ? ncpuavail
                            : (ncpuonln > 0 ? ncpuonln : no_schedulers));

    schdlrs = no_schedulers;
    schdlrs_onln = no_schedulers_online;

    envbufsz = sizeof(envbuf);

    /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
    if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
        erts_async_max_threads = atoi(envbuf);
    else
        erts_async_max_threads = 0;
    if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
        erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;

    if (argc && argv) {
        int i = 1;
        while (i < *argc) {
            if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
                i++;
                break;
            }
            if (argv[i][0] == '-') {
                switch (argv[i][1]) {
                case 'r': {
                    char *sub_param = argv[i]+2;
                    if (has_prefix("g", sub_param)) {
                        char *arg = get_arg(sub_param+1, argv[i+1], &i);
                        if (sscanf(arg, "%d", &max_reader_groups) != 1) {
                            erts_fprintf(stderr,
                                         "bad reader groups limit: %s\n",
                                         arg);
                            erts_usage();
                        }
                        if (max_reader_groups < 0) {
                            erts_fprintf(stderr,
                                         "bad reader groups limit: %d\n",
                                         max_reader_groups);
                            erts_usage();
                        }
                    }
                    break;
                }
                case 'A': {
                    /* set number of threads in thread pool */
                    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
                    if (((erts_async_max_threads = atoi(arg)) < 0) ||
                        (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
                        erts_fprintf(stderr,
                                     "bad number of async threads %s\n",
                                     arg);
                        erts_usage();
                    }
                    /* note: erts_usage() does not return, so the VERBOSE
                       call belongs after the error check, not inside it */
                    VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
                                           erts_async_max_threads));
                    break;
                }
                case 'S': {
                    int tot, onln;
                    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
                    switch (sscanf(arg, "%d:%d", &tot, &onln)) {
                    case 0:
                        switch (sscanf(arg, ":%d", &onln)) {
                        case 1:
                            tot = no_schedulers;
                            goto chk_S;
                        default:
                            goto bad_S;
                        }
                    case 1:
                        onln = tot < schdlrs_onln ? tot : schdlrs_onln;
                        /* fall through */
                    case 2:
                    chk_S:
                        if (tot > 0)
                            schdlrs = tot;
                        else
                            schdlrs = no_schedulers + tot;
                        if (onln > 0)
                            schdlrs_onln = onln;
                        else
                            schdlrs_onln = no_schedulers_online + onln;
                        if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
                            erts_fprintf(stderr,
                                         "bad amount of schedulers %d\n",
                                         tot);
                            erts_usage();
                        }
                        if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
                            erts_fprintf(stderr,
                                         "bad amount of schedulers online %d "
                                         "(total amount of schedulers %d)\n",
                                         schdlrs_onln, schdlrs);
                            erts_usage();
                        }
                        break;
                    default:
                    bad_S:
                        erts_fprintf(stderr,
                                     "bad amount of schedulers %s\n",
                                     arg);
                        erts_usage();
                        break;
                    }
                    VERBOSE(DEBUG_SYSTEM,
                            ("using %d:%d scheduler(s)\n", tot, onln));
                    break;
                }
                default:
                    break;
                }
            }
            i++;
        }
    }

#ifndef USE_THREADS
    erts_async_max_threads = 0;
#endif

#ifdef ERTS_SMP
    no_schedulers = schdlrs;
    no_schedulers_online = schdlrs_onln;

    erts_no_schedulers = (Uint) no_schedulers;
#endif
    erts_early_init_scheduling(no_schedulers);

    alloc_opts.ncpu = ncpu;
    erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
                                                 -M flags. */
    /* Require allocators */
#ifdef ERTS_SMP
    /*
     * Thread progress management:
     *
     * * Managed threads:
     * ** Scheduler threads (see erl_process.c)
     * ** Aux thread (see erl_process.c)
     * ** Sys message dispatcher thread (see erl_trace.c)
     *
     * * Unmanaged threads that need to register:
     * ** Async threads (see erl_async.c)
     */
    erts_thr_progress_init(no_schedulers,
                           no_schedulers+2,
                           erts_async_max_threads);
#endif
    erts_thr_q_init();
    erts_init_utils();
    erts_early_init_cpu_topology(no_schedulers,
                                 &max_main_threads,
                                 max_reader_groups,
                                 &reader_groups);

#ifdef USE_THREADS
    {
        erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
        elid.mem.std.alloc = ethr_std_alloc;
        elid.mem.std.realloc = ethr_std_realloc;
        elid.mem.std.free = ethr_std_free;
        elid.mem.sl.alloc = ethr_sl_alloc;
        elid.mem.sl.realloc = ethr_sl_realloc;
        elid.mem.sl.free = ethr_sl_free;
        elid.mem.ll.alloc = ethr_ll_alloc;
        elid.mem.ll.realloc = ethr_ll_realloc;
        elid.mem.ll.free = ethr_ll_free;
        elid.main_threads = max_main_threads;
        elid.reader_groups = reader_groups;

        erts_thr_late_init(&elid);
    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_late_init();
#endif

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_late_init();
#endif

#if defined(HIPE)
    hipe_signal_init(); /* must be done very early */
#endif

    erl_sys_args(argc, argv);

    /* Creates threads on Windows that depend on the arguments, so it has
       to be called after erl_sys_args */
    erl_sys_init();

    erts_ets_realloc_always_moves = 0;
    erts_ets_always_compress = 0;
    erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;

    return ncpu;
}
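/*
 * Sketch of the -S argument grammar handled above: "Tot:Onln", ":Onln" or
 * "Tot". The dispatch on sscanf()'s return value (the number of converted
 * fields) is what selects the case. A standalone, cut-down version with a
 * hypothetical helper name; validation against scheduler limits is omitted:
 */
#include <stdio.h>

/* returns 0 on success; *tot / *onln left at -1 mean "keep the default" */
static int parse_S(const char *arg, int *tot, int *onln)
{
    *tot = *onln = -1;
    switch (sscanf(arg, "%d:%d", tot, onln)) {
    case 2:                             /* "8:4" -> both given */
        return 0;
    case 1:                             /* "8"   -> only total given */
        return 0;
    case 0:                             /* maybe ":4" -> only online given */
        return sscanf(arg, ":%d", onln) == 1 ? 0 : -1;
    default:                            /* EOF: empty argument */
        return -1;
    }
}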
static void
system_cleanup(int flush_async)
{
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
        /*
         * Another thread is currently exiting the system;
         * wait for it to do its job.
         */
#ifdef ERTS_SMP
        if (erts_thr_progress_is_managed_thread()) {
            /*
             * The exiting thread might be waiting for
             * us to block; need to update status...
             */
            erts_thr_progress_active(NULL, 0);
            erts_thr_progress_prepare_wait(NULL);
        }
#endif
        /* Wait forever... */
        while (1)
            erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded non-SMP case).
     */
    if (!flush_async
        || !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
        || !erts_equal_tids(main_thread, erts_thr_self())
#endif
        )
        return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
#endif

#ifdef HYBRID
    if (ma_src_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_src_stack);
    if (ma_dst_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_dst_stack);
    if (ma_offset_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
        ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
                       (void*) global_heap,
                       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

    erts_exit_flush_async();
}
/* XXX THIS SHOULD BE IN SYSTEM !!!! */
void
erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
#ifdef ERTS_SMP
    ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
#endif
    int fd;
    size_t envsz;
    time_t now;
    char env[21]; /* enough to hold any 64-bit integer */
    size_t dumpnamebufsize = MAXPATHLEN;
    char dumpnamebuf[MAXPATHLEN];
    char* dumpname;
    int secs;
    int env_erl_crash_dump_seconds_set = 1;
    int i;

    if (ERTS_SOMEONE_IS_CRASH_DUMPING)
        return;

#ifdef ERTS_SMP
    /*
     * Order all managed threads to block; this has to be done first to
     * guarantee that this is the only thread generating the crash dump.
     */
    erts_thr_progress_fatal_error_block(&tpd_buf);

#ifdef ERTS_THR_HAVE_SIG_FUNCS
    /*
     * We suspend all scheduler threads so that we can dump some
     * data about the currently running processes and scheduler data.
     * We have to be very careful when doing this as the schedulers
     * could be anywhere.
     */
    for (i = 0; i < erts_no_schedulers; i++) {
        erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
        if (!erts_equal_tids(tid, erts_thr_self()))
            sys_thr_suspend(tid);
    }
#endif

    /* Allow us to pass certain places without locking... */
    erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
    erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
#else /* !ERTS_SMP */
    erts_writing_erl_crash_dump = 1;
#endif /* ERTS_SMP */

    envsz = sizeof(env);
    /* ERL_CRASH_DUMP_SECONDS not set
     * - if we have a heart port, behave as ERL_CRASH_DUMP_SECONDS = 0
     *   (do not write a dump, do not set an alarm, break immediately)
     * - otherwise write the dump indefinitely (until it is complete)
     *
     * ERL_CRASH_DUMP_SECONDS = 0
     * - do not write a dump
     * - do not set an alarm
     * - break immediately
     *
     * ERL_CRASH_DUMP_SECONDS < 0
     * - do not set an alarm
     * - write the dump until done
     *
     * ERL_CRASH_DUMP_SECONDS = S (S positive)
     * - don't dump forever
     * - set an alarm (set in sys)
     * - write the dump until the alarm fires or the file is complete
     */
    if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) {
        env_erl_crash_dump_seconds_set = 0;
        secs = -1;
    } else {
        env_erl_crash_dump_seconds_set = 1;
        secs = atoi(env);
    }

    if (secs == 0) {
        return;
    }

    /* erts_sys_prepare_crash_dump returns 1 if a heart port is found,
     * otherwise 0. If we don't find heart (0) and ERL_CRASH_DUMP_SECONDS
     * is not set, we should continue writing the dump.
     *
     * beware: secs == -1 means no alarm
     */
    if (erts_sys_prepare_crash_dump(secs) && !env_erl_crash_dump_seconds_set) {
        return;
    }

    if (erts_sys_getenv__("ERL_CRASH_DUMP", &dumpnamebuf[0], &dumpnamebufsize) != 0)
        dumpname = "erl_crash.dump";
    else
        dumpname = &dumpnamebuf[0];

    erts_fprintf(stderr, "\nCrash dump is being written to: %s...", dumpname);

    fd = open(dumpname, O_WRONLY | O_CREAT | O_TRUNC, 0640);
    if (fd < 0)
        return; /* Can't create the crash dump, skip it */

    time(&now);
    erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));

    if (file != NULL)
        erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line);

    if (fmt != NULL && *fmt != '\0') {
        erts_fdprintf(fd, "Slogan: ");
        erts_vfdprintf(fd, fmt, args);
    }
    erts_fdprintf(fd, "System version: ");
    erts_print_system_version(fd, NULL, NULL);
    erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
    erts_fdprintf(fd, "Taints: ");
    erts_print_nif_taints(fd, NULL);
    erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());

#ifdef USE_THREADS
    /* We want to note which thread it was that called erl_exit */
    if (erts_get_scheduler_data()) {
        erts_fdprintf(fd, "Calling Thread: scheduler:%d\n",
                      erts_get_scheduler_data()->no);
    } else {
        if (!erts_thr_getname(erts_thr_self(), dumpnamebuf, MAXPATHLEN))
            erts_fdprintf(fd, "Calling Thread: %s\n", dumpnamebuf);
        else
            erts_fdprintf(fd, "Calling Thread: %p\n", erts_thr_self());
    }
#else
    erts_fdprintf(fd, "Calling Thread: scheduler:1\n");
#endif

#if defined(ERTS_HAVE_TRY_CATCH)
    /*
     * erts_print_scheduler_info is not guaranteed to be safe to call
     * here for all schedulers, as we may have suspended a scheduler
     * in the middle of updating the STACK_TOP and STACK_START
     * variables; when scanning the stack we could then get
     * segmentation faults. We protect against this very unlikely
     * scenario by using ERTS_SYS_TRY_CATCH.
     */
    for (i = 0; i < erts_no_schedulers; i++) {
        ERTS_SYS_TRY_CATCH(
            erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i)),
            erts_fdprintf(fd, "** crashed **\n"));
    }
#endif

#ifdef ERTS_SMP
#if defined(ERTS_THR_HAVE_SIG_FUNCS)
    /*
     * We resume all schedulers so that we are in a known safe state
     * when we write the rest of the crash dump.
     */
    for (i = 0; i < erts_no_schedulers; i++) {
        erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
        if (!erts_equal_tids(tid, erts_thr_self()))
            sys_thr_resume(tid);
    }
#endif

    /*
     * Wait for all managed threads to block. If all threads haven't blocked
     * after a minute, we go ahead anyway and hope for the best...
     *
     * We do not release the system again. We expect an exit() or abort()
     * after the dump has been written.
     */
    erts_thr_progress_fatal_error_wait(60000);
    /* Either it worked or it didn't... */
#endif

#ifndef ERTS_HAVE_TRY_CATCH
    /* This is safe to call here, as all schedulers are blocked */
    for (i = 0; i < erts_no_schedulers; i++) {
        erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i));
    }
#endif

    info(fd, NULL); /* General system info */
    if (erts_ptab_initialized(&erts_proc))
        process_info(fd, NULL); /* Info about each process and port */
    db_info(fd, NULL, 0);
    erts_print_bif_timer_info(fd, NULL);
    distribution_info(fd, NULL);
    erts_fdprintf(fd, "=loaded_modules\n");
    loaded(fd, NULL);
    erts_dump_fun_entries(fd, NULL);
    erts_deep_process_dump(fd, NULL);
    erts_fdprintf(fd, "=atoms\n");
    dump_atoms(fd, NULL);

    /* Keep the instrumentation data at the end of the dump */
    if (erts_instr_memory_map || erts_instr_stat) {
        erts_fdprintf(fd, "=instr_data\n");

        if (erts_instr_stat) {
            erts_fdprintf(fd, "=memory_status\n");
            erts_instr_dump_stat_to_fd(fd, 0);
        }
        if (erts_instr_memory_map) {
            erts_fdprintf(fd, "=memory_map\n");
            erts_instr_dump_memory_map_to_fd(fd);
        }
    }

    erts_fdprintf(fd, "=end\n");
    close(fd);
    erts_fprintf(stderr, "done\n");
}
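/*
 * Sketch only: the ERL_CRASH_DUMP_SECONDS decision table documented in the
 * comment above, folded into one predicate. The helper name is hypothetical
 * and not in the source; it mirrors the combined effect of the secs == 0
 * early return and the erts_sys_prepare_crash_dump() heart-port check:
 *   - env unset + heart port present -> no dump, break immediately
 *   - env unset + no heart port      -> dump without a time limit
 *   - secs == 0                      -> no dump
 *   - secs < 0                       -> dump without an alarm
 *   - secs > 0                       -> dump with an alarm of 'secs'
 */
#include <stdbool.h>

static bool should_write_dump(bool env_set, int secs, bool heart_found)
{
    if (!env_set)
        return !heart_found;    /* heart will restart the node; skip the dump */
    return secs != 0;           /* 0 disables the dump; <0 and >0 both write */
}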
static void
system_cleanup(int exit_code)
{
    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded non-SMP case).
     */
    if (exit_code != 0
        || !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
        || !erts_equal_tids(main_thread, erts_thr_self())
#endif
        )
        return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif

#ifdef HYBRID
    if (ma_src_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_src_stack);
    if (ma_dst_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_dst_stack);
    if (ma_offset_stack)
        erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
        ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
                       (void*) global_heap,
                       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

#if defined(USE_THREADS)
    exit_async();
#endif

#if HAVE_ERTS_MSEG
    erts_mseg_exit();
#endif

    /*
     * A lot more cleaning could/should have been done...
     */
}