void
erts_lc_init(void)
{
#ifdef ERTS_LC_STATIC_ALLOC
    int i;
    static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
    for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
	memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
	fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
	   0xdf, sizeof(erts_lc_free_block_t));
#endif
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
    free_blocks = &fbs[0];
#else /* #ifdef ERTS_LC_STATIC_ALLOC */
    free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */

    if (ethr_spinlock_init(&free_blocks_lock) != 0)
	lc_abort();

    erts_tsd_key_create(&locks_key);
}
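/*
 * Illustrative sketch (not part of the original source): the loop above
 * threads a statically allocated array into an intrusive singly linked
 * free list. The names below (example_block_t, example_free_list_init)
 * are hypothetical; this is a minimal generic version of the same idea.
 */
#include <stddef.h>

typedef struct example_block {
    struct example_block *next;
} example_block_t;

#define EXAMPLE_CHUNK_SIZE 64

static example_block_t *
example_free_list_init(example_block_t blocks[EXAMPLE_CHUNK_SIZE])
{
    int i;
    /* Link each block to its successor; the last one terminates the list. */
    for (i = 0; i < EXAMPLE_CHUNK_SIZE - 1; i++)
	blocks[i].next = &blocks[i + 1];
    blocks[EXAMPLE_CHUNK_SIZE - 1].next = NULL;
    return &blocks[0]; /* head of the free list */
}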
void
erts_thr_progress_pre_init(void)
{
    intrnl = NULL;
    erts_tsd_key_create(&erts_thr_prgr_data_key__,
			"erts_thr_prgr_data_key");
    init_nob(&erts_thr_prgr__.current, ERTS_THR_PRGR_VAL_FIRST);
}
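/*
 * Illustrative sketch (hypothetical names, not ERTS code): on POSIX
 * builds erts_tsd_key_create() behaves conceptually like
 * pthread_key_create() -- one shared key created during pre-init, under
 * which each thread later stores its own per-thread data pointer.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t example_prgr_data_key;

static void
example_pre_init(void)
{
    /* Create the key once, before any thread needs its per-thread slot. */
    if (pthread_key_create(&example_prgr_data_key, free) != 0)
	abort();
}

static void
example_thread_attach(void *per_thread_data)
{
    /* Each thread stores its own pointer under the shared key. */
    pthread_setspecific(example_prgr_data_key, per_thread_data);
}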
void
erts_lc_init(void)
{
    if (ethr_spinlock_init(&lc_threads_lock) != 0)
	ERTS_INTERNAL_ERROR("spinlock_init failed");

    erts_tsd_key_create(&locks_key, "erts_lock_check_key");
}
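/*
 * Illustrative sketch (hypothetical names): lock initialization this
 * early in boot is fail-fast -- there is no sensible recovery if it
 * fails, so the lock checker aborts. A plain pthreads analogue:
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t example_threads_lock;

static void
example_lc_init(void)
{
    if (pthread_spin_init(&example_threads_lock,
			  PTHREAD_PROCESS_PRIVATE) != 0) {
	fprintf(stderr, "spinlock_init failed\n");
	abort(); /* no recovery path this early in boot */
    }
}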
/* We have to split the initialization in two parts,
 * as atoms are not yet initialized during early init. */
void
erts_msacc_early_init(void)
{
#ifndef ERTS_MSACC_ALWAYS_ON
    erts_msacc_enabled = 0;
#endif
    erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex");
#ifdef USE_THREADS
    erts_tsd_key_create(&erts_msacc_key, "erts_msacc_key");
#else
    erts_msacc = NULL;
#endif
}
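/*
 * Illustrative sketch (hypothetical macro and variable names): the
 * early/late split lets a feature default to "off" at boot unless it
 * was forced on at compile time; a later init phase, run once atoms
 * exist, finishes the setup.
 */
#include <stdbool.h>

/* #define EXAMPLE_MSACC_ALWAYS_ON 1  -- hypothetical compile-time override */

#ifdef EXAMPLE_MSACC_ALWAYS_ON
static bool example_msacc_enabled = true;  /* forced on, never cleared */
#else
static bool example_msacc_enabled = false; /* may be enabled at runtime */
#endif

static void
example_msacc_early_init(void)
{
#ifndef EXAMPLE_MSACC_ALWAYS_ON
    example_msacc_enabled = false; /* nothing is measured until late init */
#endif
    /* ...create locks and TSD keys here; the atom table is not yet usable... */
}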
void
erts_code_ix_init(void)
{
    /* We start the emulator by initializing the preloaded modules
     * single threaded, with the active and staging indexes both set
     * to zero. Preloading is finished by a commit that will set
     * things straight.
     */
    erts_smp_atomic32_init_nob(&the_active_code_index, 0);
    erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
    erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_key_create(&has_code_write_permission,
			"erts_has_code_write_permission");
#endif
    CIX_TRACE("init");
}
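/*
 * Illustrative sketch (hypothetical, using C11 atomics rather than the
 * erts_smp_atomic32 API): the code index scheme keeps two slots --
 * "active" (what running code sees) and "staging" (what a loader
 * mutates) -- and a commit publishes staging by storing it into active.
 */
#include <stdatomic.h>

static atomic_int example_active_code_index;  /* read by executing code */
static atomic_int example_staging_code_index; /* written by the loader */

static void
example_code_ix_init(void)
{
    /* Boot is single threaded: both indexes start at 0, as above. */
    atomic_init(&example_active_code_index, 0);
    atomic_init(&example_staging_code_index, 0);
}

static void
example_commit_staging(void)
{
    int ix = atomic_load_explicit(&example_staging_code_index,
				  memory_order_relaxed);
    /* Release store so readers see the staged tables before the index. */
    atomic_store_explicit(&example_active_code_index, ix,
			  memory_order_release);
}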
static int
early_init(int *argc, char **argv) /*
				    * Only put things here which are
				    * really important to initialize
				    * early!
				    */
{
    ErtsAllocInitOpts alloc_opts = ERTS_ALLOC_INIT_DEF_OPTS_INITER;
    int ncpu;
    int ncpuonln;
    int ncpuavail;
    int schdlrs;
    int schdlrs_onln;
    int max_main_threads;
    int max_reader_groups;
    int reader_groups;
    char envbuf[21]; /* enough for any 64-bit integer */
    size_t envbufsz;

    erts_sched_compact_load = 1;
    erts_printf_eterm_func = erts_printf_term;
    erts_disable_tolerant_timeofday = 0;
    display_items = 200;
    erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES;
    erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
    erts_async_max_threads = 0;
    erts_async_thread_suggested_stack_size
	= ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
    H_MIN_SIZE = H_DEFAULT_SIZE;
    BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;

    erts_initialized = 0;

    erts_use_sender_punish = 1;

    erts_pre_early_init_cpu_topology(&max_reader_groups,
				     &ncpu,
				     &ncpuonln,
				     &ncpuavail);
#ifndef ERTS_SMP
    ncpu = 1;
    ncpuonln = 1;
    ncpuavail = 1;
#endif

    ignore_break = 0;
    replace_intr = 0;
    program = argv[0];

    erts_modified_timing_level = -1;

    erts_compat_rel = this_rel_num();

    erts_use_r9_pids_ports = 0;

    erts_sys_pre_init();
    erts_atomic_init_nob(&exiting, 0);
#ifdef ERTS_SMP
    erts_thr_progress_pre_init();
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_init();
#endif
#ifdef ERTS_SMP
    erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
    erts_tsd_key_create(&erts_is_crash_dumping_key);
#else
    erts_writing_erl_crash_dump = 0;
#endif

    erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
			       (erts_aint32_t) ((Uint16) -1));

    erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
    main_thread = erts_thr_self();
#endif

    /*
     * We need to know the number of schedulers to use before we
     * can initialize the allocators.
     */
    no_schedulers = (Uint) (ncpu > 0 ? ncpu : 1);
    no_schedulers_online = (ncpuavail > 0
			    ? ncpuavail
			    : (ncpuonln > 0 ? ncpuonln : no_schedulers));

    schdlrs = no_schedulers;
    schdlrs_onln = no_schedulers_online;

    envbufsz = sizeof(envbuf);

    /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
    if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
	erts_async_max_threads = atoi(envbuf);
    else
	erts_async_max_threads = 0;
    if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
	erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;

    if (argc && argv) {
	int i = 1;
	while (i < *argc) {
	    if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
		i++;
		break;
	    }
	    if (argv[i][0] == '-') {
		switch (argv[i][1]) {
		case 'r': {
		    char *sub_param = argv[i]+2;
		    if (has_prefix("g", sub_param)) {
			char *arg = get_arg(sub_param+1, argv[i+1], &i);
			if (sscanf(arg, "%d", &max_reader_groups) != 1) {
			    erts_fprintf(stderr,
					 "bad reader groups limit: %s\n",
					 arg);
			    erts_usage();
			}
			if (max_reader_groups < 0) {
			    erts_fprintf(stderr,
					 "bad reader groups limit: %d\n",
					 max_reader_groups);
			    erts_usage();
			}
		    }
		    break;
		}
		case 'A': {
		    /* set number of threads in thread pool */
		    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
		    if (((erts_async_max_threads = atoi(arg)) < 0) ||
			(erts_async_max_threads
			 > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
			erts_fprintf(stderr,
				     "bad number of async threads %s\n",
				     arg);
			erts_usage();
		    }
		    /* erts_usage() does not return, so report only on
		       a successful parse */
		    VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
					   erts_async_max_threads));
		    break;
		}
		case 'S': {
		    int tot, onln;
		    char *arg = get_arg(argv[i]+2, argv[i+1], &i);
		    switch (sscanf(arg, "%d:%d", &tot, &onln)) {
		    case 0:
			switch (sscanf(arg, ":%d", &onln)) {
			case 1:
			    tot = no_schedulers;
			    goto chk_S;
			default:
			    goto bad_S;
			}
		    case 1:
			onln = tot < schdlrs_onln ? tot : schdlrs_onln;
			/* fall through */
		    case 2:
		    chk_S:
			if (tot > 0)
			    schdlrs = tot;
			else
			    schdlrs = no_schedulers + tot;
			if (onln > 0)
			    schdlrs_onln = onln;
			else
			    schdlrs_onln = no_schedulers_online + onln;
			if (schdlrs < 1
			    || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
			    erts_fprintf(stderr,
					 "bad amount of schedulers %d\n",
					 tot);
			    erts_usage();
			}
			if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
			    erts_fprintf(stderr,
					 "bad amount of schedulers online %d "
					 "(total amount of schedulers %d)\n",
					 schdlrs_onln, schdlrs);
			    erts_usage();
			}
			break;
		    default:
		    bad_S:
			erts_fprintf(stderr,
				     "bad amount of schedulers %s\n",
				     arg);
			erts_usage();
			break;
		    }
		    VERBOSE(DEBUG_SYSTEM,
			    ("using %d:%d scheduler(s)\n", tot, onln));
		    break;
		}
		default:
		    break;
		}
	    }
	    i++;
	}
    }

#ifndef USE_THREADS
    erts_async_max_threads = 0;
#endif

#ifdef ERTS_SMP
    no_schedulers = schdlrs;
    no_schedulers_online = schdlrs_onln;

    erts_no_schedulers = (Uint) no_schedulers;
#endif
    erts_early_init_scheduling(no_schedulers);

    alloc_opts.ncpu = ncpu;
    erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
						 -M flags. */
    /* Require allocators */
#ifdef ERTS_SMP
    /*
     * Thread progress management:
     *
     * * Managed threads:
     * ** Scheduler threads (see erl_process.c)
     * ** Aux thread (see erl_process.c)
     * ** Sys message dispatcher thread (see erl_trace.c)
     *
     * * Unmanaged threads that need to register:
     * ** Async threads (see erl_async.c)
     */
    erts_thr_progress_init(no_schedulers,
			   no_schedulers+2,
			   erts_async_max_threads);
#endif
    erts_thr_q_init();
    erts_init_utils();
    erts_early_init_cpu_topology(no_schedulers,
				 &max_main_threads,
				 max_reader_groups,
				 &reader_groups);

#ifdef USE_THREADS
    {
	erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
	elid.mem.std.alloc = ethr_std_alloc;
	elid.mem.std.realloc = ethr_std_realloc;
	elid.mem.std.free = ethr_std_free;
	elid.mem.sl.alloc = ethr_sl_alloc;
	elid.mem.sl.realloc = ethr_sl_realloc;
	elid.mem.sl.free = ethr_sl_free;
	elid.mem.ll.alloc = ethr_ll_alloc;
	elid.mem.ll.realloc = ethr_ll_realloc;
	elid.mem.ll.free = ethr_ll_free;
	elid.main_threads = max_main_threads;
	elid.reader_groups = reader_groups;

	erts_thr_late_init(&elid);
    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_late_init();
#endif

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_late_init();
#endif

#if defined(HIPE)
    hipe_signal_init();	/* must be done very early */
#endif

    erl_sys_args(argc, argv);

    /* Creates threads on Windows that depend on the arguments, so
       this has to be done after erl_sys_args() */
    erl_sys_init();

    erts_ets_realloc_always_moves = 0;
    erts_ets_always_compress = 0;
    erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;

    return ncpu;
}
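/*
 * Illustrative sketch (hypothetical helper, standalone C): the -S
 * argument parser above relies on sscanf() return values to tell the
 * three accepted shapes apart: "Tot" (1 match on "%d:%d"), "Tot:Onln"
 * (2 matches), and ":Onln" (0 matches on "%d:%d", then 1 match on
 * ":%d"). The emulator then range-checks and clamps the results
 * separately, as in chk_S above.
 */
#include <stdio.h>

/* Fills in whichever of tot/onln are present. Returns a bitmask:
   bit 0 = tot supplied, bit 1 = onln supplied; -1 on parse error. */
static int
example_parse_S(const char *arg, int *tot, int *onln)
{
    switch (sscanf(arg, "%d:%d", tot, onln)) {
    case 2:
	return 3;                              /* "Tot:Onln" */
    case 1:
	return 1;                              /* "Tot" */
    case 0:
	return sscanf(arg, ":%d", onln) == 1
	    ? 2                                /* ":Onln" */
	    : -1;
    default:
	return -1;
    }
}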
erts_sspa_data_t *
erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
    erts_sspa_data_t *data;
    size_t tot_size;
    size_t chunk_mem_size;
    char *p;
    char *chunk_start;
    int cix;
    int no_blocks = pa_size;
    int no_blocks_per_chunk;
    size_t aligned_blk_sz;

#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
    /* Force 64-bit alignment... */
    aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
#else
    /* Alignment of structure is enough... */
    aligned_blk_sz = blk_sz;
#endif

    if (!name) { /* schedulers only variant */
	ASSERT(!nthreads);
	nthreads = erts_no_schedulers;
    }
    else {
	ASSERT(nthreads > 0);
    }

    if (nthreads == 1)
	no_blocks_per_chunk = no_blocks;
    else {
	int extra = (no_blocks - 1)/4 + 1;
	if (extra == 0)
	    extra = 1;
	no_blocks_per_chunk = no_blocks;
	no_blocks_per_chunk += extra * nthreads;
	no_blocks_per_chunk /= nthreads;
    }
    no_blocks = no_blocks_per_chunk * nthreads;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
    chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
    tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    tot_size += chunk_mem_size * nthreads;

    p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA,
					   tot_size);
    data = (erts_sspa_data_t *) p;
    p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    chunk_start = p;

    data->chunks_mem_size = chunk_mem_size;
    data->start = chunk_start;
    data->end = chunk_start + chunk_mem_size * nthreads;
    data->nthreads = nthreads;

    if (name) { /* thread variant */
	erts_tsd_key_create(&data->tsd_key, (char*)name);
	erts_atomic_init_nob(&data->id_generator, 0);
    }

    /* Initialize all chunks */
    for (cix = 0; cix < nthreads; cix++) {
	erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
	erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
	erts_sspa_blk_t *blk;
	int i;

	erts_atomic_init_nob(&chdr->tail.data.last,
			     (erts_aint_t) &chdr->tail.data.marker);
	erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc,
			     ERTS_AINT_NULL);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
	erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);

	chdr->head.no_thr_progress_check = 0;
	chdr->head.used_marker = 1;
	chdr->head.first = &chdr->tail.data.marker;
	chdr->head.unref_end = &chdr->tail.data.marker;
	chdr->head.next.thr_progress = erts_thr_progress_current();
	chdr->head.next.thr_progress_reached = 1;
	chdr->head.next.um_refc_ix = 1;
	chdr->head.next.unref_end = &chdr->tail.data.marker;

	p = &chnk->data[0];
	chdr->local.first = (erts_sspa_blk_t *) p;
	blk = (erts_sspa_blk_t *) p;
	for (i = 0; i < no_blocks_per_chunk; i++) {
	    blk = (erts_sspa_blk_t *) p;
	    p += aligned_blk_sz;
	    blk->next_ptr = (erts_sspa_blk_t *) p;
	}
	blk->next_ptr = NULL;
	chdr->local.last = blk;
	chdr->local.cnt = no_blocks_per_chunk;
	chdr->local.lim = no_blocks_per_chunk / 3;

	ERTS_SSPA_DBG_CHK_LCL(chdr);
    }

    return data;
}
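/*
 * Illustrative sketch (hypothetical functions): two pieces of the
 * arithmetic above, spelled out. Rounding a block size up to the next
 * multiple of 8 uses ((sz - 1)/8)*8 + 8; for blk_sz = 20 that is
 * (19/8)*8 + 8 = 16 + 8 = 24. And the per-chunk block count spreads
 * no_blocks across nthreads with roughly a quarter of the pool added
 * per thread, so no chunk starts undersized.
 */
#include <stddef.h>

static size_t
example_align8(size_t blk_sz)
{
    return ((blk_sz - 1) / 8) * 8 + 8; /* 20 -> 24, 24 -> 24, 25 -> 32 */
}

static int
example_blocks_per_chunk(int no_blocks, int nthreads)
{
    int extra = (no_blocks - 1) / 4 + 1; /* ~no_blocks/4, at least 1 */
    /* e.g. no_blocks = 100, nthreads = 4: (100 + 25*4)/4 = 50 per chunk */
    return (no_blocks + extra * nthreads) / nthreads;
}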