unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
{
    int i;
    struct bc_pool* p = &bccix[erts_staging_code_ix()];

    ASSERT(p->is_staging);
    /*
     * Allocate from free_list while it is non-empty.
     * If free_list is empty, allocate at high_mark.
     */
    if (p->free_list >= 0) {
        i = p->free_list;
        p->free_list = p->beam_catches[i].cdr;
    }
    else {
        if (p->high_mark >= p->tabsize) {
            /* No free slots and table is full: realloc table */
            beam_catch_t* prev_vec = p->beam_catches;
            unsigned newsize = p->tabsize*2;
            p->beam_catches = erts_alloc(ERTS_ALC_T_CODE,
                                         newsize*sizeof(beam_catch_t));
            sys_memcpy(p->beam_catches, prev_vec,
                       p->tabsize*sizeof(beam_catch_t));
            gc_old_vec(prev_vec);
            p->tabsize = newsize;
        }
        i = p->high_mark++;
    }
    p->beam_catches[i].cp  = cp;
    p->beam_catches[i].cdr = cdr;
    return i;
}
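/*
 * The allocator above combines an intrusive free list (freed slots are
 * chained through their own cdr fields) with bump allocation at high_mark
 * and table doubling when full. Below is a minimal standalone sketch of
 * the same scheme, using plain malloc/realloc in place of erts_alloc and
 * omitting the staged-table bookkeeping. pool_t, pool_alloc and pool_free
 * are illustrative names, not part of the ERTS API.
 */
#include <stdlib.h>

typedef struct { void *cp; int cdr; } slot_t;

typedef struct {
    slot_t  *slots;
    unsigned tabsize;
    unsigned high_mark;
    int      free_list;   /* -1 when empty; else index of first free slot */
} pool_t;

/* Allocate a slot: pop the free list if possible, else bump high_mark,
 * doubling the table when it is full. Returns the slot index. */
static unsigned pool_alloc(pool_t *p)
{
    unsigned i;
    if (p->free_list >= 0) {
        i = (unsigned) p->free_list;
        p->free_list = p->slots[i].cdr;   /* cdr links free slots together */
    } else {
        if (p->high_mark >= p->tabsize) {
            p->tabsize *= 2;
            p->slots = realloc(p->slots, p->tabsize * sizeof(slot_t));
        }
        i = p->high_mark++;
    }
    return i;
}

/* Free a slot by pushing it onto the intrusive free list. */
static void pool_free(pool_t *p, unsigned i)
{
    p->slots[i].cp = NULL;
    p->slots[i].cdr = p->free_list;
    p->free_list = (int) i;
}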
static void
table_end_staging_ranges(ErtsAlcType_t alctr, struct ranges* r, int commit)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (commit && r[dst].modules == NULL) {
        Sint i;
        Sint n;

        /* No modules added, just clone src and remove purged code. */
        ErtsCodeIndex src = erts_active_code_ix();

        erts_smp_atomic_add_nob(&mem_used, r[src].n);
        r[dst].modules = erts_alloc(alctr, r[src].n * sizeof(Range));
        r[dst].allocated = r[src].n;
        n = 0;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                r[dst].modules[n] = *rp;
                n++;
            }
        }
        r[dst].n = n;
        erts_smp_atomic_set_nob(&r[dst].mid,
                                (erts_aint_t) (r[dst].modules + n / 2));
    }
}
void
erts_update_ranges(BeamInstr* code, Uint size)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();

    if (src == dst) {
        ASSERT(!erts_initialized);

        /*
         * During start-up of system, the indices are the same
         * and erts_start_staging_ranges() has not been called.
         */
        if (r[dst].modules == NULL) {
            Sint need = 128;
            erts_atomic_add_nob(&mem_used, need);
            r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
                                        need * sizeof(Range));
            r[dst].allocated = need;
            write_ptr = r[dst].modules;
        }
    }

    ASSERT(r[dst].modules);
    write_ptr->start = code;
    erts_atomic_init_nob(&(write_ptr->end),
                         (erts_aint_t)(((byte *)code) + size));
    write_ptr++;
}
void
module_end_staging(int commit)
{
    ASSERT(dbg_load_code_ix == erts_staging_code_ix());

    if (!commit) { /* abort */
        IndexTable* tab = &module_tables[erts_staging_code_ix()];
        int oldsz, newsz;

        ASSERT(entries_at_start_staging <= tab->entries);
        oldsz = index_table_sz(tab);
        index_erase_latest_from(tab, entries_at_start_staging);
        newsz = index_table_sz(tab);
        erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
    }

    IF_DEBUG(dbg_load_code_ix = -1);
}
Module*
erts_put_module(Eterm mod)
{
    ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission());
    return put_module(mod, &module_tables[erts_staging_code_ix()]);
}
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
    ErtsCodeIndex code_ix;
    Module* modp;
    int is_blocking = 0;
    int success = 0;
    Eterm res = NIL;

    if (is_not_atom(BIF_ARG_1)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_delete_module_1], BIF_P, BIF_ARG_1);
    }

    {
        erts_start_staging_code_ix(0);
        code_ix = erts_staging_code_ix();
        modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp) {
            res = am_undefined;
        }
        else if (modp->old.code_hdr) {
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp,
                          "Module %T must be purged before loading\n",
                          BIF_ARG_1);
            erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
            ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
        }
        else {
            if (modp->curr.num_breakpoints > 0 ||
                modp->curr.num_traced_exports > 0 ||
                IF_HIPE(hipe_need_blocking(modp))) {
                /* tracing or hipe need to go single threaded */
                erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_smp_thr_progress_block();
                is_blocking = 1;
                if (modp->curr.num_breakpoints) {
                    erts_clear_module_break(modp);
                    ASSERT(modp->curr.num_breakpoints == 0);
                }
            }
            delete_code(modp);
            res = am_true;
            success = 1;
        }
    }
    {
        struct m mod;
        Eterm retval;

        mod.module = BIF_ARG_1;
        mod.modp = modp;
        retval = staging_epilogue(BIF_P, success, res, is_blocking,
                                  &mod, 1, 0);
        return retval;
    }
}
void
erts_end_staging_ranges(int commit)
{
    if (commit) {
        Sint i;
        ErtsCodeIndex src = erts_active_code_ix();
        ErtsCodeIndex dst = erts_staging_code_ix();
        Range* mp;
        Sint num_inserted;

        mp = r[dst].modules;
        num_inserted = write_ptr - mp;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                write_ptr->start = rp->start;
                erts_atomic_init_nob(&write_ptr->end,
                                     (erts_aint_t)(RANGE_END(rp)));
                write_ptr++;
            }
        }

        /*
         * There are num_inserted new range entries (unsorted) at the
         * beginning of the modules array, followed by the old entries
         * (sorted). We must now sort the entire array.
         */
        r[dst].n = write_ptr - mp;
        if (num_inserted > 1) {
            qsort(mp, r[dst].n, sizeof(Range),
                  (int (*)(const void *, const void *)) rangecompare);
        }
        else if (num_inserted == 1) {
            /* Sift the new range into place. This is faster than qsort(). */
            Range t = mp[0];
            for (i = 0; i < r[dst].n-1 && t.start > mp[i+1].start; i++) {
                mp[i] = mp[i+1];
            }
            mp[i] = t;
        }
        r[dst].modules = mp;
        CHECK(&r[dst]);
        erts_atomic_set_nob(&r[dst].mid,
                            (erts_aint_t) (r[dst].modules + r[dst].n / 2));

        if (r[dst].allocated * 2 > erts_dump_num_lit_areas) {
            erts_dump_num_lit_areas *= 2;
            erts_dump_lit_areas = (ErtsLiteralArea **)
                erts_realloc(ERTS_ALC_T_CRASH_DUMP,
                             (void *) erts_dump_lit_areas,
                             erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
        }
    }
}
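/*
 * When exactly one new range was staged, the code above sifts it into the
 * already-sorted tail instead of calling qsort(). A minimal standalone
 * sketch of that sift on a plain int array; sift_first_into_sorted is an
 * illustrative name, not an ERTS function. For example, {42, 10, 20, 30, 50}
 * becomes {10, 20, 30, 42, 50}.
 */
#include <stddef.h>

/* a[0] is the only out-of-place element; a[1..n-1] is already sorted.
 * Shift the smaller sorted elements down one step and drop a[0] into its
 * slot. This is O(n) with no comparator calls through a function pointer,
 * which beats qsort() for a single insertion. */
static void sift_first_into_sorted(int *a, size_t n)
{
    size_t i;
    int t = a[0];
    for (i = 0; i + 1 < n && t > a[i + 1]; i++) {
        a[i] = a[i + 1];
    }
    a[i] = t;
}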
static void
table_start_staging_ranges(ErtsAlcType_t alctr, struct ranges* r)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (r[dst].modules) {
        erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
        erts_free(alctr, r[dst].modules);
        r[dst].modules = NULL;
    }
}
void
erts_commit_staging_code_ix(void)
{
    ErtsCodeIndex ix;
    /* We need to take this lock as we are now making the staging
     * export table active. */
    export_staging_lock();
    ix = erts_staging_code_ix();
    erts_smp_atomic32_set_nob(&the_active_code_index, ix);
    ix = (ix + 1) % ERTS_NUM_CODE_IX;
    erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
    export_staging_unlock();
    CIX_TRACE("activate");
}
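/*
 * The commit above publishes the staging index as active and advances the
 * staging index to the next slot modulo ERTS_NUM_CODE_IX, rotating through
 * the generations (active, staging, and the previously active one that
 * running code may still reference). A standalone sketch of the same
 * rotation using C11 atomics; NUM_IX, active_ix, staging_ix and
 * commit_staging are illustrative names, not the ERTS ones.
 */
#include <stdatomic.h>

#define NUM_IX 3   /* like ERTS_NUM_CODE_IX: active, staging, and one old */

static atomic_int active_ix  = 0;
static atomic_int staging_ix = 1;

/* Publish the staged generation and move staging to the next slot.
 * Readers keep loading active_ix; anything still executing in the old
 * generation is unaffected, because its slot is not reused until the
 * rotation comes around again. */
static void commit_staging(void)
{
    int ix = atomic_load(&staging_ix);
    atomic_store(&active_ix, ix);
    atomic_store(&staging_ix, (ix + 1) % NUM_IX);
}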
void
beam_catches_start_staging(void)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();
    beam_catch_t* prev_vec = bccix[dst].beam_catches;

    ASSERT(!bccix[src].is_staging && !bccix[dst].is_staging);

    bccix[dst] = bccix[src];
    gc_old_vec(prev_vec);
    IF_DEBUG(bccix[dst].is_staging = 1);
}
void
module_start_staging(void)
{
    IndexTable* src = &module_tables[erts_active_code_ix()];
    IndexTable* dst = &module_tables[erts_staging_code_ix()];
    Module* src_mod;
    Module* dst_mod;
    int i, oldsz, newsz;

    ASSERT(dbg_load_code_ix == -1);
    ASSERT(dst->entries <= src->entries);

    /*
     * Make sure our existing modules are up-to-date.
     */
    for (i = 0; i < dst->entries; i++) {
        src_mod = (Module*) erts_index_lookup(src, i);
        dst_mod = (Module*) erts_index_lookup(dst, i);
        ASSERT(src_mod->module == dst_mod->module);
        copy_module(dst_mod, src_mod);
    }

    /*
     * Copy all new modules from the active table.
     */
    oldsz = index_table_sz(dst);
    for (i = dst->entries; i < src->entries; i++) {
        src_mod = (Module*) erts_index_lookup(src, i);
        dst_mod = (Module*) index_put_entry(dst, src_mod);
        ASSERT(dst_mod != src_mod);
        copy_module(dst_mod, src_mod);
    }
    newsz = index_table_sz(dst);
    erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));

    entries_at_start_staging = dst->entries;
    IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix());
}
void
export_info(int to, void *to_arg)
{
#ifdef ERTS_SMP
    int lock = !ERTS_IS_CRASH_DUMPING;
    if (lock)
        export_staging_lock();
#endif
    index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
    hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
#ifdef ERTS_SMP
    if (lock)
        export_staging_unlock();
#endif
}
Export*
erts_export_put(Eterm mod, Eterm func, unsigned int arity)
{
    ErtsCodeIndex code_ix = erts_staging_code_ix();
    struct export_templ templ;
    struct export_entry* ee;

    ASSERT(is_atom(mod));
    ASSERT(is_atom(func));

    export_staging_lock();
    ee = (struct export_entry*) index_put_entry(&export_tables[code_ix],
                                                init_template(&templ, mod,
                                                              func, arity));
    export_staging_unlock();
    return ee->ep;
}
void
beam_catches_init(void)
{
    int i;

    bccix[0].tabsize   = DEFAULT_TABSIZE;
    bccix[0].free_list = -1;
    bccix[0].high_mark = 0;
    bccix[0].beam_catches = erts_alloc(ERTS_ALC_T_CODE,
                                       sizeof(beam_catch_t)*DEFAULT_TABSIZE);
    IF_DEBUG(bccix[0].is_staging = 0);
    for (i = 1; i < ERTS_NUM_CODE_IX; i++) {
        bccix[i] = bccix[i-1];
    }
    /* For initial load: */
    IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 1);
}
void
erts_start_staging_ranges(int num_new)
{
    ErtsCodeIndex src = erts_active_code_ix();
    ErtsCodeIndex dst = erts_staging_code_ix();
    Sint need;

    if (r[dst].modules) {
        erts_atomic_add_nob(&mem_used, -r[dst].allocated);
        erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules);
    }

    need = r[dst].allocated = r[src].n + num_new;
    erts_atomic_add_nob(&mem_used, need);
    write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS,
                           need * sizeof(Range));
    r[dst].modules = write_ptr;
}
static void
delete_code(Module* modp)
{
    ErtsCodeIndex code_ix = erts_staging_code_ix();
    Eterm module = make_atom(modp->module);
    int i;

    for (i = 0; i < export_list_size(code_ix); i++) {
        Export *ep = export_list(i, code_ix);
        if (ep != NULL && (ep->code[0] == module)) {
            if (ep->addressv[code_ix] == ep->code+3) {
                if (ep->code[3] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                else if (ep->code[3] ==
                         (BeamInstr) em_call_traced_function) {
                    ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
                    ASSERT(modp->curr.num_traced_exports > 0);
                    --modp->curr.num_traced_exports;
                    MatchSetUnref(ep->match_prog_set);
                    ep->match_prog_set = NULL;
                }
                else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
                            || !erts_initialized);
            }
            ep->addressv[code_ix] = ep->code+3;
            ep->code[3] = (BeamInstr) em_call_error_handler;
            ep->code[4] = 0;
            ASSERT(ep->match_prog_set == NULL);
        }
    }

    ASSERT(modp->curr.num_breakpoints == 0);
    ASSERT(modp->curr.num_traced_exports == 0);
    modp->old = modp->curr;
    modp->curr.code = NULL;
    modp->curr.code_length = 0;
    modp->curr.catches = BEAM_CATCHES_NIL;
    modp->curr.nif = NULL;
}
static void
delete_code(Module* modp)
{
    ErtsCodeIndex code_ix = erts_staging_code_ix();
    Eterm module = make_atom(modp->module);
    int i;

    for (i = 0; i < export_list_size(code_ix); i++) {
        Export *ep = export_list(i, code_ix);
        if (ep != NULL && (ep->info.mfa.module == module)) {
            if (ep->addressv[code_ix] == ep->beam) {
                if (ep->beam[0] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                else if (ep->beam[0] ==
                         (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
                    ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
                    ASSERT(modp->curr.num_traced_exports > 0);
                    DBG_TRACE_MFA_P(&ep->info.mfa,
                                    "export trace cleared, code_ix=%d",
                                    code_ix);
                    erts_clear_export_break(modp, &ep->info);
                }
                else ASSERT(ep->beam[0] == (BeamInstr) em_call_error_handler
                            || !erts_initialized);
            }
            ep->addressv[code_ix] = ep->beam;
            ep->beam[0] = (BeamInstr) em_call_error_handler;
            ep->beam[1] = 0;
            DBG_TRACE_MFA_P(&ep->info.mfa,
                            "export invalidation, code_ix=%d", code_ix);
        }
    }

    ASSERT(modp->curr.num_breakpoints == 0);
    ASSERT(modp->curr.num_traced_exports == 0);
    modp->old = modp->curr;
    erts_module_instance_init(&modp->curr);
}
void
beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes,
                    ErtsCodeIndex code_ix)
{
    struct bc_pool* p = &bccix[code_ix];
    unsigned i, cdr;

    ASSERT((code_ix == erts_active_code_ix())
           != bccix[erts_staging_code_ix()].is_staging);
    for (i = head; i != (unsigned)-1;) {
        if (i >= p->tabsize) {
            erts_exit(ERTS_ERROR_EXIT,
                      "beam_catches_delmod: index %#x is out of range\r\n",
                      i);
        }
        if ((char*)p->beam_catches[i].cp - (char*)code >= code_bytes) {
            erts_exit(ERTS_ERROR_EXIT,
                      "beam_catches_delmod: item %#x has cp %p which is not "
                      "in module's range [%p,%p[\r\n",
                      i, p->beam_catches[i].cp,
                      code, ((char*)code + code_bytes));
        }
        p->beam_catches[i].cp = 0;
        cdr = p->beam_catches[i].cdr;
        p->beam_catches[i].cdr = p->free_list;
        p->free_list = i;
        i = cdr;
    }
}
static void
table_update_ranges(ErtsAlcType_t alctr, struct ranges* r, BeamInstr* code,
                    Uint size)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();
    Sint i;
    Sint n;
    Sint need;

    if (src == dst) {
        ASSERT(!erts_initialized);
        /*
         * During start-up of system, the indices are the same.
         * Handle this by faking a source area.
         */
        src = (src+1) % ERTS_NUM_CODE_IX;
        if (r[src].modules) {
            erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
            erts_free(alctr, r[src].modules);
        }
        r[src] = r[dst];
        r[dst].modules = 0;
    }

    CHECK(&r[src]);

    ASSERT(r[dst].modules == NULL);
    need = r[dst].allocated = r[src].n + 1;
    erts_smp_atomic_add_nob(&mem_used, need);
    r[dst].modules = (Range *) erts_alloc(alctr, need * sizeof(Range));

    n = 0;
    for (i = 0; i < r[src].n; i++) {
        Range* rp = r[src].modules+i;
        if (code < rp->start) {
            r[dst].modules[n].start = code;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(((byte *)code) + size));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
            n++;
            break;
        }
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
    }
    while (i < r[src].n) {
        Range* rp = r[src].modules+i;
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
        i++;
    }
    if (n == 0 || code > r[dst].modules[n-1].start) {
        r[dst].modules[n].start = code;
        erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                 (erts_aint_t)(((byte *)code) + size));
        ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
        n++;
    }
    ASSERT(n <= r[src].n+1);
    r[dst].n = n;
    erts_smp_atomic_set_nob(&r[dst].mid,
                            (erts_aint_t) (r[dst].modules + n / 2));
    CHECK(&r[dst]);
    CHECK(&r[src]);
}
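/*
 * The mid field maintained above caches a midpoint into the sorted range
 * table, which seeds the address-to-module lookup. A standalone sketch of
 * searching a sorted array of half-open [start,end) ranges for the range
 * containing a program counter, starting from such a cached midpoint.
 * range_t and find_range are illustrative names under that assumption,
 * not the ERTS implementation; purged modules (start == end) can never
 * match, so they fall through the comparisons harmlessly.
 */
#include <stddef.h>

typedef struct {
    const char *start;   /* inclusive */
    const char *end;     /* exclusive; start == end marks a purged module */
} range_t;

/* Binary search for the range containing pc. The caller passes a cached
 * midpoint (e.g. tab + n/2) so repeated lookups near the same module
 * converge quickly. Returns NULL if pc is in no live range. */
static const range_t *find_range(const range_t *tab, size_t n,
                                 const range_t *mid, const char *pc)
{
    const range_t *lo = tab;
    const range_t *hi = tab + n;

    while (lo < hi) {
        if (pc < mid->start)
            hi = mid;                /* pc lies before this range */
        else if (pc >= mid->end)
            lo = mid + 1;            /* pc lies after (or range is purged) */
        else
            return mid;              /* start <= pc < end: found */
        mid = lo + (hi - lo) / 2;
    }
    return NULL;
}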
void
beam_catches_end_staging(int commit)
{
    IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 0);
}