static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
                 struct m* loaded, int nloaded)
{
#ifdef ERTS_SMP
    if (is_blocking || !commit)
#endif
    {
        if (commit) {
            erts_end_staging_code_ix();
            erts_commit_staging_code_ix();
            if (loaded) {
                int i;
                for (i=0; i < nloaded; i++) {
                    set_default_trace_pattern(loaded[i].module);
                }
            }
        }
        else {
            erts_abort_staging_code_ix();
        }
        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
        }
        erts_release_code_write_permission();
        return res;
    }
#ifdef ERTS_SMP
    else {
        ErtsThrPrgrVal later;
        ASSERT(is_value(res));

        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        erts_end_staging_code_ix();
        /*
         * Now we must wait for all schedulers to do a memory barrier before
         * we can activate and let them access the new staged code. This allows
         * schedulers to read active code_ix in a safe way while executing
         * without any memory barriers at all.
         */
        later = erts_thr_progress_later();
        erts_thr_progress_wakeup(c_p->scheduler_data, later);
        erts_notify_code_ix_activation(c_p, later);
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        /*
         * handle_code_ix_activation() will do the rest "later"
         * and resume this process to return 'res'.
         */
        ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}
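/*
 * Illustrative aside (not ERTS code): a minimal, self-contained sketch of
 * the two-generation "staging/active index" pattern that staging_epilogue()
 * commits. All names below (CodeTable, code_tables, active_ix,
 * stage_and_commit, current_code) are hypothetical. ERTS avoids the
 * reader-side barrier entirely by waiting for scheduler thread progress
 * before activating the staged generation; this sketch substitutes portable
 * C11 release/acquire ordering for that grace-period wait.
 */
#if 0 /* standalone example; compile separately with a C11 compiler */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { const char *module_code; } CodeTable;

static CodeTable code_tables[2];     /* two generations of loaded code */
static _Atomic int active_ix = 0;    /* the index every reader follows */

/* Writer: fill the inactive generation, then publish it atomically. */
static void stage_and_commit(const char *new_code)
{
    int staging_ix = 1 - atomic_load_explicit(&active_ix,
                                              memory_order_relaxed);
    code_tables[staging_ix].module_code = new_code;  /* staging step */
    /* Release ordering makes the staged write visible before any
     * reader can observe the new index value. */
    atomic_store_explicit(&active_ix, staging_ix, memory_order_release);
}

/* Reader: follow the active index; acquire pairs with the release above. */
static const char *current_code(void)
{
    int ix = atomic_load_explicit(&active_ix, memory_order_acquire);
    return code_tables[ix].module_code;
}

int main(void)
{
    code_tables[0].module_code = "v1";
    printf("%s\n", current_code());  /* prints v1 */
    stage_and_commit("v2");
    printf("%s\n", current_code());  /* prints v2 */
    return 0;
}
#endif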
BIF_RETTYPE erts_internal_copy_literals_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Eterm res = am_true;

    if (is_not_atom(BIF_ARG_1) || (am_true != BIF_ARG_2 && am_false != BIF_ARG_2)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_copy_literals_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    code_ix = erts_active_code_ix();

    if (BIF_ARG_2 == am_true) {
        Module* modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp || !modp->old.code_hdr) {
            res = am_false;
            goto done;
        }
        if (erts_clrange.ptr != NULL
            && !(BIF_P->static_flags & ERTS_STC_FLG_SYSTEM_PROC)) {
            res = am_aborted;
            goto done;
        }
        erts_clrange.ptr = modp->old.code_hdr->literals_start;
        erts_clrange.sz  = modp->old.code_hdr->literals_end - erts_clrange.ptr;
        erts_clrange.pid = BIF_P->common.id;
    } else if (BIF_ARG_2 == am_false) {
        if (erts_clrange.pid != BIF_P->common.id) {
            res = am_false;
            goto done;
        }
        erts_clrange.ptr = NULL;
        erts_clrange.sz  = 0;
        erts_clrange.pid = THE_NON_VALUE;
    }

#ifdef ERTS_SMP
    ASSERT(committer_state.stager == NULL);
    committer_state.stager = BIF_P;
    erts_schedule_thr_prgr_later_op(copy_literals_commit, NULL, &committer_state.lop);
    erts_proc_inc_refc(BIF_P);
    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
#endif
done:
    erts_release_code_write_permission();
    BIF_RET(res);
}
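/*
 * Thread-progress callback scheduled by erts_internal_copy_literals_2()
 * above: once all schedulers are guaranteed to have passed a progress
 * point it releases the code write permission and resumes the suspended
 * staging process (unless that process is already exiting).
 */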
static void copy_literals_commit(void* null)
{
    Process* p = committer_state.stager;
#ifdef DEBUG
    committer_state.stager = NULL;
#endif
    erts_release_code_write_permission();
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
}
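/*
 * Illustrative aside (not ERTS code): the suspend/resume handshake used by
 * copy_literals_commit() and similar later-op callbacks, reduced to plain
 * pthreads. The names later_op, committer and resumed are hypothetical;
 * in ERTS the wakeup only fires after every scheduler has passed a
 * thread-progress point, for which a condition variable merely stands in.
 */
#if 0 /* standalone example; compile separately with -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int resumed = 0;

static void *later_op(void *arg)
{
    (void)arg;
    /* Stand-in for "all schedulers have passed a thread-progress point". */
    pthread_mutex_lock(&lock);
    resumed = 1;                   /* plays the role of erts_resume() */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t committer;
    pthread_create(&committer, NULL, later_op, NULL);
    pthread_mutex_lock(&lock);     /* plays the role of erts_suspend()+yield */
    while (!resumed)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    pthread_join(committer, NULL);
    puts("staging process resumed");
    return 0;
}
#endif

/*
 * code_make_stub_module_3: creates a stub module for the HiPE loader from
 * prepared code held in a magic binary; in emulators built without HIPE
 * the BIF fails with 'notsup'.
 */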
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
#if !defined(HIPE)
    BIF_ERROR(BIF_P, EXC_NOTSUP);
#else
    Module* modp;
    Eterm res, mod;

    if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1) ||
        is_not_atom(mod = erts_module_for_prepared_code
                    (((ProcBin*)binary_val(BIF_ARG_1))->val))) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
                        BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
    }

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    modp = erts_get_module(mod, erts_active_code_ix());

    if (modp && modp->curr.num_breakpoints > 0) {
        ASSERT(modp->curr.code_hdr != NULL);
        erts_clear_module_break(modp);
        ASSERT(modp->curr.num_breakpoints == 0);
    }

    erts_start_staging_code_ix(1);

    res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);

    if (res == mod) {
        erts_end_staging_code_ix();
        erts_commit_staging_code_ix();
        if (!modp)
            modp = erts_get_module(mod, erts_active_code_ix());
        hipe_redirect_to_module(modp);
    }
    else {
        erts_abort_staging_code_ix();
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;
#endif
}
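/*
 * An alternative revision of code_make_stub_module_3 in which BIF_ARG_1
 * is the module name itself rather than a magic binary of prepared code,
 * and no HiPE redirection is performed after the commit.
 */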
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
    Module* modp;
    Eterm res;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
                        BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
    }

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());

    if (modp && modp->curr.num_breakpoints > 0) {
        ASSERT(modp->curr.code_hdr != NULL);
        erts_clear_module_break(modp);
        ASSERT(modp->curr.num_breakpoints == 0);
    }

    erts_start_staging_code_ix(1);

    res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);

    if (res == BIF_ARG_1) {
        erts_end_staging_code_ix();
        erts_commit_staging_code_ix();
    }
    else {
        erts_abort_staging_code_ix();
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;
}
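/*
 * finish_after_on_load/2: called after a module's on_load function has
 * run. BIF_ARG_2 is 'true' on success (the staged code is made current
 * and export entries are fixed up) and 'false' on failure (export
 * references to the code about to be purged are cleared). This revision
 * swaps modp->curr and modp->old while all other threads are blocked.
 */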
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->old.code_hdr) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->old.code_hdr->on_load_function_ptr == NULL) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;
        struct erl_module_instance t;

        /*
         * Swap old and new code.
         */
        t = modp->curr;
        modp->curr = modp->old;
        modp->old = t;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i,code_ix);
            if (ep == NULL || ep->code[0] != BIF_ARG_1) {
                continue;
            }
            if (ep->code[4] != 0) {
                ep->addressv[code_ix] = (void *) ep->code[4];
                ep->code[4] = 0;
            } else {
                if (ep->addressv[code_ix] == ep->code+3 &&
                    ep->code[3] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                ep->addressv[code_ix] = ep->code+3;
                ep->code[3] = (BeamInstr) em_call_error_handler;
            }
        }
        modp->curr.code_hdr->on_load_function_ptr = NULL;
        set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
        int i;

        /*
         * The on_load function failed. Remove references to the
         * code that is about to be purged from the export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i,code_ix);
            if (ep == NULL || ep->code[0] != BIF_ARG_1) {
                continue;
            }
            if (ep->code[3] == (BeamInstr) em_apply_bif) {
                continue;
            }
            ep->code[4] = 0;
        }
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
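/*
 * finish_loading/1: atomically commits a list of prepared modules, using
 * code index staging when possible and falling back on thread blocking
 * when tracing is involved. Returns 'ok', or an exception list tagged
 * 'duplicated', 'not_purged' or 'on_load'.
 */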
BIF_RETTYPE finish_loading_1(BIF_ALIST_1)
{
    Sint i;
    Sint n;
    struct m* p = NULL;
    Uint exceptions;
    Eterm res;
    int is_blocking = 0;
    int do_commit = 0;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1);
    }

    /*
     * Validate the argument before we start loading; it must be a
     * proper list where each element is a magic binary containing
     * prepared (not previously loaded) code.
     *
     * First count the number of elements and allocate an array
     * to keep the elements in.
     */

    n = erts_list_length(BIF_ARG_1);
    if (n < 0) {
    badarg:
        if (p) {
            erts_free(ERTS_ALC_T_LOADER_TMP, p);
        }
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));

    /*
     * We now know that the argument is a proper list. Validate
     * and collect the binaries into the array.
     */

    for (i = 0; i < n; i++) {
        Eterm* cons = list_val(BIF_ARG_1);
        Eterm term = CAR(cons);
        ProcBin* pb;

        if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
            goto badarg;
        }
        pb = (ProcBin*) binary_val(term);
        p[i].code = pb->val;
        p[i].module = erts_module_for_prepared_code(p[i].code);
        if (p[i].module == NIL) {
            goto badarg;
        }
        BIF_ARG_1 = CDR(cons);
    }

    /*
     * Since we cannot handle atomic loading of a group of modules
     * if one or more of them uses on_load, we will only allow
     * more than one element in the list if none of the modules
     * have an on_load function.
     */

    if (n > 1) {
        for (i = 0; i < n; i++) {
            if (erts_has_code_on_load(p[i].code) == am_true) {
                erts_free(ERTS_ALC_T_LOADER_TMP, p);
                erts_release_code_write_permission();
                BIF_ERROR(BIF_P, SYSTEM_LIMIT);
            }
        }
    }

    /*
     * All types are correct. There cannot be a BADARG from now on.
     * Before we can start loading, we must check whether any of
     * the modules already has old code. To avoid a race, we must
     * not allow other processes to initiate a code loading operation
     * from now on.
     */

    res = am_ok;
    erts_start_staging_code_ix(n);

    for (i = 0; i < n; i++) {
        p[i].modp = erts_put_module(p[i].module);
        p[i].modp->seen = 0;
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
        p[i].exception = 0;
        if (p[i].modp->seen) {
            p[i].exception = 1;
            exceptions++;
        }
        p[i].modp->seen = 1;
    }
    if (exceptions) {
        res = exception_list(BIF_P, am_duplicated, p, exceptions);
        goto done;
    }

    for (i = 0; i < n; i++) {
        if (p[i].modp->curr.num_breakpoints > 0 ||
            p[i].modp->curr.num_traced_exports > 0 ||
            erts_is_default_trace_enabled()) {
            /* tracing involved, fall back on thread blocking */
            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_smp_thr_progress_block();
            is_blocking = 1;
            break;
        }
    }

    if (is_blocking) {
        for (i = 0; i < n; i++) {
            if (p[i].modp->curr.num_breakpoints) {
                erts_clear_module_break(p[i].modp);
                ASSERT(p[i].modp->curr.num_breakpoints == 0);
            }
        }
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
        p[i].exception = 0;
        if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
            p[i].exception = 1;
            exceptions++;
        }
    }

    if (exceptions) {
        res = exception_list(BIF_P, am_not_purged, p, exceptions);
    } else {
        /*
         * Now we can load all code. This can't fail.
         */

        exceptions = 0;
        for (i = 0; i < n; i++) {
            Eterm mod;
            Eterm retval;

            erts_refc_inc(&p[i].code->refc, 1);
            retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
            ASSERT(retval == NIL || retval == am_on_load);
            if (retval == am_on_load) {
                p[i].exception = 1;
                exceptions++;
            }
        }

        /*
         * Check whether any module has an on_load_handler.
         */

        if (exceptions) {
            res = exception_list(BIF_P, am_on_load, p, exceptions);
        }
        do_commit = 1;
    }

done:
    return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
}
/* Do the actual module purging and return:
 * true for success
 * false if no such old module
 * BADARG if not an atom
 */
BIF_RETTYPE erts_internal_purge_module_1(BIF_ALIST_1)
{
    ErtsCodeIndex code_ix;
    BeamInstr* code;
    BeamInstr* end;
    Module* modp;
    int is_blocking = 0;
    Eterm ret;

    if (is_not_atom(BIF_ARG_1)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_erts_internal_purge_module_1],
                        BIF_P, BIF_ARG_1);
    }

    code_ix = erts_active_code_ix();

    /*
     * Correct module?
     */

    if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
        ERTS_BIF_PREP_RET(ret, am_false);
    }
    else {
        erts_rwlock_old_code(code_ix);

        /*
         * Any code to purge?
         */
        if (!modp->old.code_hdr) {
            ERTS_BIF_PREP_RET(ret, am_false);
        }
        else {
            /*
             * Unload any NIF library
             */
            if (modp->old.nif != NULL) {
                /* ToDo: Do unload nif without blocking */
                erts_rwunlock_old_code(code_ix);
                erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_smp_thr_progress_block();
                is_blocking = 1;
                erts_rwlock_old_code(code_ix);
                erts_unload_nif(modp->old.nif);
                modp->old.nif = NULL;
            }

            /*
             * Remove the old code.
             */
            ASSERT(erts_total_code_size >= modp->old.code_length);
            erts_total_code_size -= modp->old.code_length;
            code = (BeamInstr*) modp->old.code_hdr;
            end = (BeamInstr *)((char *)code + modp->old.code_length);
            erts_cleanup_funs_on_purge(code, end);
            beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
                                code_ix);
            decrement_refc(modp->old.code_hdr);
            if (modp->old.code_hdr->literals_start) {
                erts_free(ERTS_ALC_T_LITERAL,
                          modp->old.code_hdr->literals_start);
            }
            erts_free(ERTS_ALC_T_CODE, (void *) code);
            modp->old.code_hdr = NULL;
            modp->old.code_length = 0;
            modp->old.catches = BEAM_CATCHES_NIL;
            erts_remove_from_ranges(code);
            ERTS_BIF_PREP_RET(ret, am_true);
        }
        erts_rwunlock_old_code(code_ix);
    }
    if (is_blocking) {
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    }
    erts_release_code_write_permission();
    return ret;
}
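/*
 * erts_debug:breakpoint/2: sets or clears debug breakpoints on the
 * functions matching an {M,F,A} pattern (with '_' wildcards) while all
 * other threads are blocked, and returns the number of matches.
 */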
BIF_RETTYPE erts_debug_breakpoint_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm MFA = BIF_ARG_1;
    Eterm boolean = BIF_ARG_2;
    Eterm* tp;
    ErtsCodeMFA mfa;
    int i;
    int specified = 0;
    Eterm res;
    BpFunctions f;

    if (boolean != am_true && boolean != am_false)
        goto error;

    if (is_not_tuple(MFA)) {
        goto error;
    }
    tp = tuple_val(MFA);
    if (*tp != make_arityval(3)) {
        goto error;
    }
    if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
        (!is_small(tp[3]) && tp[3] != am_Underscore)) {
        goto error;
    }
    for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
        /* Empty loop body */
    }
    for (i = specified; i < 3; i++) {
        if (tp[i+1] != am_Underscore) {
            goto error;
        }
    }

    mfa.module = tp[1];
    mfa.function = tp[2];

    if (is_small(tp[3])) {
        mfa.arity = signed_val(tp[3]);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_thr_progress_block();

    erts_bp_match_functions(&f, &mfa, specified);
    if (boolean == am_true) {
        erts_set_debug_break(&f);
        erts_install_breakpoints(&f);
        erts_commit_staged_bp();
    } else {
        erts_clear_debug_break(&f);
        erts_commit_staged_bp();
        erts_uninstall_breakpoints(&f);
    }
    erts_consolidate_bp_data(&f, 1);
    res = make_small(f.matched);
    erts_bp_free_matched_functions(&f);

    erts_thr_progress_unblock();
    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;

 error:
    BIF_ERROR(p, BADARG);
}
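/*
 * An apparently earlier revision of finish_after_on_load_2, from when the
 * module instance stored a raw BeamInstr* code array (modp->curr.code)
 * with the on_load pointer at MI_ON_LOAD_FUNCTION_PTR: on failure the
 * current code is removed outright, a combination of delete and purge.
 */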
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;
    Eterm on_load;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || modp->curr.code == 0) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i,code_ix);
            if (ep != NULL &&
                ep->code[0] == BIF_ARG_1 &&
                ep->code[4] != 0) {
                ep->addressv[code_ix] = (void *) ep->code[4];
                ep->code[4] = 0;
            }
        }
        modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
        set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
        BeamInstr* code;
        BeamInstr* end;

        /*
         * The on_load function failed. Remove the loaded code.
         * This is a combination of delete and purge. We purge
         * the current code; the old code is not touched.
         */
        erts_total_code_size -= modp->curr.code_length;
        code = modp->curr.code;
        end = (BeamInstr *)((char *)code + modp->curr.code_length);
        erts_cleanup_funs_on_purge(code, end);
        beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
                            erts_active_code_ix());
        erts_free(ERTS_ALC_T_CODE, (void *) code);
        modp->curr.code = NULL;
        modp->curr.code_length = 0;
        modp->curr.catches = BEAM_CATCHES_NIL;
        erts_remove_from_ranges(code);
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
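/*
 * A later revision of finish_after_on_load_2, from when staged on_load
 * code lives in a separate modp->on_load instance: on success that
 * instance becomes current (the previous current code, if any, becomes
 * old) and HiPE is redirected to the module.
 */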
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->on_load || !modp->on_load->code_hdr) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->on_load->code_hdr->on_load_function_ptr == NULL) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;

        /*
         * Make the code with the on_load function current.
         */
        if (modp->curr.code_hdr) {
            modp->old = modp->curr;
        }
        modp->curr = *modp->on_load;
        erts_free(ERTS_ALC_T_PREPARED_CODE, modp->on_load);
        modp->on_load = 0;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i,code_ix);
            if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
                continue;
            }
            if (ep->beam[1] != 0) {
                ep->addressv[code_ix] = (void *) ep->beam[1];
                ep->beam[1] = 0;
            } else {
                if (ep->addressv[code_ix] == ep->beam &&
                    ep->beam[0] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                ep->addressv[code_ix] = ep->beam;
                ep->beam[0] = (BeamInstr) em_call_error_handler;
            }
        }
        modp->curr.code_hdr->on_load_function_ptr = NULL;
        set_default_trace_pattern(BIF_ARG_1);
#ifdef HIPE
        hipe_redirect_to_module(modp);
#endif
    } else if (BIF_ARG_2 == am_false) {
        int i;

        /*
         * The on_load function failed. Remove references to the
         * code that is about to be purged from the export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i,code_ix);
            if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
                continue;
            }
            if (ep->beam[0] == (BeamInstr) em_apply_bif) {
                continue;
            }
            ep->beam[1] = 0;
        }
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
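/*
 * A revision of staging_epilogue that commits through a scheduled
 * thread-progress later op (smp_code_ix_commiter) instead of the code-ix
 * activation notification, takes explicit ownership of the mods array via
 * 'free_mods', and also performs HiPE redirection for each loaded module.
 */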
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
                 struct m* mods, int nmods, int free_mods)
{
#ifdef ERTS_SMP
    if (is_blocking || !commit)
#endif
    {
        if (commit) {
            int i;
            erts_end_staging_code_ix();
            erts_commit_staging_code_ix();

            for (i=0; i < nmods; i++) {
                if (mods[i].modp->curr.code_hdr) {
                    set_default_trace_pattern(mods[i].module);
                }
#ifdef HIPE
                hipe_redirect_to_module(mods[i].modp);
#endif
            }
        }
        else {
            erts_abort_staging_code_ix();
        }
        if (free_mods) {
            erts_free(ERTS_ALC_T_LOADER_TMP, mods);
        }
        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
        }
        erts_release_code_write_permission();
        return res;
    }
#ifdef ERTS_SMP
    else {
        ASSERT(is_value(res));

        if (free_mods) {
            erts_free(ERTS_ALC_T_LOADER_TMP, mods);
        }
        erts_end_staging_code_ix();
        /*
         * Now we must wait for all schedulers to do a memory barrier before
         * we can commit and let them access the new staged code. This allows
         * schedulers to read active code_ix in a safe way while executing
         * without any memory barriers at all.
         */
        ASSERT(committer_state.stager == NULL);
        committer_state.stager = c_p;
        erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &committer_state.lop);
        erts_proc_inc_refc(c_p);
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        /*
         * smp_code_ix_commiter() will do the rest "later"
         * and resume this process to return 'res'.
         */
        ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}
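/*
 * erts_internal:purge_module/2: the three-phase soft purge driven by the
 * erts_code_purger process. 'prepare'/'prepare_on_load' marks fun entries
 * with pending-purge markers, 'abort' backs out of a purge that found
 * direct references into the code, and 'complete' frees the old code and
 * hands its literal area over to the literal-area collector.
 */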
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
    if (BIF_P != erts_code_purger)
        BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_atom(BIF_ARG_1))
        BIF_ERROR(BIF_P, BADARG);

    switch (BIF_ARG_2) {

    case am_prepare:
    case am_prepare_on_load: {
        /*
         * Prepare for purge by marking all fun
         * entries referring to the code to purge
         * with "pending purge" markers.
         */
        ErtsCodeIndex code_ix;
        Module* modp;
        Eterm res;

        if (is_value(purge_state.module))
            BIF_ERROR(BIF_P, BADARG);

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp)
            res = am_false;
        else {
            /*
             * Any code to purge?
             */
            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwlock_old_code(code_ix);
            } else {
                erts_rlock_old_code(code_ix);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                ASSERT(modp->on_load);
                ASSERT(modp->on_load->code_hdr);
                purge_state.saved_old = modp->old;
                modp->old = *modp->on_load;
                erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
                modp->on_load = 0;
            }

            if (!modp->old.code_hdr)
                res = am_false;
            else {
                BeamInstr* code;
                BeamInstr* end;
                erts_smp_mtx_lock(&purge_state.mtx);
                purge_state.module = BIF_ARG_1;
                erts_smp_mtx_unlock(&purge_state.mtx);
                res = am_true;
                code = (BeamInstr*) modp->old.code_hdr;
                end = (BeamInstr *)((char *)code + modp->old.code_length);
                erts_fun_purge_prepare(code, end);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwunlock_old_code(code_ix);
            } else {
                erts_runlock_old_code(code_ix);
            }
        }

#ifndef ERTS_SMP
        BIF_RET(res);
#else
        if (res != am_true)
            BIF_RET(res);
        else {
            /*
             * We'll be resumed when all schedulers are guaranteed
             * to see the "pending purge" markers that we've made on
             * all fun entries of the code that we are about to purge.
             * Processes trying to call these funs will be suspended
             * before calling the funs. That is, we are guaranteed not
             * to get any more direct references into the code while
             * checking for such references...
             */
            erts_schedule_thr_prgr_later_op(resume_purger,
                                            NULL,
                                            &purger_lop_data);
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
            ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
        }
#endif
    }

    case am_abort: {
        /*
         * Soft purge that detected direct references into the code
         * we set out to purge. Abort the purge.
         */

        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);

#ifndef ERTS_SMP
        erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
        finalize_purge_operation(BIF_P, 0);
        BIF_RET(am_false);
#else
        /*
         * We need to restore the code addresses of the funs in
         * two stages in order to ensure that we do not get any
         * stale suspended processes due to the purge abort.
         * Restore address pointer (erts_fun_purge_abort_prepare);
         * wait for thread progress; clear pending purge address
         * pointer (erts_fun_purge_abort_finalize), and then
         * resume processes that got suspended
         * (finalize_purge_operation).
         */
        erts_schedule_thr_prgr_later_op(finalize_purge_abort,
                                        NULL,
                                        &purger_lop_data);
        erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
#endif
    }

    case am_complete: {
        ErtsCodeIndex code_ix;
        BeamInstr* code;
        Module* modp;
        int is_blocking = 0;
        Eterm ret;
        ErtsLiteralArea *literals = NULL;

        /*
         * We have no direct references into the code.
         * Complete the purge.
         */

        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        if (!erts_try_seize_code_write_permission(BIF_P)) {
            ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
                            BIF_P, BIF_ARG_1, BIF_ARG_2);
        }

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
            ERTS_BIF_PREP_RET(ret, am_false);
        }
        else {
            erts_rwlock_old_code(code_ix);

            /*
             * Any code to purge?
             */
            if (!modp->old.code_hdr) {
                ERTS_BIF_PREP_RET(ret, am_false);
            }
            else {
                /*
                 * Unload any NIF library
                 */
                if (modp->old.nif != NULL
                    || IF_HIPE(hipe_purge_need_blocking(modp))) {
                    /* ToDo: Do unload nif without blocking */
                    erts_rwunlock_old_code(code_ix);
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    erts_smp_thr_progress_block();
                    is_blocking = 1;
                    erts_rwlock_old_code(code_ix);
                    if (modp->old.nif) {
                        erts_unload_nif(modp->old.nif);
                        modp->old.nif = NULL;
                    }
                }

                /*
                 * Remove the old code.
                 */
                ASSERT(erts_total_code_size >= modp->old.code_length);
                erts_total_code_size -= modp->old.code_length;
                code = (BeamInstr*) modp->old.code_hdr;
                erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
                beam_catches_delmod(modp->old.catches, code,
                                    modp->old.code_length, code_ix);
                literals = modp->old.code_hdr->literal_area;
                modp->old.code_hdr->literal_area = NULL;
                erts_free(ERTS_ALC_T_CODE, (void *) code);
                modp->old.code_hdr = NULL;
                modp->old.code_length = 0;
                modp->old.catches = BEAM_CATCHES_NIL;
                erts_remove_from_ranges(code);
#ifdef HIPE
                hipe_purge_module(modp, is_blocking);
#endif
                ERTS_BIF_PREP_RET(ret, am_true);
            }
            if (purge_state.saved_old.code_hdr) {
                modp->old = purge_state.saved_old;
                purge_state.saved_old.code_hdr = 0;
            }
            erts_rwunlock_old_code(code_ix);
        }
        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        }

        erts_release_code_write_permission();

        finalize_purge_operation(BIF_P, ret == am_true);

        if (literals) {
            ErtsLiteralAreaRef *ref;
            ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
                             sizeof(ErtsLiteralAreaRef));
            ref->literal_area = literals;
            ref->next = NULL;
            erts_smp_mtx_lock(&release_literal_areas.mtx);
            if (release_literal_areas.last) {
                release_literal_areas.last->next = ref;
                release_literal_areas.last = ref;
            }
            else {
                release_literal_areas.first = ref;
                release_literal_areas.last = ref;
            }
            erts_smp_mtx_unlock(&release_literal_areas.mtx);
            erts_queue_message(erts_literal_area_collector,
                               0,
                               erts_alloc_message(0, NULL),
                               am_copy_literals,
                               BIF_P->common.id);
        }

        return ret;
    }

    default:
        BIF_ERROR(BIF_P, BADARG);
    }
}