/*
 * Used during copying of literals: with second argument 'true' the
 * literal range of BIF_ARG_1's old code is recorded as the range
 * currently being copied; with 'false' the recorded range is cleared.
 */
BIF_RETTYPE erts_internal_copy_literals_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Eterm res = am_true;

    if (is_not_atom(BIF_ARG_1) || (am_true != BIF_ARG_2 && am_false != BIF_ARG_2)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_copy_literals_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    code_ix = erts_active_code_ix();

    if (BIF_ARG_2 == am_true) {
        Module* modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp || !modp->old.code_hdr) {
            res = am_false;
            goto done;
        }
        if (erts_clrange.ptr != NULL
            && !(BIF_P->static_flags & ERTS_STC_FLG_SYSTEM_PROC)) {
            res = am_aborted;
            goto done;
        }
        erts_clrange.ptr = modp->old.code_hdr->literals_start;
        erts_clrange.sz  = modp->old.code_hdr->literals_end - erts_clrange.ptr;
        erts_clrange.pid = BIF_P->common.id;
    } else if (BIF_ARG_2 == am_false) {
        if (erts_clrange.pid != BIF_P->common.id) {
            res = am_false;
            goto done;
        }
        erts_clrange.ptr = NULL;
        erts_clrange.sz = 0;
        erts_clrange.pid = THE_NON_VALUE;
    }

#ifdef ERTS_SMP
    ASSERT(committer_state.stager == NULL);
    committer_state.stager = BIF_P;
    erts_schedule_thr_prgr_later_op(copy_literals_commit, NULL, &committer_state.lop);
    erts_proc_inc_refc(BIF_P);
    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
#endif

done:
    erts_release_code_write_permission();
    BIF_RET(res);
}
Eterm
check_process_code_2(BIF_ALIST_2)
{
    Process* rp;
    Module* modp;

    if (is_not_atom(BIF_ARG_2)) {
        goto error;
    }
    if (is_internal_pid(BIF_ARG_1)) {
        Eterm res;
        if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
            goto error;
        modp = erts_get_module(BIF_ARG_2);
        if (modp == NULL) {                  /* Doesn't exist. */
            return am_false;
        } else if (modp->old_code == NULL) { /* No old code. */
            return am_false;
        }

#ifdef ERTS_SMP
        rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
#else
        rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0);
#endif
        if (!rp) {
            BIF_RET(am_false);
        }
        if (rp == ERTS_PROC_LOCK_BUSY) {
            ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
                            BIF_ARG_1, BIF_ARG_2);
        }
        res = check_process_code(rp, modp);
#ifdef ERTS_SMP
        if (BIF_P != rp) {
            erts_resume(rp, ERTS_PROC_LOCK_MAIN);
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
        }
#endif
        BIF_RET(res);
    }
    else if (is_external_pid(BIF_ARG_1)
             && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
        BIF_RET(am_false);
    }

 error:
    BIF_ERROR(BIF_P, BADARG);
}
Eterm
check_process_code_2(BIF_ALIST_2)
{
    Process* rp;
    Module* modp;

    if (is_not_atom(BIF_ARG_2)) {
        goto error;
    }
    if (is_internal_pid(BIF_ARG_1)) {
        Eterm res;
        if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
            goto error;
        rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
                                       BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
        if (!rp) {
            BIF_RET(am_false);
        }
        if (rp == ERTS_PROC_LOCK_BUSY) {
            ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
                            BIF_ARG_1, BIF_ARG_2);
        }
        modp = erts_get_module(BIF_ARG_2);
        res = check_process_code(rp, modp);
#ifdef ERTS_SMP
        if (BIF_P != rp)
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
#endif
        BIF_RET(res);
    }
    else if (is_external_pid(BIF_ARG_1)
             && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
        BIF_RET(am_false);
    }

 error:
    BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
#if !defined(ERTS_DIRTY_SCHEDULERS)
    BIF_ERROR(BIF_P, EXC_NOTSUP);
#else
    Process *rp;
    int reds = 0;
    Eterm res;

    if (BIF_P != erts_dirty_process_code_checker)
        BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_internal_pid(BIF_ARG_1))
        BIF_ERROR(BIF_P, BADARG);

    if (is_not_atom(BIF_ARG_2))
        BIF_ERROR(BIF_P, BADARG);

    rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
    if (rp == ERTS_PROC_LOCK_BUSY)
        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    if (!rp)
        BIF_RET(am_false);

    res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);

    if (BIF_P != rp)
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);

    ASSERT(is_value(res));

    BIF_RET2(res, reds);
#endif
}
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->old.code_hdr) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->old.code_hdr->on_load_function_ptr == NULL) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;
        struct erl_module_instance t;

        /*
         * Swap old and new code.
         */
        t = modp->curr;
        modp->curr = modp->old;
        modp->old = t;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i, code_ix);
            if (ep == NULL || ep->code[0] != BIF_ARG_1) {
                continue;
            }
            if (ep->code[4] != 0) {
                ep->addressv[code_ix] = (void *) ep->code[4];
                ep->code[4] = 0;
            } else {
                if (ep->addressv[code_ix] == ep->code+3 &&
                    ep->code[3] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                ep->addressv[code_ix] = ep->code+3;
                ep->code[3] = (BeamInstr) em_call_error_handler;
            }
        }
        modp->curr.code_hdr->on_load_function_ptr = NULL;
        set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
        int i;

        /*
         * The on_load function failed. Remove references to the
         * code that is about to be purged from the export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i, code_ix);
            if (ep == NULL || ep->code[0] != BIF_ARG_1) {
                continue;
            }
            if (ep->code[3] == (BeamInstr) em_apply_bif) {
                continue;
            }
            ep->code[4] = 0;
        }
    }

    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
/*
 * Set or clear debug breakpoints on all loaded functions matching the
 * {Module, Function, Arity} pattern in BIF_ARG_1, where '_' acts as a
 * wildcard. Returns the number of functions matched.
 */
BIF_RETTYPE
erts_debug_breakpoint_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm MFA = BIF_ARG_1;
    Eterm boolean = BIF_ARG_2;
    Eterm* tp;
    ErtsCodeMFA mfa;
    int i;
    int specified = 0;
    Eterm res;
    BpFunctions f;

    if (boolean != am_true && boolean != am_false)
        goto error;

    if (is_not_tuple(MFA)) {
        goto error;
    }
    tp = tuple_val(MFA);
    if (*tp != make_arityval(3)) {
        goto error;
    }
    if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
        (!is_small(tp[3]) && tp[3] != am_Underscore)) {
        goto error;
    }
    for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
        /* Empty loop body */
    }
    for (i = specified; i < 3; i++) {
        if (tp[i+1] != am_Underscore) {
            goto error;
        }
    }
    mfa.module = tp[1];
    mfa.function = tp[2];
    if (is_small(tp[3])) {
        mfa.arity = signed_val(tp[3]);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_thr_progress_block();

    erts_bp_match_functions(&f, &mfa, specified);
    if (boolean == am_true) {
        erts_set_debug_break(&f);
        erts_install_breakpoints(&f);
        erts_commit_staged_bp();
    } else {
        erts_clear_debug_break(&f);
        erts_commit_staged_bp();
        erts_uninstall_breakpoints(&f);
    }
    erts_consolidate_bp_data(&f, 1);
    res = make_small(f.matched);
    erts_bp_free_matched_functions(&f);

    erts_thr_progress_unblock();
    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;

 error:
    BIF_ERROR(p, BADARG);
}
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;
    Eterm on_load;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || modp->curr.code == 0) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i, code_ix);
            if (ep != NULL &&
                ep->code[0] == BIF_ARG_1 &&
                ep->code[4] != 0) {
                ep->addressv[code_ix] = (void *) ep->code[4];
                ep->code[4] = 0;
            }
        }
        modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
        set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
        BeamInstr* code;
        BeamInstr* end;

        /*
         * The on_load function failed. Remove the loaded code.
         * This is a combination of delete and purge. We purge
         * the current code; the old code is not touched.
         */
        erts_total_code_size -= modp->curr.code_length;
        code = modp->curr.code;
        end = (BeamInstr *)((char *)code + modp->curr.code_length);
        erts_cleanup_funs_on_purge(code, end);
        beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
                            erts_active_code_ix());
        erts_free(ERTS_ALC_T_CODE, (void *) code);
        modp->curr.code = NULL;
        modp->curr.code_length = 0;
        modp->curr.catches = BEAM_CATCHES_NIL;
        erts_remove_from_ranges(code);
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->on_load || !modp->on_load->code_hdr) {
    error:
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->on_load->code_hdr->on_load_function_ptr == NULL) {
        goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
        goto error;
    }

    if (BIF_ARG_2 == am_true) {
        int i;

        /*
         * Make the code with the on_load function current.
         */
        if (modp->curr.code_hdr) {
            modp->old = modp->curr;
        }
        modp->curr = *modp->on_load;
        erts_free(ERTS_ALC_T_PREPARED_CODE, modp->on_load);
        modp->on_load = 0;

        /*
         * The on_load function succeeded. Fix up export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i, code_ix);
            if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
                continue;
            }
            if (ep->beam[1] != 0) {
                ep->addressv[code_ix] = (void *) ep->beam[1];
                ep->beam[1] = 0;
            } else {
                if (ep->addressv[code_ix] == ep->beam &&
                    ep->beam[0] == (BeamInstr) em_apply_bif) {
                    continue;
                }
                ep->addressv[code_ix] = ep->beam;
                ep->beam[0] = (BeamInstr) em_call_error_handler;
            }
        }
        modp->curr.code_hdr->on_load_function_ptr = NULL;
        set_default_trace_pattern(BIF_ARG_1);
#ifdef HIPE
        hipe_redirect_to_module(modp);
#endif
    } else if (BIF_ARG_2 == am_false) {
        int i;

        /*
         * The on_load function failed. Remove references to the
         * code that is about to be purged from the export entries.
         */
        for (i = 0; i < export_list_size(code_ix); i++) {
            Export *ep = export_list(i, code_ix);
            if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
                continue;
            }
            if (ep->beam[0] == (BeamInstr) em_apply_bif) {
                continue;
            }
            ep->beam[1] = 0;
        }
    }

    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
    if (BIF_P != erts_code_purger)
        BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_atom(BIF_ARG_1))
        BIF_ERROR(BIF_P, BADARG);

    switch (BIF_ARG_2) {

    case am_prepare:
    case am_prepare_on_load: {
        /*
         * Prepare for purge by marking all fun
         * entries referring to the code to purge
         * with "pending purge" markers.
         */
        ErtsCodeIndex code_ix;
        Module* modp;
        Eterm res;

        if (is_value(purge_state.module))
            BIF_ERROR(BIF_P, BADARG);

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp)
            res = am_false;
        else {
            /*
             * Any code to purge?
             */
            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwlock_old_code(code_ix);
            } else {
                erts_rlock_old_code(code_ix);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                ASSERT(modp->on_load);
                ASSERT(modp->on_load->code_hdr);
                purge_state.saved_old = modp->old;
                modp->old = *modp->on_load;
                erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
                modp->on_load = 0;
            }

            if (!modp->old.code_hdr)
                res = am_false;
            else {
                BeamInstr* code;
                BeamInstr* end;
                erts_smp_mtx_lock(&purge_state.mtx);
                purge_state.module = BIF_ARG_1;
                erts_smp_mtx_unlock(&purge_state.mtx);
                res = am_true;
                code = (BeamInstr*) modp->old.code_hdr;
                end = (BeamInstr *)((char *)code + modp->old.code_length);
                erts_fun_purge_prepare(code, end);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwunlock_old_code(code_ix);
            } else {
                erts_runlock_old_code(code_ix);
            }
        }

#ifndef ERTS_SMP
        BIF_RET(res);
#else
        if (res != am_true)
            BIF_RET(res);
        else {
            /*
             * We'll be resumed when all schedulers are guaranteed
             * to see the "pending purge" markers that we've made on
             * all fun entries of the code that we are about to purge.
             * Processes trying to call these funs will be suspended
             * before calling the funs. That is we are guaranteed not
             * to get any more direct references into the code while
             * checking for such references...
             */
            erts_schedule_thr_prgr_later_op(resume_purger, NULL, &purger_lop_data);
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
            ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
        }
#endif
    }

    case am_abort: {
        /*
         * Soft purge that detected direct references into the code
         * we set out to purge. Abort the purge.
         */

        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);

#ifndef ERTS_SMP
        erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
        finalize_purge_operation(BIF_P, 0);
        BIF_RET(am_false);
#else
        /*
         * We need to restore the code addresses of the funs in
         * two stages in order to ensure that we do not get any
         * stale suspended processes due to the purge abort.
         * Restore address pointer (erts_fun_purge_abort_prepare);
         * wait for thread progress; clear pending purge address
         * pointer (erts_fun_purge_abort_finalize), and then
         * resume processes that got suspended
         * (finalize_purge_operation).
         */
        erts_schedule_thr_prgr_later_op(finalize_purge_abort,
                                        NULL,
                                        &purger_lop_data);
        erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
#endif
    }

    case am_complete: {
        ErtsCodeIndex code_ix;
        BeamInstr* code;
        Module* modp;
        int is_blocking = 0;
        Eterm ret;
        ErtsLiteralArea *literals = NULL;

        /*
         * We have no direct references into the code.
         * Complete to purge.
         */

        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        if (!erts_try_seize_code_write_permission(BIF_P)) {
            ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
                            BIF_P, BIF_ARG_1, BIF_ARG_2);
        }

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
            ERTS_BIF_PREP_RET(ret, am_false);
        }
        else {
            erts_rwlock_old_code(code_ix);

            /*
             * Any code to purge?
             */
            if (!modp->old.code_hdr) {
                ERTS_BIF_PREP_RET(ret, am_false);
            }
            else {
                /*
                 * Unload any NIF library
                 */
                if (modp->old.nif != NULL
                    || IF_HIPE(hipe_purge_need_blocking(modp))) {
                    /* ToDo: Do unload nif without blocking */
                    erts_rwunlock_old_code(code_ix);
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    erts_smp_thr_progress_block();
                    is_blocking = 1;
                    erts_rwlock_old_code(code_ix);
                    if (modp->old.nif) {
                        erts_unload_nif(modp->old.nif);
                        modp->old.nif = NULL;
                    }
                }

                /*
                 * Remove the old code.
                 */
                ASSERT(erts_total_code_size >= modp->old.code_length);
                erts_total_code_size -= modp->old.code_length;
                code = (BeamInstr*) modp->old.code_hdr;
                erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
                beam_catches_delmod(modp->old.catches, code,
                                    modp->old.code_length, code_ix);
                literals = modp->old.code_hdr->literal_area;
                modp->old.code_hdr->literal_area = NULL;
                erts_free(ERTS_ALC_T_CODE, (void *) code);
                modp->old.code_hdr = NULL;
                modp->old.code_length = 0;
                modp->old.catches = BEAM_CATCHES_NIL;
                erts_remove_from_ranges(code);
#ifdef HIPE
                hipe_purge_module(modp, is_blocking);
#endif
                ERTS_BIF_PREP_RET(ret, am_true);
            }

            if (purge_state.saved_old.code_hdr) {
                modp->old = purge_state.saved_old;
                purge_state.saved_old.code_hdr = 0;
            }
            erts_rwunlock_old_code(code_ix);
        }

        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        }

        erts_release_code_write_permission();

        finalize_purge_operation(BIF_P, ret == am_true);

        if (literals) {
            ErtsLiteralAreaRef *ref;
            ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
                             sizeof(ErtsLiteralAreaRef));
            ref->literal_area = literals;
            ref->next = NULL;
            erts_smp_mtx_lock(&release_literal_areas.mtx);
            if (release_literal_areas.last) {
                release_literal_areas.last->next = ref;
                release_literal_areas.last = ref;
            }
            else {
                release_literal_areas.first = ref;
                release_literal_areas.last = ref;
            }
            erts_smp_mtx_unlock(&release_literal_areas.mtx);
            erts_queue_message(erts_literal_area_collector,
                               0,
                               erts_alloc_message(0, NULL),
                               am_copy_literals,
                               BIF_P->common.id);
        }

        return ret;
    }

    default:
        BIF_ERROR(BIF_P, BADARG);
    }
}
BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermPut2Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;

#define PUT_TRAP_CODE                                                   \
    BIF_TRAP2(bif_export[BIF_persistent_term_put_2], BIF_P, state_mref, BIF_ARG_2)
#define TRAPPING_COPY_TABLE_PUT(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, PUT_TRAP_CODE)

#ifdef DEBUG
    (void)ITERATIONS_PER_RED;
    iterations_until_trap = max_iterations =
        GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
    iterations_until_trap = max_iterations =
        ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif

    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_put_2_ctx_bin_dtor)) {
        /* Restore state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case PUT2_TRAP_LOCATION_NEW_KEY:
            goto L_PUT2_TRAP_LOCATION_NEW_KEY;
        case PUT2_TRAP_LOCATION_REPLACE_VALUE:
            goto L_PUT2_TRAP_LOCATION_REPLACE_VALUE;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin =
            erts_create_magic_binary(sizeof(ErtsPersistentTermPut2Context),
                                     persistent_term_put_2_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following field is used to detect if
         * persistent_term_put_2_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
    }

    if (!try_seize_update_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    ctx->hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);

    ctx->key = BIF_ARG_1;
    ctx->term = BIF_ARG_2;

    ctx->entry_index = lookup(ctx->hash_table, ctx->key);

    ctx->heap[0] = make_arityval(2);
    ctx->heap[1] = ctx->key;
    ctx->heap[2] = ctx->term;
    ctx->tuple = make_tuple(ctx->heap);

    if (is_nil(ctx->hash_table->term[ctx->entry_index])) {
        Uint new_size = ctx->hash_table->allocated;
        if (MUST_GROW(ctx->hash_table)) {
            new_size *= 2;
        }
        TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                ctx->hash_table,
                                new_size,
                                ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                PUT2_TRAP_LOCATION_NEW_KEY);
        ctx->entry_index = lookup(ctx->hash_table, ctx->key);
        ctx->hash_table->num_entries++;
    } else {
        Eterm tuple = ctx->hash_table->term[ctx->entry_index];
        Eterm old_term;

        ASSERT(is_tuple_arity(tuple, 2));
        old_term = boxed_val(tuple)[2];
        if (EQ(ctx->term, old_term)) {
            /* Same value. No need to update anything. */
            release_update_permission(0);
            BIF_RET(am_ok);
        } else {
            /* Mark the old term for deletion. */
            mark_for_deletion(ctx->hash_table, ctx->entry_index);
            TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                    ctx->hash_table,
                                    ctx->hash_table->allocated,
                                    ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                    PUT2_TRAP_LOCATION_REPLACE_VALUE);
        }
    }

    {
        Uint term_size;
        Uint lit_area_size;
        ErlOffHeap code_off_heap;
        ErtsLiteralArea* literal_area;
        erts_shcopy_t info;
        Eterm* ptr;

        /*
         * Preserve internal sharing in the term by using the
         * sharing-preserving functions. However, literals must
         * be copied in case the module holding them is unloaded.
         */
        INITIALIZE_SHCOPY(info);
        info.copy_literals = 1;
        term_size = copy_shared_calculate(ctx->tuple, &info);
        ERTS_INIT_OFF_HEAP(&code_off_heap);
        lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
        literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
        ptr = &literal_area->start[0];
        literal_area->end = ptr + term_size;
        ctx->tuple = copy_shared_perform(ctx->tuple, term_size, &info,
                                         &ptr, &code_off_heap);
        ASSERT(tuple_val(ctx->tuple) == literal_area->start);
        literal_area->off_heap = code_off_heap.first;
        DESTROY_SHCOPY(info);
        erts_set_literal_tag(&ctx->tuple, literal_area->start, term_size);
        ctx->hash_table->term[ctx->entry_index] = ctx->tuple;

        erts_schedule_thr_prgr_later_op(table_updater, ctx->hash_table, &thr_prog_op);
        suspend_updater(BIF_P);
    }
    BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}