Example #1
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    Module* modp = erts_get_module(BIF_ARG_1);
    Eterm on_load;

    if (!modp || modp->code == 0) {
    error:
	BIF_ERROR(BIF_P, BADARG);
    }
    if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    if (BIF_ARG_2 == am_true) {
	int i;

	/*
	 * The on_load function succeeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(); i++) {
	    Export *ep = export_list(i);
	    if (ep != NULL &&
		ep->code[0] == BIF_ARG_1 &&
		ep->code[4] != 0) {
		ep->address = (void *) ep->code[4];
		ep->code[4] = 0;
	    }
	}
	modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
	set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
	BeamInstr* code;
	BeamInstr* end;

	/*
	 * The on_load function failed. Remove the loaded code.
	 * This is a combination of delete and purge. We purge
	 * the current code; the old code is not touched.
	 */
	erts_total_code_size -= modp->code_length;
	code = modp->code;
	end = (BeamInstr *)((char *)code + modp->code_length);
	erts_cleanup_funs_on_purge(code, end);
	beam_catches_delmod(modp->catches, code, modp->code_length);
	erts_free(ERTS_ALC_T_CODE, (void *) code);
	modp->code = NULL;
	modp->code_length = 0;
	modp->catches = BEAM_CATCHES_NIL;
	remove_from_address_table(code);
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    BIF_RET(am_true);
}
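
Example #1 mutates shared code structures (the export table and the module header) only after dropping its own main process lock and blocking every other scheduler with erts_smp_thr_progress_block(). The fragment below is a minimal standalone model of that discipline, with a pthread rwlock standing in for the thread-progress block; all names (export_table, code_table_lock, patch_exports) are hypothetical and this is not the ERTS mechanism.

/*
 * Minimal standalone model (NOT the ERTS implementation) of the
 * "block all executors, patch shared code pointers, unblock" pattern
 * used by finish_after_on_load_2 above.  A pthread rwlock stands in
 * for erts_smp_thr_progress_block()/unblock(); the names below are
 * hypothetical.  Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

typedef void (*export_fn)(void);

static void stub(void)  { puts("not yet callable"); }
static void real(void)  { puts("on_load finished: real code"); }

static export_fn export_table[1] = { stub };
static pthread_rwlock_t code_table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* A "scheduler": takes the read side, calls through the table. */
static void *scheduler(void *arg)
{
    (void)arg;
    for (int i = 0; i < 3; i++) {
        pthread_rwlock_rdlock(&code_table_lock);
        export_table[0]();
        pthread_rwlock_unlock(&code_table_lock);
    }
    return NULL;
}

/* The "finish_after_on_load" step: exclusive access, then patch. */
static void patch_exports(void)
{
    pthread_rwlock_wrlock(&code_table_lock);   /* ~ thr_progress_block()   */
    export_table[0] = real;                    /* fix up the export entry  */
    pthread_rwlock_unlock(&code_table_lock);   /* ~ thr_progress_unblock() */
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, scheduler, NULL);
    patch_exports();
    pthread_join(t, NULL);
    return 0;
}
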
Example #2
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
		 struct m* loaded, int nloaded)
{    
#ifdef ERTS_SMP
    if (is_blocking || !commit)
#endif
    {
	if (commit) {
	    erts_end_staging_code_ix();
	    erts_commit_staging_code_ix();
	    if (loaded) {
		int i;
		for (i=0; i < nloaded; i++) {		
		    set_default_trace_pattern(loaded[i].module);
		}
	    }
	}
	else {
	    erts_abort_staging_code_ix();
	}
	if (loaded) {
	    erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
	}
	if (is_blocking) {
	    erts_smp_thr_progress_unblock();
	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
	}
	erts_release_code_write_permission();
	return res;
    }
#ifdef ERTS_SMP
    else {
	ErtsThrPrgrVal later;
	ASSERT(is_value(res));

	if (loaded) {
	    erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
	}
	erts_end_staging_code_ix();
	/*
	 * Now we must wait for all schedulers to do a memory barrier before
	 * we can activate and let them access the new staged code. This allows
	 * schedulers to read active code_ix in a safe way while executing
	 * without any memory barriers at all. 
	 */
    
	later = erts_thr_progress_later(); 
	erts_thr_progress_wakeup(c_p->scheduler_data, later);
	erts_notify_code_ix_activation(c_p, later);
	erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
	/*
	 * handle_code_ix_activation() will do the rest "later"
	 * and resume this process to return 'res'.  
	 */
	ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}
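
The comment inside the SMP branch above is the key design point: schedulers read the active code index without issuing memory barriers, so the committer must wait until every scheduler has passed a barrier before flipping the index. The sketch below is a simplified model of the two-slot active/staging idea that uses C11 release/acquire atomics instead of the thread-progress handshake; the names (code_slots, active_ix, commit_staging) are invented for illustration and are not the ERTS code_ix API.

/*
 * Simplified model (hypothetical names, not the ERTS code_ix API) of
 * keeping two versions of "the code" and flipping an atomic index:
 * readers use a cheap load, the committer publishes with a release
 * store only after the staging slot is fully written.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_IX 2

typedef struct { const char *module_text; } code_slot;

static code_slot code_slots[NUM_IX] = { { "old code" }, { "" } };
static atomic_uint active_ix = 0;          /* index read by "schedulers" */

static unsigned staging_ix(void)           /* the slot not currently active */
{
    return (atomic_load_explicit(&active_ix, memory_order_relaxed) + 1) % NUM_IX;
}

static void stage_new_code(const char *text)
{
    code_slots[staging_ix()].module_text = text;   /* write the staging slot */
}

static void commit_staging(void)
{
    /* Release store: everything written to the staging slot happens-before
     * a reader that observes the new index value. */
    atomic_store_explicit(&active_ix, staging_ix(), memory_order_release);
}

static void run_on_scheduler(void)
{
    unsigned ix = atomic_load_explicit(&active_ix, memory_order_acquire);
    printf("executing: %s\n", code_slots[ix].module_text);
}

int main(void)
{
    run_on_scheduler();                    /* "old code"          */
    stage_new_code("newly staged code");
    commit_staging();
    run_on_scheduler();                    /* "newly staged code" */
    return 0;
}
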
Example #3
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
	ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
			BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->old.code_hdr) {
    error:
	erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	erts_release_code_write_permission();
	BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->old.code_hdr->on_load_function_ptr == NULL) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    if (BIF_ARG_2 == am_true) {
	int i;
	struct erl_module_instance t;

	/*
	 * Swap old and new code.
	 */
	t = modp->curr;
	modp->curr = modp->old;
	modp->old = t;

	/*
	 * The on_load function succeeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    if (ep == NULL || ep->code[0] != BIF_ARG_1) {
		continue;
	    }
	    if (ep->code[4] != 0) {
		ep->addressv[code_ix] = (void *) ep->code[4];
		ep->code[4] = 0;
	    } else {
		if (ep->addressv[code_ix] == ep->code+3 &&
		    ep->code[3] == (BeamInstr) em_apply_bif) {
		    continue;
		}
		ep->addressv[code_ix] = ep->code+3;
		ep->code[3] = (BeamInstr) em_call_error_handler;
	    }
	}
	modp->curr.code_hdr->on_load_function_ptr = NULL;
	set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
	int i;

	/*
	 * The on_load function failed. Remove references to the
	 * code that is about to be purged from the export entries.
	 */

	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    if (ep == NULL || ep->code[0] != BIF_ARG_1) {
		continue;
	    }
	    if (ep->code[3] == (BeamInstr) em_apply_bif) {
		continue;
	    }
	    ep->code[4] = 0;
	}
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
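
On success, the loop in Example #3 walks every export entry of the module and either installs the address the loader stashed in ep->code[4] or redirects the entry to the error handler. Below is a standalone model of that bookkeeping; the Export layout, the field names (addressv, saved_address) and the error_handler trampoline are simplified inventions, not the real ERTS structures.

/*
 * Simplified, hypothetical model of the export fix-up done on
 * on_load success: each export entry either gets the address that the
 * loader stashed away, or falls back to an error-handler trampoline.
 */
#include <stddef.h>
#include <stdio.h>

#define NUM_CODE_IX 3

typedef void (*beam_code)(void);

static void error_handler(void) { puts("call_error_handler"); }
static void exported_fn(void)   { puts("real exported function"); }

typedef struct {
    const char *module;                 /* owning module (~ ep->code[0])     */
    beam_code   addressv[NUM_CODE_IX];  /* call address per code index       */
    beam_code   saved_address;          /* stashed by loader (~ ep->code[4]) */
} Export;

static void finish_on_load_success(Export *tab, size_t n,
                                   const char *mod, unsigned code_ix)
{
    for (size_t i = 0; i < n; i++) {
        Export *ep = &tab[i];
        if (ep->module != mod)
            continue;
        if (ep->saved_address != NULL) {
            ep->addressv[code_ix] = ep->saved_address;  /* make callable    */
            ep->saved_address = NULL;
        } else {
            ep->addressv[code_ix] = error_handler;      /* undefined export */
        }
    }
}

int main(void)
{
    const char *mod = "m";
    Export tab[2] = {
        { mod, { error_handler, error_handler, error_handler }, exported_fn },
        { mod, { error_handler, error_handler, error_handler }, NULL },
    };
    finish_on_load_success(tab, 2, mod, 0);
    tab[0].addressv[0]();   /* real exported function */
    tab[1].addressv[0]();   /* call_error_handler     */
    return 0;
}
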
Example #4
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;
    Eterm on_load;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
	ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
			BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || modp->curr.code == 0) {
    error:
	erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	erts_release_code_write_permission();
	BIF_ERROR(BIF_P, BADARG);
    }
    if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    if (BIF_ARG_2 == am_true) {
	int i;

	/*
	 * The on_load function succeeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    if (ep != NULL &&
		ep->code[0] == BIF_ARG_1 &&
		ep->code[4] != 0) {
		ep->addressv[code_ix] = (void *) ep->code[4];
		ep->code[4] = 0;
	    }
	}
	modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
	set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
	BeamInstr* code;
	BeamInstr* end;

	/*
	 * The on_load function failed. Remove the loaded code.
	 * This is a combination of delete and purge. We purge
	 * the current code; the old code is not touched.
	 */
	erts_total_code_size -= modp->curr.code_length;
	code = modp->curr.code;
	end = (BeamInstr *)((char *)code + modp->curr.code_length);
	erts_cleanup_funs_on_purge(code, end);
	beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
			    erts_active_code_ix());
	erts_free(ERTS_ALC_T_CODE, (void *) code);
	modp->curr.code = NULL;
	modp->curr.code_length = 0;
	modp->curr.catches = BEAM_CATCHES_NIL;
	erts_remove_from_ranges(code);
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
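
Compared with Example #1, this version must first seize the global code write permission, and it yields (re-scheduling the same BIF call) when it cannot. A minimal standalone sketch of that try-or-yield pattern follows, with a pthread trylock as a stand-in; the names and the OP_YIELD protocol are hypothetical, not the ERTS implementation.

/*
 * Hypothetical model of "try to seize the code write permission or
 * yield and retry": a trylock that tells the caller to reschedule
 * itself instead of blocking.  Compile with -pthread.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t code_write_permission = PTHREAD_MUTEX_INITIALIZER;

typedef enum { OP_DONE, OP_YIELD } op_result;

static op_result finish_after_on_load(void)
{
    if (pthread_mutex_trylock(&code_write_permission) != 0)
        return OP_YIELD;              /* ~ ERTS_BIF_YIELD2: retry the call later */

    /* ... patch export entries under exclusive write permission ... */

    pthread_mutex_unlock(&code_write_permission);  /* ~ release_code_write_permission() */
    return OP_DONE;
}

int main(void)
{
    while (finish_after_on_load() == OP_YIELD)
        sched_yield();                /* a real scheduler would requeue the call */
    puts("done");
    return 0;
}
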
Example #5
Eterm
load_module_2(BIF_ALIST_2)
{
    Eterm   reason;
    Eterm*  hp;
    int      i;
    int      sz;
    byte*    code;
    Eterm res;
    byte* temp_alloc = NULL;

    if (is_not_atom(BIF_ARG_1)) {
    error:
	erts_free_aligned_binary_bytes(temp_alloc);
	BIF_ERROR(BIF_P, BADARG);
    }
    if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
	goto error;
    }
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    hp = HAlloc(BIF_P, 3);
    sz = binary_size(BIF_ARG_2);
    if ((i = erts_load_module(BIF_P, 0,
			      BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) { 
	switch (i) {
	case -1: reason = am_badfile; break; 
	case -2: reason = am_nofile; break;
	case -3: reason = am_not_purged; break;
	case -4:
	    reason = am_atom_put("native_code", sizeof("native_code")-1);
	    break;
	case -5:
	    {
		/*
		 * The module contains an on_load function. The loader
		 * has loaded the module as usual, except that the
		 * export entries do not point into the module, so it
		 * is not possible to call any code in the module.
		 */

		ERTS_DECL_AM(on_load);
		reason = AM_on_load;
		break;
	    }
	default: reason = am_badfile; break;
	}
	res = TUPLE2(hp, am_error, reason);
	goto done;
    }

    set_default_trace_pattern(BIF_ARG_1);
    res = TUPLE2(hp, am_module, BIF_ARG_1);

 done:
    erts_free_aligned_binary_bytes(temp_alloc);
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);

    BIF_RET(res);
}
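
load_module_2 first obtains a word-aligned view of the code binary (erts_get_aligned_binary_bytes), copying it to a temporary buffer when needed, and frees that temporary on every exit path. The fragment below is a hypothetical stand-in for that aligned-bytes pattern, not the ERTS helper itself; the function names are invented.

/*
 * Hypothetical stand-in for "give me word-aligned bytes, via a
 * temporary copy if necessary", as used by load_module_2 above.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Returns a pointer aligned to sizeof(void*).  If a copy was made,
 * *temp_alloc points at the allocation the caller must free. */
static unsigned char *
get_aligned_bytes(unsigned char *bytes, size_t size, unsigned char **temp_alloc)
{
    *temp_alloc = NULL;
    if (((uintptr_t)bytes % sizeof(void *)) == 0)
        return bytes;                       /* already aligned: use as-is */
    *temp_alloc = malloc(size);             /* malloc() is suitably aligned */
    if (*temp_alloc == NULL)
        return NULL;
    memcpy(*temp_alloc, bytes, size);
    return *temp_alloc;
}

static void free_aligned_bytes(unsigned char *temp_alloc)
{
    free(temp_alloc);                       /* free(NULL) is a no-op */
}

int main(void)
{
    unsigned char blob[32] = "BEAMcode";
    unsigned char *tmp;
    unsigned char *code = get_aligned_bytes(blob + 1, sizeof blob - 1, &tmp);
    /* ... hand `code` to the loader here ... */
    (void)code;
    free_aligned_bytes(tmp);                /* freed on every exit path */
    return 0;
}
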
Example #6
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
	ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
			BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    if (!modp || !modp->on_load || !modp->on_load->code_hdr) {
    error:
	erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	erts_release_code_write_permission();
	BIF_ERROR(BIF_P, BADARG);
    }
    if (modp->on_load->code_hdr->on_load_function_ptr == NULL) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    if (BIF_ARG_2 == am_true) {
	int i;

	/*
	 * Make the code with the on_load function current.
	 */

	if (modp->curr.code_hdr) {
	    modp->old = modp->curr;
	}
	modp->curr = *modp->on_load;
	erts_free(ERTS_ALC_T_PREPARED_CODE, modp->on_load);
	modp->on_load = 0;

	/*
	 * The on_load function succeeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
		continue;
	    }
	    if (ep->beam[1] != 0) {
		ep->addressv[code_ix] = (void *) ep->beam[1];
		ep->beam[1] = 0;
	    } else {
		if (ep->addressv[code_ix] == ep->beam &&
		    ep->beam[0] == (BeamInstr) em_apply_bif) {
		    continue;
		}
		ep->addressv[code_ix] = ep->beam;
		ep->beam[0] = (BeamInstr) em_call_error_handler;
	    }
	}
	modp->curr.code_hdr->on_load_function_ptr = NULL;
	set_default_trace_pattern(BIF_ARG_1);
      #ifdef HIPE
        hipe_redirect_to_module(modp);
      #endif
    } else if (BIF_ARG_2 == am_false) {
	int i;

	/*
	 * The on_load function failed. Remove references to the
	 * code that is about to be purged from the export entries.
	 */

	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
		continue;
	    }
	    if (ep->beam[0] == (BeamInstr) em_apply_bif) {
		continue;
	    }
	    ep->beam[1] = 0;
	}
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
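
In this later version a module carries three instances: curr, old, and a separately allocated on_load staging instance. On success, the current code (if any) is demoted to old, the staged instance is promoted to current, and its container is freed. The sketch below models that promotion with simplified, hypothetical types that do not match the real Module layout.

/*
 * Hypothetical model of promoting an on_load staging instance to the
 * current code, demoting any previous current code to "old".
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { const char *code_hdr; } ModuleInstance;

typedef struct {
    ModuleInstance  curr;     /* code reachable by new calls        */
    ModuleInstance  old;      /* previous version, kept for purging */
    ModuleInstance *on_load;  /* staged code whose on_load ran      */
} Module;

static void promote_on_load(Module *modp)
{
    if (modp->curr.code_hdr != NULL)
        modp->old = modp->curr;        /* demote current code to old  */
    modp->curr = *modp->on_load;       /* staged code becomes current */
    free(modp->on_load);               /* only the container is freed */
    modp->on_load = NULL;
}

int main(void)
{
    Module m = { { "v1" }, { NULL }, NULL };
    m.on_load = malloc(sizeof *m.on_load);
    m.on_load->code_hdr = "v2 (with on_load)";
    promote_on_load(&m);
    printf("curr=%s old=%s\n", m.curr.code_hdr, m.old.code_hdr);
    return 0;
}
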
Example #7
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
		 struct m* mods, int nmods, int free_mods)
{    
#ifdef ERTS_SMP
    if (is_blocking || !commit)
#endif
    {
	if (commit) {
	    int i;
	    erts_end_staging_code_ix();
	    erts_commit_staging_code_ix();

	    for (i=0; i < nmods; i++) {
		if (mods[i].modp->curr.code_hdr) {
		    set_default_trace_pattern(mods[i].module);
		}
	      #ifdef HIPE
		hipe_redirect_to_module(mods[i].modp);
	      #endif
	    }
	}
	else {
	    erts_abort_staging_code_ix();
	}
	if (free_mods) {
	    erts_free(ERTS_ALC_T_LOADER_TMP, mods);
	}
	if (is_blocking) {
	    erts_smp_thr_progress_unblock();
	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
	}
	erts_release_code_write_permission();
	return res;
    }
#ifdef ERTS_SMP
    else {
	ASSERT(is_value(res));

	if (free_mods) {
	    erts_free(ERTS_ALC_T_LOADER_TMP, mods);
	}
	erts_end_staging_code_ix();
	/*
	 * Now we must wait for all schedulers to do a memory barrier before
	 * we can commit and let them access the new staged code. This allows
	 * schedulers to read active code_ix in a safe way while executing
	 * without any memory barriers at all. 
	 */
	ASSERT(committer_state.stager == NULL);
	committer_state.stager = c_p;
	erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &committer_state.lop);
	erts_proc_inc_refc(c_p);
	erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
	/*
	 * smp_code_ix_commiter() will do the rest "later"
	 * and resume this process to return 'res'.  
	 */
	ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}