/* Returns a value as a BIF would; called by erlang:monitor */
Eterm
erts_ddll_monitor_driver(Process *p,
                         Eterm description,
                         ErtsProcLocks plocks)
{
    Eterm *tp;
    Eterm ret;
    char *name;

    if (is_not_tuple(description)) {
        BIF_ERROR(p, BADARG);
    }
    tp = tuple_val(description);
    if (*tp != make_arityval(2)) {
        BIF_ERROR(p, BADARG);
    }
    if ((name = pick_list_or_atom(tp[1])) == NULL) {
        BIF_ERROR(p, BADARG);
    }
    switch (tp[2]) {
    case am_loaded:
        ERTS_BIF_PREP_RET(ret, notify_when_loaded(p, tp[1], name, plocks));
        break;
    case am_unloaded:
        ERTS_BIF_PREP_RET(ret, notify_when_unloaded(p, tp[1], name, plocks,
                                                    ERL_DE_PROC_AWAIT_UNLOAD));
        break;
    case am_unloaded_only:
        ERTS_BIF_PREP_RET(ret,
                          notify_when_unloaded(p, tp[1], name, plocks,
                                               ERL_DE_PROC_AWAIT_UNLOAD_ONLY));
        break;
    default:
        ERTS_BIF_PREP_ERROR(ret, p, BADARG);
        break;
    }

    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    return ret;
}
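/*
 * Hedged usage sketch (not part of the original source): the description
 * tuple dispatched on above is expected to have the shape
 * {DriverName, When}, where When is one of the atoms matched in the
 * switch. In Erlang terms, something along the lines of:
 *
 *   erlang:monitor(driver, {my_drv, loaded})
 *   erlang:monitor(driver, {my_drv, unloaded})
 *   erlang:monitor(driver, {my_drv, unloaded_only})
 *
 * where 'my_drv' is a hypothetical driver name used purely for
 * illustration; anything else yields badarg.
 */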
/*
 * Do the actual module purging and return:
 *   true    for success
 *   false   if there is no such old module
 *   BADARG  if the module name is not an atom
 */
BIF_RETTYPE erts_internal_purge_module_1(BIF_ALIST_1)
{
    ErtsCodeIndex code_ix;
    BeamInstr* code;
    BeamInstr* end;
    Module* modp;
    int is_blocking = 0;
    Eterm ret;

    if (is_not_atom(BIF_ARG_1)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_erts_internal_purge_module_1],
                        BIF_P, BIF_ARG_1);
    }

    code_ix = erts_active_code_ix();

    /*
     * Correct module?
     */
    if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
        ERTS_BIF_PREP_RET(ret, am_false);
    }
    else {
        erts_rwlock_old_code(code_ix);

        /*
         * Any code to purge?
         */
        if (!modp->old.code_hdr) {
            ERTS_BIF_PREP_RET(ret, am_false);
        }
        else {
            /*
             * Unload any NIF library
             */
            if (modp->old.nif != NULL) {
                /* ToDo: Unload the NIF library without blocking */
                erts_rwunlock_old_code(code_ix);
                erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_smp_thr_progress_block();
                is_blocking = 1;
                erts_rwlock_old_code(code_ix);
                erts_unload_nif(modp->old.nif);
                modp->old.nif = NULL;
            }

            /*
             * Remove the old code.
             */
            ASSERT(erts_total_code_size >= modp->old.code_length);
            erts_total_code_size -= modp->old.code_length;
            code = (BeamInstr*) modp->old.code_hdr;
            end = (BeamInstr *)((char *)code + modp->old.code_length);
            erts_cleanup_funs_on_purge(code, end);
            beam_catches_delmod(modp->old.catches, code,
                                modp->old.code_length, code_ix);
            decrement_refc(modp->old.code_hdr);
            if (modp->old.code_hdr->literals_start) {
                erts_free(ERTS_ALC_T_LITERAL,
                          modp->old.code_hdr->literals_start);
            }
            erts_free(ERTS_ALC_T_CODE, (void *) code);
            modp->old.code_hdr = NULL;
            modp->old.code_length = 0;
            modp->old.catches = BEAM_CATCHES_NIL;
            erts_remove_from_ranges(code);
            ERTS_BIF_PREP_RET(ret, am_true);
        }
        erts_rwunlock_old_code(code_ix);
    }
    if (is_blocking) {
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    }
    erts_release_code_write_permission();
    return ret;
}
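/*
 * Minimal standalone model (an assumption-labeled sketch, not ERTS code)
 * of the "seize code write permission or yield" pattern used above:
 * rather than blocking a scheduler thread on a contended exclusive
 * permission, the BIF gives up and asks to be rescheduled with the same
 * arguments. pthread_mutex_trylock() stands in for
 * erts_try_seize_code_write_permission(); retry_later() is hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t code_write_mtx = PTHREAD_MUTEX_INITIALIZER;

static bool model_purge(void (*retry_later)(void))
{
    if (pthread_mutex_trylock(&code_write_mtx) != 0) {
        retry_later();           /* analogous to ERTS_BIF_YIELD1() */
        return false;            /* nothing purged yet; we will be re-run */
    }
    /* ... perform the purge under exclusive permission ... */
    pthread_mutex_unlock(&code_write_mtx); /* erts_release_code_write_permission() */
    return true;
}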
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
        ErtsSchedulerData *esdp;
        if (arg2 != am_type)
            goto badarg;
        esdp = erts_proc_sched_data(c_p);
        if (!esdp)
            goto scheduler_type_error;

        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
            ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
            ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
            ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
            ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
        switch (arg2) {
        case am_notsup:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
            break;
        case am_undef:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
            break;
        case am_badarith:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
            break;
        case am_noproc:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
            break;
        case am_system_limit:
            ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
            break;
        case am_badarg:
        default:
            goto badarg;
        }
    }
    else if (am_copy == arg1) {
        int i;
        Eterm res;

        for (res = NIL, i = 0; i < 1000; i++) {
            Eterm *hp, sz;
            Eterm cpy;
            /* We do not want this to be optimized,
               but rather the opposite... */
            sz = size_object(arg2);
            hp = HAlloc(c_p, sz);
            cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
            hp = HAlloc(c_p, 2);
            res = CONS(hp, cpy, res);
        }

        ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
        dirty_send_message(c_p, arg2, am_ok);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
        if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
            goto badarg;
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
        /*
         * Reschedule the operation, decrementing by two each time,
         * until we reach zero. Switch between dirty scheduler types
         * when 'n' is evenly divisible by 4. If the initial value
         * was not evenly divisible by 2, a badarg exception is
         * eventually thrown.
         */
        Eterm next_type;
        Sint n;
        if (!term_to_Sint(arg2, &n) || n < 0)
            goto badarg;

        if (n == 0)
            ERTS_BIF_PREP_RET(ret, am_ok);
        else {
            Eterm argv[3];
            Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
            if (n % 4 != 0)
                next_type = type;
            else {
                switch (type) {
                case am_dirty_cpu: next_type = am_dirty_io; break;
                case am_dirty_io: next_type = am_normal; break;
                case am_normal: next_type = am_dirty_cpu; break;
                default: goto badarg;
                }
            }
            switch (next_type) {
            case am_dirty_io:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_io_2,
                                        ERTS_SCHED_DIRTY_IO,
                                        am_erts_debug,
                                        am_dirty_io,
                                        2);
                break;
            case am_dirty_cpu:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_cpu_2,
                                        ERTS_SCHED_DIRTY_CPU,
                                        am_erts_debug,
                                        am_dirty_cpu,
                                        2);
                break;
            case am_normal:
                argv[0] = am_normal;
                argv[1] = arg1;
                argv[2] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_3,
                                        ERTS_SCHED_NORMAL,
                                        am_erts_debug,
                                        am_dirty,
                                        3);
                break;
            default:
                goto badarg;
            }
        }
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
        ERTS_DECL_AM(ready);
        ERTS_DECL_AM(done);

        dirty_send_message(c_p, arg2, AM_ready);
        ms_wait(c_p, make_small(6000), 0);
        dirty_send_message(c_p, arg2, AM_done);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
        Process *real_c_p = erts_proc_shadow2real(c_p);
        Eterm *hp, *hp2;
        Uint sz;
        int i;
        ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

        if (ERTS_PROC_IS_EXITING(real_c_p))
            goto badarg;
        dirty_send_message(c_p, arg2, am_alive);

        /* Wait until dead */
        while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

        ms_wait(c_p, make_small(1000), 0);

        /* Should still be able to allocate memory */
        hp = HAlloc(c_p, 3); /* Likely on heap */
        sz = 10000;
        hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
        *hp2 = make_pos_bignum_header(sz);
        for (i = 1; i < sz; i++)
            hp2[i] = (Eterm) 4711;
        ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
        ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }

    return ret;
}
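/*
 * Worked example (added for clarity, derived from the "reschedule"
 * branch above): starting with n = 8 on a dirty CPU scheduler, each pass
 * decrements n by 2, and the scheduler type rotates whenever the current
 * n is evenly divisible by 4:
 *
 *   n = 8  (8 % 4 == 0)  dirty_cpu -> dirty_io,  reschedule with n = 6
 *   n = 6  (6 % 4 != 0)  stay on dirty_io,       reschedule with n = 4
 *   n = 4  (4 % 4 == 0)  dirty_io  -> normal,    reschedule with n = 2
 *   n = 2  (2 % 4 != 0)  stay on normal,         reschedule with n = 0
 *   n = 0                return 'ok'
 *
 * An odd initial n never reaches 0: when n == 1 the (Uint)(n - 2) cast
 * wraps to a huge unsigned value, so on the next pass term_to_Sint()
 * fails on the resulting bignum and badarg is thrown.
 */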
BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
{
    BIF_RETTYPE res;
    Port *prt;
    int flags = 0;
    Eterm ref;

    if (is_not_nil(BIF_ARG_3)) {
        Eterm l = BIF_ARG_3;
        while (is_list(l)) {
            Eterm* cons = list_val(l);
            Eterm car = CAR(cons);
            if (car == am_force)
                flags |= ERTS_PORT_SIG_FLG_FORCE;
            else if (car == am_nosuspend)
                flags |= ERTS_PORT_SIG_FLG_NOSUSPEND;
            else
                BIF_RET(am_badarg);
            l = CDR(cons);
        }
        if (!is_nil(l))
            BIF_RET(am_badarg);
    }

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

    if (flags & ERTS_PORT_SIG_FLG_FORCE) {
        if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY))
            BIF_RET(am_notsup);
    }

#ifdef DEBUG
    ref = NIL;
#endif

    switch (erts_port_output(BIF_P, flags, prt, prt->common.id,
                             BIF_ARG_2, &ref)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
        ERTS_BIF_PREP_RET(res, am_badarg);
        break;
    case ERTS_PORT_OP_BUSY:
        ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
        if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
            ERTS_BIF_PREP_RET(res, am_false);
        else {
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt);
            ERTS_BIF_PREP_YIELD3(res,
                                 bif_export[BIF_erts_internal_port_command_3],
                                 BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
        }
        break;
    case ERTS_PORT_OP_BUSY_SCHEDULED:
        ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
        /* Fall through... */
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(ref));
        ERTS_BIF_PREP_RET(res, ref);
        break;
    case ERTS_PORT_OP_DONE:
        ERTS_BIF_PREP_RET(res, am_true);
        break;
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result");
        break;
    }

    if (ERTS_PROC_IS_EXITING(BIF_P)) {
        KILL_CATCHES(BIF_P); /* Must exit */
        ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }

    return res;
}
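/*
 * Calling-convention note with a hedged example (not from the original
 * source): failures are returned as plain atoms ('badarg', 'notsup',
 * 'false') rather than raised as exceptions, leaving it to the Erlang
 * wrapper to turn them into errors in the caller's context. The
 * Erlang-side shape is expected to be along the lines of:
 *
 *   erts_internal:port_command(Port, Data, [force, nosuspend])
 *
 * where the option list may be [] (checked as nil above) and any other
 * element yields 'badarg'.
 */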
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
    if (BIF_P != erts_code_purger)
        BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_atom(BIF_ARG_1))
        BIF_ERROR(BIF_P, BADARG);

    switch (BIF_ARG_2) {

    case am_prepare:
    case am_prepare_on_load: {
        /*
         * Prepare for purge by marking all fun
         * entries referring to the code to purge
         * with "pending purge" markers.
         */
        ErtsCodeIndex code_ix;
        Module* modp;
        Eterm res;

        if (is_value(purge_state.module))
            BIF_ERROR(BIF_P, BADARG);

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp)
            res = am_false;
        else {
            /*
             * Any code to purge?
             */
            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwlock_old_code(code_ix);
            } else {
                erts_rlock_old_code(code_ix);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                ASSERT(modp->on_load);
                ASSERT(modp->on_load->code_hdr);
                purge_state.saved_old = modp->old;
                modp->old = *modp->on_load;
                erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
                modp->on_load = 0;
            }

            if (!modp->old.code_hdr)
                res = am_false;
            else {
                BeamInstr* code;
                BeamInstr* end;
                erts_smp_mtx_lock(&purge_state.mtx);
                purge_state.module = BIF_ARG_1;
                erts_smp_mtx_unlock(&purge_state.mtx);
                res = am_true;
                code = (BeamInstr*) modp->old.code_hdr;
                end = (BeamInstr *)((char *)code + modp->old.code_length);
                erts_fun_purge_prepare(code, end);
            }

            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwunlock_old_code(code_ix);
            } else {
                erts_runlock_old_code(code_ix);
            }
        }

#ifndef ERTS_SMP
        BIF_RET(res);
#else
        if (res != am_true)
            BIF_RET(res);
        else {
            /*
             * We'll be resumed when all schedulers are guaranteed
             * to see the "pending purge" markers that we've made on
             * all fun entries of the code that we are about to purge.
             * Processes trying to call these funs will be suspended
             * before calling the funs. That is, we are guaranteed not
             * to get any more direct references into the code while
             * checking for such references...
             */
            erts_schedule_thr_prgr_later_op(resume_purger,
                                            NULL,
                                            &purger_lop_data);
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
            ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
        }
#endif
    }

    case am_abort: {
        /*
         * Soft purge that detected direct references into the code
         * we set out to purge. Abort the purge.
         */
        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);

#ifndef ERTS_SMP
        erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
        finalize_purge_operation(BIF_P, 0);
        BIF_RET(am_false);
#else
        /*
         * We need to restore the code addresses of the funs in
         * two stages in order to ensure that we do not get any
         * stale suspended processes due to the purge abort.
         * Restore the address pointer (erts_fun_purge_abort_prepare);
         * wait for thread progress; clear the pending purge address
         * pointer (erts_fun_purge_abort_finalize), and then
         * resume the processes that got suspended
         * (finalize_purge_operation).
         */
        erts_schedule_thr_prgr_later_op(finalize_purge_abort,
                                        NULL,
                                        &purger_lop_data);
        erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
#endif
    }

    case am_complete: {
        ErtsCodeIndex code_ix;
        BeamInstr* code;
        Module* modp;
        int is_blocking = 0;
        Eterm ret;
        ErtsLiteralArea *literals = NULL;

        /*
         * We have no direct references into the code;
         * complete the purge.
         */
        if (purge_state.module != BIF_ARG_1)
            BIF_ERROR(BIF_P, BADARG);

        if (!erts_try_seize_code_write_permission(BIF_P)) {
            ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
                            BIF_P, BIF_ARG_1, BIF_ARG_2);
        }

        code_ix = erts_active_code_ix();

        /*
         * Correct module?
         */
        if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
            ERTS_BIF_PREP_RET(ret, am_false);
        }
        else {
            erts_rwlock_old_code(code_ix);

            /*
             * Any code to purge?
             */
            if (!modp->old.code_hdr) {
                ERTS_BIF_PREP_RET(ret, am_false);
            }
            else {
                /*
                 * Unload any NIF library
                 */
                if (modp->old.nif != NULL
                    || IF_HIPE(hipe_purge_need_blocking(modp))) {
                    /* ToDo: Unload the NIF library without blocking */
                    erts_rwunlock_old_code(code_ix);
                    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    erts_smp_thr_progress_block();
                    is_blocking = 1;
                    erts_rwlock_old_code(code_ix);
                    if (modp->old.nif) {
                        erts_unload_nif(modp->old.nif);
                        modp->old.nif = NULL;
                    }
                }

                /*
                 * Remove the old code.
                 */
                ASSERT(erts_total_code_size >= modp->old.code_length);
                erts_total_code_size -= modp->old.code_length;
                code = (BeamInstr*) modp->old.code_hdr;
                erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
                beam_catches_delmod(modp->old.catches, code,
                                    modp->old.code_length, code_ix);
                literals = modp->old.code_hdr->literal_area;
                modp->old.code_hdr->literal_area = NULL;
                erts_free(ERTS_ALC_T_CODE, (void *) code);
                modp->old.code_hdr = NULL;
                modp->old.code_length = 0;
                modp->old.catches = BEAM_CATCHES_NIL;
                erts_remove_from_ranges(code);
#ifdef HIPE
                hipe_purge_module(modp, is_blocking);
#endif
                ERTS_BIF_PREP_RET(ret, am_true);
            }

            if (purge_state.saved_old.code_hdr) {
                modp->old = purge_state.saved_old;
                purge_state.saved_old.code_hdr = 0;
            }
            erts_rwunlock_old_code(code_ix);
        }

        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        }

        erts_release_code_write_permission();

        finalize_purge_operation(BIF_P, ret == am_true);

        if (literals) {
            ErtsLiteralAreaRef *ref;

            ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
                             sizeof(ErtsLiteralAreaRef));
            ref->literal_area = literals;
            ref->next = NULL;
            erts_smp_mtx_lock(&release_literal_areas.mtx);
            if (release_literal_areas.last) {
                release_literal_areas.last->next = ref;
                release_literal_areas.last = ref;
            }
            else {
                release_literal_areas.first = ref;
                release_literal_areas.last = ref;
            }
            erts_smp_mtx_unlock(&release_literal_areas.mtx);
            erts_queue_message(erts_literal_area_collector,
                               0,
                               erts_alloc_message(0, NULL),
                               am_copy_literals,
                               BIF_P->common.id);
        }

        return ret;
    }

    default:
        BIF_ERROR(BIF_P, BADARG);
    }
}
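/*
 * Summary (added for clarity; the phase atoms are those matched in the
 * switch above): erts_internal_purge_module_2 drives purging as a small
 * state machine run by the code purger process:
 *
 *   1. prepare / prepare_on_load - mark all fun entries referring to the
 *      old code with "pending purge" markers, then (on SMP builds) wait
 *      for thread progress so every scheduler sees the markers.
 *   2. abort - a direct reference into the code was found; undo the
 *      markers in two stages separated by a thread-progress wait, then
 *      resume any processes that were suspended on the marked funs.
 *   3. complete - no references remain; free the code, unload any NIF
 *      library (blocking if necessary), and hand the literal area over
 *      to the literal-area collector via the release_literal_areas queue
 *      and a 'copy_literals' message.
 */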
BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
{
    BIF_RETTYPE ret;
    Port *port;
    Eterm res;
    char *str;
    int err_type, err_num;
    ErtsLinkData *ldp;
    ErtsLink *lnk;

    port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num);
    if (!port) {
        if (err_type == -3) {
            ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
            if (err_num == BADARG)
                res = am_badarg;
            else if (err_num == SYSTEM_LIMIT)
                res = am_system_limit;
            else
                /* this is only here to silence gcc, it should not happen */
                BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
        } else if (err_type == -2) {
            str = erl_errno_id(err_num);
            res = erts_atom_put((byte *) str, sys_strlen(str),
                                ERTS_ATOM_ENC_LATIN1, 1);
        } else {
            res = am_einval;
        }
        BIF_RET(res);
    }

    ldp = erts_link_create(ERTS_LNK_TYPE_PORT,
                           BIF_P->common.id, port->common.id);
    ASSERT(ldp->a.other.item == port->common.id);
    ASSERT(ldp->b.other.item == BIF_P->common.id);
    /*
     * This link should not already be present, but can
     * potentially exist due to id wrapping...
     */
    lnk = erts_link_tree_lookup_insert(&ERTS_P_LINKS(BIF_P), &ldp->a);
    erts_link_tree_insert(&ERTS_P_LINKS(port), &ldp->b);

    if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {

        /* Copied from erl_port_task.c */
        port->async_open_port = erts_alloc(ERTS_ALC_T_PRTSD,
                                           sizeof(*port->async_open_port));
        erts_make_ref_in_array(port->async_open_port->ref);
        port->async_open_port->to = BIF_P->common.id;

        /*
         * We unconditionally *must* do a receive on a message
         * containing the reference after this...
         */
        ERTS_RECV_MARK_SAVE(BIF_P);
        ERTS_RECV_MARK_SET(BIF_P);

        res = erts_proc_store_ref(BIF_P, port->async_open_port->ref);
    } else {
        res = port->common.id;
    }

    if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS))
        trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P,
                   am_link, port->common.id);

    ERTS_BIF_PREP_RET(ret, res);

    erts_port_release(port);

    if (lnk)
        erts_link_release(lnk);

    return ret;
}
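/*
 * Note with a hedged sketch (not from the original source): for drivers
 * with ERL_DRV_FLAG_USE_INIT_ACK the caller gets a reference instead of
 * the port id, and the recv-mark calls above prime the process so that a
 * subsequent selective receive on that fresh reference can skip the
 * messages already in the queue. The Erlang-side pattern is expected to
 * look roughly like:
 *
 *   Ref = erts_internal:open_port(PortName, Settings),
 *   receive {Ref, Result} -> Result end
 *
 * (the exact message shape here is an assumption for illustration).
 */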
static BIF_RETTYPE do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2,
                                   Eterm arg3, Uint32 flags)
{
    BIF_RETTYPE res;
    Port *p;

    /* Trace sched out before lock check wait */
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(BIF_P, am_out);
    }

    if (erts_system_profile_flags.runnable_procs
        && erts_system_profile_flags.exclusive) {
        profile_runnable_proc(BIF_P, am_inactive);
    }

    p = id_or_name2port(BIF_P, arg1);
    if (!p) {
        if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
            trace_virtual_sched(BIF_P, am_in);
        }
        if (erts_system_profile_flags.runnable_procs
            && erts_system_profile_flags.exclusive) {
            profile_runnable_proc(BIF_P, am_active);
        }
        BIF_ERROR(BIF_P, BADARG);
    }

    /* Trace port in, id_or_name2port causes wait */
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports_where(p, am_in, am_command);
    }
    if (erts_system_profile_flags.runnable_ports
        && !erts_port_is_scheduled(p)) {
        profile_runnable_port(p, am_active);
    }

    ERTS_BIF_PREP_RET(res, am_true);

    if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
        && !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
        ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
    }
    else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
             && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
        if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
            ERTS_BIF_PREP_RET(res, am_false);
        }
        else {
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
            if (erts_system_monitor_flags.busy_port) {
                monitor_generic(BIF_P, am_busy_port, p->id);
            }
            ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3],
                                 BIF_P, arg1, arg2, arg3);
        }
    }
    else {
        int wres;
        erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
        ERTS_SMP_CHK_NO_PROC_LOCKS;
        wres = erts_write_to_port(BIF_P->id, p, arg2);
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        if (wres != 0) {
            ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
        }
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports_where(p, am_out, am_command);
    }
    if (erts_system_profile_flags.runnable_ports
        && !erts_port_is_scheduled(p)) {
        profile_runnable_port(p, am_inactive);
    }

    erts_port_release(p);

    /* Trace sched in after port release */
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(BIF_P, am_in);
    }
    if (erts_system_profile_flags.runnable_procs
        && erts_system_profile_flags.exclusive) {
        profile_runnable_proc(BIF_P, am_active);
    }

    if (ERTS_PROC_IS_EXITING(BIF_P)) {
        KILL_CATCHES(BIF_P); /* Must exit */
        ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }
    return res;
}
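/*
 * Decision summary (added for clarity, derived from the branches above)
 * for a command sent to a busy port:
 *
 *   force, driver has ERL_DRV_FLAG_SOFT_BUSY  -> write anyway
 *   force, driver lacks the flag              -> notsup exception
 *   nosuspend (without force)                 -> return 'false'
 *   neither option                            -> suspend the caller and
 *                                                yield; the call is
 *                                                retried via
 *                                                port_command/3 once the
 *                                                port is no longer busy
 */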