/*
 * Register a process or port (can't be registered twice).
 * Returns 0 if name, process or port is already registered.
 *
 * When smp support is enabled:
 *   * Assumes that main lock is locked (and only main lock)
 *     on c_p.
 */
int erts_register_name(Process *c_p, Eterm name, Eterm id)
{
    int res = 0;
    Process *proc = NULL;
    Port *port = NULL;
    RegProc r, *rp;
    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    /* Reject invalid names; 'undefined' is reserved (means "no name"). */
    if (is_not_atom(name) || name == am_undefined)
        return res;

    if (c_p->common.id == id) /* A very common case I think... */
        proc = c_p;
    else {
        /* Only local (internal) pids and ports can be registered. */
        if (is_not_internal_pid(id) && is_not_internal_port(id))
            return res;
        /* Registering some other entity: drop our main lock before
         * looking up / locking the target, to keep lock order. */
        erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
        if (is_internal_port(id)) {
            port = erts_id2port(id);
            if (!port)
                goto done;  /* Port is gone; nothing to register. */
        }
    }

#ifdef ERTS_SMP
    {
        /* Take the registry write lock; reg_safe_write_lock() may have to
         * temporarily release our main lock to respect lock order, in
         * which case proc_locks comes back cleared and we must relock. */
        ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
        reg_safe_write_lock(proc, &proc_locks);

        if (proc && !proc_locks)
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
#endif

    if (is_internal_pid(id)) {
        if (!proc)
            proc = erts_pid2proc(NULL, 0, id, ERTS_PROC_LOCK_MAIN);
        r.p = proc;
        if (!proc)
            goto done;               /* Process no longer alive. */
        if (proc->common.u.alive.reg)
            goto done;               /* Already registered: fail. */
        r.pt = NULL;
    }
    else {
        ASSERT(!INVALID_PORT(port, id));
        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
        r.pt = port;
        if (r.pt->common.u.alive.reg)
            goto done;               /* Already registered: fail. */
        r.p = NULL;
    }

    r.name = name;

    /* hash_put() returns the entry stored under 'name' — either our new
     * entry or a pre-existing one (name already taken). Only link the
     * registration back into the process/port if our entry won. */
    rp = (RegProc*) hash_put(&process_reg, (void*) &r);
    if (proc && rp->p == proc) {
        if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
            trace_proc(proc, ERTS_PROC_LOCK_MAIN, proc, am_register, name);
        }
        proc->common.u.alive.reg = rp;
    }
    else if (port && rp->pt == port) {
        if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
            trace_port(port, am_register, name);
        }
        port->common.u.alive.reg = rp;
    }

    /* Success iff the entry now in the table refers to 'id'. */
    if ((rp->p && rp->p->common.id == id)
        || (rp->pt && rp->pt->common.id == id)) {
        res = 1;
    }

 done:
    reg_write_unlock();
    if (port)
        erts_port_release(port);
    if (c_p != proc) {
        /* Restore the lock state promised to the caller:
         * target's main lock released, c_p's main lock held. */
        if (proc)
            erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
    return res;
}
/* Add a message last in message queue.
 *
 * receiver       - process to deliver to
 * receiver_locks - in/out: locks currently held on 'receiver' by the
 *                  caller; may be extended by this function (the caller
 *                  is responsible for the extra locks afterwards)
 * bp             - heap fragment holding 'message' (or NULL)
 * message        - the message term
 * seq_trace_token- sequential-trace token to attach
 */
void
erts_queue_message(Process* receiver,
                   ErtsProcLocks *receiver_locks,
                   ErlHeapFragment* bp,
                   Eterm message,
                   Eterm seq_trace_token)
{
    ErlMessage* mp;
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#else
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

#ifdef ERTS_SMP
    /* We need both MSGQ and STATUS locks; compute the subset not
     * already held and try to take it without blocking. */
    need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
                                       | ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
        *receiver_locks |= need_locks;
        if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
            if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                /* Only MSGQ was missing but it is contended; release
                 * STATUS too and re-take both in proper lock order. */
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks = (ERTS_PROC_LOCK_MSGQ
                              | ERTS_PROC_LOCK_STATUS);
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
    }

    if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
        /* Drop message if receiver is exiting or has a pending
         * exit ... */
        if (bp)
            free_message_buffer(bp);
        message_free(mp);
        return;
    }
#endif

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
    mp->next = NULL;

#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        mp->data.heap_frag = bp;

        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else {
        mp->data.heap_frag = bp;
        LINK_MESSAGE(receiver, mp);
    }
#else
    mp->data.heap_frag = bp;
    LINK_MESSAGE(receiver, mp);
#endif

    /* Wake the receiver / mark it runnable. */
    notify_new_message(receiver);

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
        trace_receive(receiver, message);
    }

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
}
/*
 * erts_debug:breakpoint/2 BIF.
 *
 * BIF_ARG_1: {Module,Function,Arity} pattern; any element may be '_'
 *            (wildcard), but a wildcard may not be followed by a
 *            specified element (e.g. {'_', foo, 1} is invalid).
 * BIF_ARG_2: 'true' to set debug breakpoints on matching functions,
 *            'false' to clear them.
 * Returns the number of functions matched; badarg on bad input.
 */
BIF_RETTYPE
erts_debug_breakpoint_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm MFA = BIF_ARG_1;
    Eterm boolean = BIF_ARG_2;
    Eterm* tp;
    Eterm mfa[3];
    int i;
    int specified = 0;
    Eterm res;
    BpFunctions f;

    if (boolean != am_true && boolean != am_false)
        goto error;

    if (is_not_tuple(MFA)) {
        goto error;
    }
    tp = tuple_val(MFA);
    if (*tp != make_arityval(3)) {
        goto error;
    }
    mfa[0] = tp[1];
    mfa[1] = tp[2];
    mfa[2] = tp[3];
    if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
        (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
        goto error;
    }
    /* Count leading non-wildcard elements... */
    for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
        /* Empty loop body */
    }
    /* ...and require that everything after the first wildcard is
     * also a wildcard. */
    for (i = specified; i < 3; i++) {
        if (mfa[i] != am_Underscore) {
            goto error;
        }
    }
    /* Untag the arity to a plain integer for the matcher. */
    if (is_small(mfa[2])) {
        mfa[2] = signed_val(mfa[2]);
    }

    /* Serialize against other code modifications; yield and retry the
     * BIF if someone else holds the code write permission. */
    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    /* Block all other schedulers while patching code. */
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    erts_bp_match_functions(&f, mfa, specified);
    if (boolean == am_true) {
        erts_set_debug_break(&f);
        erts_install_breakpoints(&f);
        erts_commit_staged_bp();
    } else {
        erts_clear_debug_break(&f);
        erts_commit_staged_bp();
        erts_uninstall_breakpoints(&f);
    }
    erts_consolidate_bp_data(&f, 1);
    res = make_small(f.matched);
    erts_bp_free_matched_functions(&f);

    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;

 error:
    BIF_ERROR(p, BADARG);
}
/* Add messages last in message queue.
 *
 * Links the pre-built chain first..*last (len messages) onto the end of
 * 'receiver's message queue. Returns the resulting queue length seen by
 * the sender, or 0 if the messages were dropped because the receiver is
 * exiting. Takes ERTS_PROC_LOCK_MSGQ itself if the caller does not
 * already hold it.
 */
static Sint
queue_messages(Process *c_p,
               Process* receiver,
               erts_aint32_t *receiver_state,
               ErtsProcLocks *receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len)
{
    Sint res;
    int locked_msgq = 0;    /* Did we take MSGQ here (so must release)? */
    erts_aint32_t state;

    ASSERT(is_value(ERL_MESSAGE_TERM(first)));
    ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
           ERL_MESSAGE_TOKEN(first) == NIL ||
           is_tuple(ERL_MESSAGE_TOKEN(first)));

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                       *receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            /* MSGQ is contended; check the receiver state before
             * blocking — no point waiting to deliver to an exiting
             * process. */
            if (receiver_state)
                state = *receiver_state;
            else
                state = erts_smp_atomic32_read_nob(&receiver->state);
            if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
                goto exiting;

            /* Release STATUS if held and re-take both in lock order. */
            if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks |= ERTS_PROC_LOCK_STATUS;
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
        locked_msgq = 1;
    }
#endif

    state = erts_smp_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
#ifdef ERTS_SMP
    exiting:
#endif
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq)
            erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        erts_cleanup_messages(first);
        return 0;
    }

    res = receiver->msg.len;
#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        res += receiver->msg_inq.len;
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else
#endif
    {
        LINK_MESSAGE(receiver, first, last, len);
    }

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
        ErtsMessage *msg = first;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

            dtrace_proc_str(receiver, receiver_name);
            if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
            }
            DTRACE6(message_queued,
                    receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
                    receiver->msg.len,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        /* Emit one 'receive' trace event per queued message. */
        while (msg) {
            trace_receive(receiver, ERL_MESSAGE_TERM(msg));
            msg = msg->next;
        }
    }

    if (locked_msgq)
        erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);

#ifdef ERTS_SMP
    erts_proc_notify_new_message(receiver, *receiver_locks);
#else
    erts_proc_notify_new_message(receiver, 0);
    ERTS_HOLE_CHECK(receiver);
#endif
    return res;
}
/*
 * Implementation of open_port/2: parse the settings list, pick the
 * driver indicated by 'name' (vanilla / spawn / fd), and start it.
 *
 * On failure returns NULL with *err_typep/*err_nump describing the
 * error (-3/BADARG for argument errors). All temporary allocations
 * (name_buf, argv, wd) are released on every exit path via do_return.
 */
static Port *
open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
{
    int i;
    Eterm option;
    Uint arity;
    Eterm* tp;
    Uint* nargs;
    erts_driver_t* driver;
    char* name_buf = NULL;
    SysDriverOpts opts;
    Sint linebuf;
    Eterm edir = NIL;
    byte dir[MAXPATHLEN];
    erts_aint32_t sflgs = 0;
    Port *port;

    /* These are the defaults */
    opts.packet_bytes = 0;
    opts.use_stdio = 1;
    opts.redir_stderr = 0;
    opts.read_write = 0;
    opts.hide_window = 0;
    opts.wd = NULL;
    opts.envir = NULL;
    opts.exit_status = 0;
    opts.overlapped_io = 0;
    opts.spawn_type = ERTS_SPAWN_ANY;
    opts.argv = NULL;
    opts.parallelism = erts_port_parallelism;
    linebuf = 0;

    *err_nump = 0;

    if (is_not_list(settings) && is_not_nil(settings)) {
        goto badarg;
    }
    /*
     * Parse the settings.
     */
    if (is_not_nil(settings)) {
        nargs = list_val(settings);
        while (1) {
            if (is_tuple_arity(*nargs, 2)) {
                /* {Option, Value} pairs. */
                tp = tuple_val(*nargs);
                arity = *tp++;
                option = *tp++;
                if (option == am_packet) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    opts.packet_bytes = signed_val(*tp);
                    /* Only 1-, 2- and 4-byte packet headers exist. */
                    switch (opts.packet_bytes) {
                    case 1:
                    case 2:
                    case 4:
                        break;
                    default:
                        goto badarg;
                    }
                } else if (option == am_line) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    linebuf = signed_val(*tp);
                    if (linebuf <= 0) {
                        goto badarg;
                    }
                } else if (option == am_env) {
                    byte* bytes;
                    if ((bytes = convert_environment(p, *tp)) == NULL) {
                        goto badarg;
                    }
                    opts.envir = (char *) bytes;
                } else if (option == am_args) {
                    char **av;
                    char **oav = opts.argv;
                    if ((av = convert_args(*tp)) == NULL) {
                        goto badarg;
                    }
                    opts.argv = av;
                    if (oav) {
                        /* An arg0 option was seen before args; keep the
                         * explicit argv[0] from the old vector. */
                        opts.argv[0] = oav[0];
                        oav[0] = erts_default_arg0;
                        free_args(oav);
                    }
                } else if (option == am_arg0) {
                    char *a0;
                    if ((a0 = erts_convert_filename_to_native(*tp, NULL, 0, ERTS_ALC_T_TMP, 1, 1, NULL)) == NULL) {
                        goto badarg;
                    }
                    if (opts.argv == NULL) {
                        /* No args seen yet: create a vector holding only
                         * argv[0]. */
                        opts.argv = erts_alloc(ERTS_ALC_T_TMP,
                                               2 * sizeof(char **));
                        opts.argv[0] = a0;
                        opts.argv[1] = NULL;
                    } else {
                        if (opts.argv[0] != erts_default_arg0) {
                            erts_free(ERTS_ALC_T_TMP, opts.argv[0]);
                        }
                        opts.argv[0] = a0;
                    }
                } else if (option == am_cd) {
                    edir = *tp;    /* Working directory; converted below. */
                } else if (option == am_parallelism) {
                    if (*tp == am_true)
                        opts.parallelism = 1;
                    else if (*tp == am_false)
                        opts.parallelism = 0;
                    else
                        goto badarg;
                } else {
                    goto badarg;
                }
            } else if (*nargs == am_stream) {
                /* Atom (flag) options. */
                opts.packet_bytes = 0;
            } else if (*nargs == am_use_stdio) {
                opts.use_stdio = 1;
            } else if (*nargs == am_stderr_to_stdout) {
                opts.redir_stderr = 1;
            } else if (*nargs == am_line) {
                linebuf = 512;    /* Default line buffer size. */
            } else if (*nargs == am_nouse_stdio) {
                opts.use_stdio = 0;
            } else if (*nargs == am_binary) {
                sflgs |= ERTS_PORT_SFLG_BINARY_IO;
            } else if (*nargs == am_in) {
                opts.read_write |= DO_READ;
            } else if (*nargs == am_out) {
                opts.read_write |= DO_WRITE;
            } else if (*nargs == am_eof) {
                sflgs |= ERTS_PORT_SFLG_SOFT_EOF;
            } else if (*nargs == am_hide) {
                opts.hide_window = 1;
            } else if (*nargs == am_exit_status) {
                opts.exit_status = 1;
            } else if (*nargs == am_overlapped_io) {
                opts.overlapped_io = 1;
            } else {
                goto badarg;
            }
            if (is_nil(*++nargs))
                break;
            if (is_not_list(*nargs)) {
                goto badarg;
            }
            nargs = list_val(*nargs);
        }
    }
    if (opts.read_write == 0)	/* implement default */
        opts.read_write = DO_READ|DO_WRITE;

    /* Mutually exclusive arguments. */
    if((linebuf && opts.packet_bytes) ||
       (opts.redir_stderr && !opts.use_stdio)) {
        goto badarg;
    }

    /*
     * Parse the first argument and start the appropriate driver.
     */
    if (is_atom(name) || (i = is_string(name))) {
        /* a vanilla port */
        if (is_atom(name)) {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
                                           atom_tab(atom_val(name))->len+1);
            sys_memcpy((void *) name_buf,
                       (void *) atom_tab(atom_val(name))->name,
                       atom_tab(atom_val(name))->len);
            name_buf[atom_tab(atom_val(name))->len] = '\0';
        } else {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
            if (intlist_to_buf(name, name_buf, i) != i)
                erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
            name_buf[i] = '\0';
        }
        driver = &vanilla_driver;
    } else {
        if (is_not_tuple(name)) {
            goto badarg;		/* Not a process or fd port */
        }
        tp = tuple_val(name);
        arity = *tp++;

        if (arity == make_arityval(0)) {
            goto badarg;
        }

        if (*tp == am_spawn || *tp == am_spawn_driver || *tp == am_spawn_executable) {	/* A process port */
            int encoding;
            if (arity != make_arityval(2)) {
                goto badarg;
            }
            name = tp[1];
            encoding = erts_get_native_filename_encoding();
            /* Do not convert the command to utf-16le yet, do that in win32 specific code */
            /* since the cmd is used for comparsion with drivers names and copied to port info */
            if (encoding == ERL_FILENAME_WIN_WCHAR) {
                encoding = ERL_FILENAME_UTF8;
            }
            if ((name_buf = erts_convert_filename_to_encoding(name, NULL, 0, ERTS_ALC_T_TMP,0,1, encoding, NULL))
                == NULL) {
                goto badarg;
            }
            if (*tp == am_spawn_driver) {
                opts.spawn_type = ERTS_SPAWN_DRIVER;
            } else if (*tp == am_spawn_executable) {
                opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
            }
            driver = &spawn_driver;
        } else if (*tp == am_fd) { /* An fd port */
            int n;
            struct Sint_buf sbuf;
            char* p;

            if (arity != make_arityval(3)) {
                goto badarg;
            }
            if (is_not_small(tp[1]) || is_not_small(tp[2])) {
                goto badarg;
            }
            opts.ifd = unsigned_val(tp[1]);
            opts.ofd = unsigned_val(tp[2]);

            /* Syntesize name from input and output descriptor. */
            name_buf = erts_alloc(ERTS_ALC_T_TMP,
                                  2*sizeof(struct Sint_buf) + 2);
            p = Sint_to_buf(opts.ifd, &sbuf);
            n = sys_strlen(p);
            sys_strncpy(name_buf, p, n);
            name_buf[n] = '/';
            p = Sint_to_buf(opts.ofd, &sbuf);
            sys_strcpy(name_buf+n+1, p);

            driver = &fd_driver;
        } else {
            goto badarg;
        }
    }

    if ((driver != &spawn_driver && opts.argv != NULL) ||
        (driver == &spawn_driver &&
         opts.spawn_type != ERTS_SPAWN_EXECUTABLE &&
         opts.argv != NULL)) {
        /* Argument vector only if explicit spawn_executable */
        goto badarg;
    }

    if (edir != NIL) {
        /* Convert the 'cd' directory to native encoding. */
        if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) {
            goto badarg;
        }
    }

    if (driver != &spawn_driver && opts.exit_status) {
        goto badarg;
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(p, am_out);
    }

    /* Starting the driver may block; release our main lock meanwhile. */
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);

    port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump);
#ifdef USE_VM_PROBES
    if (port && DTRACE_ENABLED(port_open)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

        dtrace_proc_str(p, process_str);
        erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id);
        DTRACE3(port_open, process_str, name_buf, port_str);
    }
#endif
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);

    if (!port) {
        DEBUGF(("open_driver returned (%d:%d)\n",
                err_typep ? *err_typep : 4711,
                err_nump ? *err_nump : 4711));
        if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
            trace_virtual_sched(p, am_in);
        }
        goto do_return;
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(p, am_in);
    }

    if (linebuf && port->linebuf == NULL){
        port->linebuf = allocate_linebuf(linebuf);
        sflgs |= ERTS_PORT_SFLG_LINEBUF_IO;
    }

    if (sflgs)
        erts_atomic32_read_bor_relb(&port->state, sflgs);

 do_return:
    /* Common cleanup for both success and failure paths. */
    if (name_buf)
        erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
    if (opts.argv) {
        free_args(opts.argv);
    }
    if (opts.wd && opts.wd != ((char *)dir)) {
        erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
    }
    return port;

 badarg:
    if (err_typep)
        *err_typep = -3;
    if (err_nump)
        *err_nump = BADARG;
    port = NULL;
    goto do_return;
}
/*
 * erlang:purge_module/1 BIF: remove the old (previous) version of a
 * module's code. Fails with badarg if the module does not exist or has
 * no old code. Unloading a NIF library requires blocking all other
 * schedulers (thread progress block).
 */
BIF_RETTYPE
purge_module_1(BIF_ALIST_1)
{
    ErtsCodeIndex code_ix;
    BeamInstr* code;
    BeamInstr* end;
    Module* modp;
    int is_blocking = 0;    /* Did we block thread progress (must undo)? */
    Eterm ret;

    if (is_not_atom(BIF_ARG_1)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    /* Serialize with other code-modifying operations; yield and retry
     * the BIF if someone else holds the permission. */
    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1);
    }

    code_ix = erts_active_code_ix();

    /*
     * Correct module?
     */

    if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
        ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
    }
    else {
        erts_rwlock_old_code(code_ix);

        /*
         * Any code to purge?
         */
        if (!modp->old.code_hdr) {
            ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
        }
        else {
            /*
             * Unload any NIF library
             */
            if (modp->old.nif != NULL) {
                /* ToDo: Do unload nif without blocking */
                /* Must drop the old-code lock before blocking thread
                 * progress, then re-take it afterwards. */
                erts_rwunlock_old_code(code_ix);
                erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_smp_thr_progress_block();
                is_blocking = 1;
                erts_rwlock_old_code(code_ix);
                erts_unload_nif(modp->old.nif);
                modp->old.nif = NULL;
            }

            /*
             * Remove the old code.
             */
            ASSERT(erts_total_code_size >= modp->old.code_length);
            erts_total_code_size -= modp->old.code_length;
            code = (BeamInstr*) modp->old.code_hdr;
            end = (BeamInstr *)((char *)code + modp->old.code_length);
            erts_cleanup_funs_on_purge(code, end);
            beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
                                code_ix);
            decrement_refc(modp->old.code_hdr);
            if (modp->old.code_hdr->literals_start) {
                erts_free(ERTS_ALC_T_LITERAL,
                          modp->old.code_hdr->literals_start);
            }
            erts_free(ERTS_ALC_T_CODE, (void *) code);
            modp->old.code_hdr = NULL;
            modp->old.code_length = 0;
            modp->old.catches = BEAM_CATCHES_NIL;
            /* 'code' is freed above but used only as a lookup key here. */
            erts_remove_from_ranges(code);
            ERTS_BIF_PREP_RET(ret, am_true);
        }
        erts_rwunlock_old_code(code_ix);
    }
    if (is_blocking) {
        erts_smp_thr_progress_unblock();
        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    }
    erts_release_code_write_permission();
    return ret;
}
/*
 * Check whether process 'rp' still references the old code of module
 * 'modp'. Returns am_true if it does (purge must wait), am_false if it
 * is clean, am_aborted if a needed GC was disallowed, and THE_NON_VALUE
 * if the process has GC disabled and we must retry later.
 *
 * References to the module's code area (mod_start/mod_size) cannot be
 * removed; references to its literal area (literals/lit_bsize) can be
 * copied away by an ordinary and/or literal GC, which is attempted (at
 * most once each) when allow_gc is set. *redsp is incremented with the
 * reduction cost of any GC performed.
 */
static Eterm
check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
{
    BeamInstr* start;
    char* literals;
    Uint lit_bsize;
    char* mod_start;
    Uint mod_size;
    Eterm* sp;
    int done_gc = 0;    /* GC kinds already attempted */
    int need_gc = 0;    /* GC kinds needed this round */
    ErtsMessage *msgp;
    ErlHeapFragment *hfrag;

#define ERTS_ORDINARY_GC__ (1 << 0)
#define ERTS_LITERAL_GC__ (1 << 1)

    /*
     * Pick up limits for the module.
     */
    start = (BeamInstr*) modp->old.code_hdr;
    mod_start = (char *) start;
    mod_size = modp->old.code_length;

    /*
     * Check if current instruction or continuation pointer points into module.
     */
    if (ErtsInArea(rp->i, mod_start, mod_size)
        || ErtsInArea(rp->cp, mod_start, mod_size)) {
        return am_true;
    }

    /*
     * Check all continuation pointers stored on the stack.
     */
    for (sp = rp->stop; sp < STACK_START(rp); sp++) {
        if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
            return am_true;
        }
    }

    /*
     * Check all continuation pointers stored in stackdump
     * and clear exception stackdump if there is a pointer
     * to the module.
     */
    if (rp->ftrace != NIL) {
        struct StackTrace *s;
        ASSERT(is_list(rp->ftrace));
        s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
        if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
            (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
            rp->freason = EXC_NULL;
            rp->fvalue = NIL;
            rp->ftrace = NIL;
        } else {
            int i;
            for (i = 0;  i < s->depth;  i++) {
                if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
                    rp->freason = EXC_NULL;
                    rp->fvalue = NIL;
                    rp->ftrace = NIL;
                    break;
                }
            }
        }
    }

    if (rp->flags & F_DISABLE_GC) {
        /*
         * Cannot proceed. Process has disabled gc in order to
         * safely leave inconsistent data on the heap and/or
         * off heap lists. Need to wait for gc to be enabled
         * again.
         */
        return THE_NON_VALUE;
    }

    /*
     * Message queue can contains funs, but (at least currently) no
     * constants. If we got references to this module from the message
     * queue, a GC cannot remove these...
     */

    erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
    ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);

    for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
        if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
            hfrag = &msgp->hfrag;
        else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
            hfrag = msgp->data.heap_frag;
        else
            continue;
        for (; hfrag; hfrag = hfrag->next) {
            if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
                return am_true;
            /* Should not contain any constants... */
            ASSERT(!any_heap_refs(&hfrag->mem[0],
                                  &hfrag->mem[hfrag->used_size],
                                  mod_start, mod_size));
        }
    }

    literals = (char*) modp->old.code_hdr->literals_start;
    lit_bsize = (char*) modp->old.code_hdr->literals_end - literals;

    while (1) {

        /* Check heap, stack etc... */
        if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size))
            goto try_gc;
        if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) {
            rp->freason = EXC_NULL;
            rp->fvalue = NIL;
            rp->ftrace = NIL;
        }
        if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
            goto try_literal_gc;
        if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
            goto try_literal_gc;
        if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
            goto try_literal_gc;

        /* Check dictionary */
        if (rp->dictionary) {
            Eterm* start = rp->dictionary->data;
            Eterm* end = start + rp->dictionary->used;

            if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
                goto try_literal_gc;
        }

        /* Check heap fragments */
        for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
            Eterm *hp, *hp_end;
            /* Off heap lists should already have been moved into process */
            ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));

            hp = &hfrag->mem[0];
            hp_end = &hfrag->mem[hfrag->used_size];
            /* BUGFIX: was 'mod_start, lit_bsize' — mismatched area start
             * and size; the scan is for literal-area references. */
            if (any_heap_refs(hp, hp_end, literals, lit_bsize))
                goto try_literal_gc;
        }

#ifdef DEBUG
        /*
         * Message buffer fragments should not have any references
         * to constants, and off heap lists should already have
         * been moved into process off heap structure.
         */
        for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
            if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
                hfrag = &msgp->hfrag;
            else
                hfrag = msgp->data.heap_frag;
            for (; hfrag; hfrag = hfrag->next) {
                Eterm *hp, *hp_end;
                ASSERT(!check_mod_funs(rp, &hfrag->off_heap,
                                       mod_start, mod_size));

                hp = &hfrag->mem[0];
                hp_end = &hfrag->mem[hfrag->used_size];
                /* BUGFIX: was 'mod_start, lit_bsize' — see above. */
                ASSERT(!any_heap_refs(hp, hp_end, literals, lit_bsize));
            }
        }
#endif

        return am_false;

    try_literal_gc:
        need_gc |= ERTS_LITERAL_GC__;

    try_gc:
        need_gc |= ERTS_ORDINARY_GC__;

        if ((done_gc & need_gc) == need_gc)
            return am_true;    /* GC already tried; reference persists. */

        if (!allow_gc)
            return am_aborted;

        need_gc &= ~done_gc;

        /*
         * Try to get rid of constants by by garbage collecting.
         * Clear both fvalue and ftrace.
         */

        rp->freason = EXC_NULL;
        rp->fvalue = NIL;
        rp->ftrace = NIL;

        if (need_gc & ERTS_ORDINARY_GC__) {
            FLAGS(rp) |= F_NEED_FULLSWEEP;
            *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity);
            done_gc |= ERTS_ORDINARY_GC__;
        }
        if (need_gc & ERTS_LITERAL_GC__) {
            struct erl_off_heap_header* oh;
            oh = modp->old.code_hdr->literals_off_heap;
            *redsp += lit_bsize / 64; /* Need, better value... */
            erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh);
            done_gc |= ERTS_LITERAL_GC__;
        }
        need_gc = 0;
    }

#undef ERTS_ORDINARY_GC__
#undef ERTS_LITERAL_GC__
}
/*
 * Queue a message that arrived from another node, still in external
 * (ext-binary) format. If the receiver is being traced for 'receive',
 * the message must be decoded first and is then delivered through
 * erts_queue_message(); otherwise it is linked in undecoded and decoded
 * lazily by the receiver.
 *
 * BUGFIX: the 'if (is_value(msg))' guard lacked braces; with
 * USE_VM_PROBES defined it governed only the DTRACE conditional, so
 * erts_queue_message() was called even when decoding failed (msg is
 * THE_NON_VALUE). Braces added so queueing is conditional in both builds.
 */
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks *rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        Eterm token)
{
    ErlMessage* mp;
#ifdef USE_VM_PROBES
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#endif

    ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = message_alloc();

#ifdef ERTS_SMP
    /* Acquire MSGQ and STATUS locks not already held, preserving lock
     * order if the trylock fails. */
    need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
        *rcvr_locks |= need_locks;
        if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) {
            if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
                need_locks = (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS);
            }
            erts_smp_proc_lock(rcvr, need_locks);
        }
    }

    if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) {
        /* Drop message if receiver is exiting or has a pending exit ... */
        if (is_not_nil(token)) {
            ErlHeapFragment *heap_frag;
            heap_frag = erts_dist_ext_trailer(mp->data.dist_ext);
            erts_cleanup_offheap(&heap_frag->off_heap);
        }
        erts_free_dist_ext_copy(dist_ext);
        message_free(mp);
    }
    else
#endif
    if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
        /* Ahh... need to decode it in order to trace it... */
        ErlHeapFragment *mbuf;
        Eterm msg;
        message_free(mp);
        msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
        if (is_value(msg)) {
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(message_queued)) {
                DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

                dtrace_proc_str(rcvr, receiver_name);
                if (token != NIL && token != am_have_dt_utag) {
                    tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
                }
                DTRACE6(message_queued,
                        receiver_name, size_object(msg), rcvr->msg.len,
                        tok_label, tok_lastcnt, tok_serial);
            }
#endif
            erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token
#ifdef USE_VM_PROBES
                               , NIL
#endif
                               );
        }
    }
    else {
        /* Enqueue message on external format */

        ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
        ERL_MESSAGE_DT_UTAG(mp) = NIL;
        if (token == am_have_dt_utag) {
            ERL_MESSAGE_TOKEN(mp) = NIL;
        } else {
#endif
            ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
        }
#endif
        mp->next = NULL;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

            dtrace_proc_str(rcvr, receiver_name);
            if (token != NIL && token != am_have_dt_utag) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
            }
            /*
             * TODO: We don't know the real size of the external message here.
             *       -1 will appear to a D script as 4294967295.
             */
            DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif

        mp->data.dist_ext = dist_ext;
        LINK_MESSAGE(rcvr, mp);

        notify_new_message(rcvr);
    }
}
/* Add a message last in message queue.
 *
 * Variant with a dtrace user tag (dt_utag) parameter when the VM is
 * built with USE_VM_PROBES. Same locking contract as the non-probes
 * variant: *receiver_locks is in/out and may be extended here.
 */
void
erts_queue_message(Process* receiver,
                   ErtsProcLocks *receiver_locks,
                   ErlHeapFragment* bp,
                   Eterm message,
                   Eterm seq_trace_token
#ifdef USE_VM_PROBES
                   , Eterm dt_utag
#endif
                   )
{
    ErlMessage* mp;
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#else
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

#ifdef ERTS_SMP
    /* Acquire MSGQ and STATUS locks not already held; if the trylock
     * fails release STATUS and re-take both in proper lock order. */
    need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
                                       | ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
        *receiver_locks |= need_locks;
        if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
            if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks = (ERTS_PROC_LOCK_MSGQ
                              | ERTS_PROC_LOCK_STATUS);
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
    }

    if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
        /* Drop message if receiver is exiting or has a pending
         * exit ... */
        if (bp)
            free_message_buffer(bp);
        message_free(mp);
        return;
    }
#endif

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
#endif
    mp->next = NULL;
    mp->data.heap_frag = bp;

#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else {
        LINK_MESSAGE(receiver, mp);
    }
#else
    LINK_MESSAGE(receiver, mp);
#endif

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(message_queued)) {
        DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
        Sint tok_label = 0;
        Sint tok_lastcnt = 0;
        Sint tok_serial = 0;

        dtrace_proc_str(receiver, receiver_name);
        if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
            tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
            tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
        }
        DTRACE6(message_queued,
                receiver_name, size_object(message), receiver->msg.len,
                tok_label, tok_lastcnt, tok_serial);
    }
#endif

    /* Wake the receiver / mark it runnable. */
    notify_new_message(receiver);

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
        trace_receive(receiver, message);
    }

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
}
/*
 * Unregister a name
 * Return 0 if not registered
 * Otherwise returns 1
 *
 * BUGFIX: the argument to reg_safe_write_lock() had been mangled by an
 * HTML-entity corruption ("&curren;t_c_p_locks" rendered as
 * "¤t_c_p_locks"); restored to '&current_c_p_locks'.
 */
int erts_unregister_name(Process *c_p,
                         ErtsProcLocks c_p_locks,
                         Port *c_prt,
                         Eterm name)
{
    int res = 0;
    RegProc r, *rp;
    Port *port = c_prt;
#ifdef ERTS_SMP
    ErtsProcLocks current_c_p_locks;

    /*
     * SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
     *           we are *not* allowed to temporarily release the lock
     *           on c_prt.
     */

    if (!c_p)
        c_p_locks = 0;
    current_c_p_locks = c_p_locks;

 restart:

    reg_safe_write_lock(c_p, &current_c_p_locks);
#endif

    r.name = name;
    if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
        if (rp->pt) {
#ifdef ERTS_SMP
            if (port != rp->pt) {
                /* The registered port is not the one we already hold
                 * locked; drop any other port lock and acquire it. */
                if (port) {
                    ERTS_SMP_LC_ASSERT(port != c_prt);
                    erts_smp_port_unlock(port);
                    port = NULL;
                }
                if (erts_smp_port_trylock(rp->pt) == EBUSY) {
                    Eterm id = rp->pt->id; /* id read only... */
                    /* Unlock all locks, acquire port lock, and restart... */
                    if (current_c_p_locks) {
                        erts_smp_proc_unlock(c_p, current_c_p_locks);
                        current_c_p_locks = 0;
                    }
                    /* NOTE(review): releases the registry with
                     * reg_read_unlock() although reg_safe_write_lock()
                     * was used above — confirm the lock kinds match. */
                    reg_read_unlock();
                    port = erts_id2port(id, NULL, 0);
                    goto restart;
                }
                port = rp->pt;
            }
#endif
            ERTS_SMP_LC_ASSERT(rp->pt == port
                               && erts_lc_is_port_locked(port));
            rp->pt->reg = NULL;

            if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
                trace_port(port, am_unregister, name);
            }
        } else if (rp->p) {
            Process* p = rp->p;
#ifdef ERTS_SMP
            /* Safely lock the registered process's main lock while
             * possibly holding locks on c_p. */
            erts_proc_safelock(c_p,
                               current_c_p_locks,
                               c_p_locks,
                               rp->p,
                               0,
                               ERTS_PROC_LOCK_MAIN);
            current_c_p_locks = c_p_locks;
#endif
            p->reg = NULL;
#ifdef ERTS_SMP
            if (rp->p != c_p)
                erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
#endif
            if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
                trace_proc(c_p, p, am_unregister, name);
            }
        }
        hash_erase(&process_reg, (void*) &r);
        res = 1;
    }

    reg_write_unlock();
    if (c_prt != port) {
        /* Restore the port lock state promised to the caller. */
        if (port)
            erts_smp_port_unlock(port);
        if (c_prt)
            erts_smp_port_lock(c_prt);
    }
#ifdef ERTS_SMP
    if (c_p && !current_c_p_locks)
        erts_smp_proc_lock(c_p, c_p_locks);
#endif
    return res;
}
/*
 * Mode-switch between the BEAM interpreter and HiPE-compiled native code.
 *
 * 'cmd' carries a command code in its low byte (HIPE_MODE_SWITCH_CMD_*);
 * for the call commands the high bits carry the call arity.  'reg' is the
 * X register array.  Returns the current process, which may be a different
 * Process* than was passed in if the process was rescheduled via schedule().
 *
 * NOTE(review): the control flow below deliberately uses labels that are
 * jumped into from other switch arms (do_resume, do_throw_to_native,
 * do_schedule, do_return_from_native) — the statement order is load-bearing.
 */
Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
{
    unsigned result;
#if NR_ARG_REGS > 5
    /* When NR_ARG_REGS > 5, we need to protect the process' input
       reduction count (which BEAM stores in def_arg_reg[5]) from
       being clobbered by the arch glue code. */
    Eterm reds_in = p->def_arg_reg[5];
#endif
#if NR_ARG_REGS > 4
    Eterm o_reds = p->def_arg_reg[4];
#endif
    p->i = NULL;
    /* Set current_function to undefined. stdlib hibernate tests rely on it. */
    p->current = NULL;
    DPRINTF("cmd == %#x (%s)", cmd, code_str(cmd));
    HIPE_CHECK_PCB(p);
    p->arity = 0;
    switch (cmd & 0xFF) {
      case HIPE_MODE_SWITCH_CMD_CALL: {
          /* BEAM calls a native code function */
          unsigned arity = cmd >> 8;

          /* p->hipe.u.ncallee set in beam_emu */
          if (p->cp == hipe_beam_pc_return) {
              /* Native called BEAM, which now tailcalls native. */
              hipe_pop_beam_trap_frame(p);
              result = hipe_tailcall_to_native(p, arity, reg);
              break;
          }
          DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
          result = hipe_call_to_native(p, arity, reg);
          break;
      }
      case HIPE_MODE_SWITCH_CMD_CALL_CLOSURE: {
          /* BEAM calls a native code closure */
          unsigned arity = cmd >> 8; /* #formals + #fvs (closure not counted) */
          Eterm fun;
          ErlFunThing *funp;

          /* drop the fvs, move the closure, correct arity */
          fun = reg[arity];
          HIPE_ASSERT(is_fun(fun));
          funp = (ErlFunThing*)fun_val(fun);
          HIPE_ASSERT(funp->num_free <= arity);
          arity -= funp->num_free;      /* arity == #formals */
          reg[arity] = fun;
          ++arity;      /* correct for having added the closure */
          /* HIPE_ASSERT(p->hipe.u.ncallee == (void(*)(void))funp->native_address); */

          /* just like a normal call from now on */

          /* p->hipe.u.ncallee set in beam_emu */
          if (p->cp == hipe_beam_pc_return) {
              /* Native called BEAM, which now tailcalls native. */
              hipe_pop_beam_trap_frame(p);
              result = hipe_tailcall_to_native(p, arity, reg);
              break;
          }
          DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
          result = hipe_call_to_native(p, arity, reg);
          break;
      }
      case HIPE_MODE_SWITCH_CMD_THROW: {
          /* BEAM just executed hipe_beam_pc_throw[] */
          /* Native called BEAM, which now throws an exception back to native. */
          DPRINTF("beam throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
          hipe_pop_beam_trap_frame(p);
      do_throw_to_native:
          /* Also reached from the RES_APPLY failure path below. */
          p->def_arg_reg[0] = exception_tag[GET_EXC_CLASS(p->freason)];
          hipe_find_handler(p);
          result = hipe_throw_to_native(p);
          break;
      }
      case HIPE_MODE_SWITCH_CMD_RETURN: {
          /* BEAM just executed hipe_beam_pc_return[] */
          /* Native called BEAM, which now returns back to native. */
          /* pop trap frame off estack */
          hipe_pop_beam_trap_frame(p);
          p->def_arg_reg[0] = reg[0];
          result = hipe_return_to_native(p);
          break;
      }
    do_resume:
      case HIPE_MODE_SWITCH_CMD_RESUME: {
          /* BEAM just executed hipe_beam_pc_resume[] */
          /* BEAM called native, which suspended. */
          if (p->flags & F_TIMO) {
              /* XXX: The process will immediately execute 'clear_timeout',
                 repeating these two statements. Remove them? */
              p->flags &= ~F_TIMO;
              JOIN_MESSAGE(p);
              p->def_arg_reg[0] = 0;    /* make_small(0)? */
          } else
              p->def_arg_reg[0] = 1;    /* make_small(1)? */
          result = hipe_return_to_native(p);
          break;
      }
      default:
          erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: cmd %#x\r\n", cmd);
    }
 do_return_from_native:
    DPRINTF("result == %#x (%s)", result, code_str(result));
    HIPE_CHECK_PCB(p);
    /* Second dispatch: act on what native code handed back to us. */
    switch (result) {
      case HIPE_MODE_SWITCH_RES_RETURN: {
          hipe_return_from_native(p);
          reg[0] = p->def_arg_reg[0];
          DPRINTF("returning with r(0) == %#lx", reg[0]);
          break;
      }
      case HIPE_MODE_SWITCH_RES_THROW: {
          DPRINTF("native throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
          hipe_throw_from_native(p);
          break;
      }
      case HIPE_MODE_SWITCH_RES_TRAP: {
          /*
           * Native code called a BIF, which "failed" with a TRAP to BEAM.
           * Prior to returning, the BIF stored (see BIF_TRAP<N>):
           *   the callee's address in p->i
           *   the callee's parameters in reg[0..2]
           *   the callee's arity in p->arity (for BEAM gc purposes)
           *
           * We need to remove the BIF's parameters from the native
           * stack: to this end hipe_${ARCH}_glue.S stores the BIF's
           * arity in p->hipe.narity.
           *
           * If the BIF emptied the stack (typically hibernate), p->hipe.nstack
           * is NULL and there is no need to get rid of stacked parameters.
           */
          unsigned int i, is_recursive = 0;

          if (p->hipe.nstack != NULL) {
              ASSERT(p->hipe.nsp != NULL);
              is_recursive = hipe_trap_from_native_is_recursive(p);
          } else {
              /* Some architectures (risc) need this re-reset of nsp as the
               * BIF wrapper do not detect stack change and causes an obsolete
               * stack pointer to be saved in p->hipe.nsp before return to us. */
              p->hipe.nsp = NULL;
          }

          /* Schedule next process if current process was hibernated or is
             waiting for messages */
          if (p->flags & F_HIBERNATE_SCHED) {
              p->flags &= ~F_HIBERNATE_SCHED;
              goto do_schedule;
          }
          if (!(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) {
              /* Save the live X registers before yielding the scheduler. */
              for (i = 0; i < p->arity; ++i)
                  p->arg_reg[i] = reg[i];
              goto do_schedule;
          }

          if (is_recursive)
              hipe_push_beam_trap_frame(p, reg, p->arity);

          result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
          break;
      }
      case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: {
          /* Native code calls or tailcalls BEAM.
           *
           * p->hipe.u.callee_exp is the callee's export entry
           * p->arity is the callee's arity
           * p->def_arg_reg[] contains the register parameters
           * p->hipe.nsp[] contains the stacked parameters
           */
          if (hipe_call_from_native_is_recursive(p, reg)) {
              /* BEAM called native, which now calls BEAM */
              hipe_push_beam_trap_frame(p, reg, p->arity);
          }
          break;
      }
      case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: {
          /* Native code calls or tailcalls a closure in BEAM
           *
           * In native code a call to a closure of arity n looks like
           * F(A1, ..., AN, Closure),
           * The BEAM code for a closure expects to get:
           * F(A1, ..., AN, FV1, ..., FVM, Closure)
           * (Where Ai is argument i and FVj is free variable j)
           *
           * p->hipe.u.closure contains the closure
           * p->def_arg_reg[] contains the register parameters
           * p->hipe.nsp[] contains the stacked parameters
           */
          ErlFunThing *closure;
          unsigned num_free, arity, i, is_recursive;

          HIPE_ASSERT(is_fun(p->hipe.u.closure));
          closure = (ErlFunThing*)fun_val(p->hipe.u.closure);
          num_free = closure->num_free;
          arity = closure->fe->arity;

          /* Store the arity in p->arity for the stack popping. */
          /* Note: we already have the closure so only need to move arity
             values to reg[]. However, there are arity+1 parameters in the
             native code state that need to be removed. */
          p->arity = arity+1; /* +1 for the closure */

          /* Get parameters, don't do GC just yet. */
          is_recursive = hipe_call_from_native_is_recursive(p, reg);

          if ((Sint)closure->fe->address[-1] < 0) {
              /* Unloaded. Let beam_emu.c:call_fun() deal with it. */
              result = HIPE_MODE_SWITCH_RES_CALL_CLOSURE;
          } else {
              /* The BEAM code is present. Prepare to call it. */

              /* Append the free vars after the actual parameters. */
              for (i = 0; i < num_free; ++i)
                  reg[arity+i] = closure->env[i];

              /* Update arity to reflect the new parameters. */
              arity += i;

              /* Make a call to the closure's BEAM code. */
              p->i = closure->fe->address;

              /* Change result code to the faster plain CALL type. */
              result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
          }
          /* Append the closure as the last parameter. Don't increment arity. */
          reg[arity] = p->hipe.u.closure;

          if (is_recursive) {
              /* BEAM called BEAM called native, which now calls BEAM.
                 Need to put a trap-frame on the beam stack.
                 This may cause GC, which is safe now that
                 the arguments, free vars, and most importantly
                 the closure, all are in reg[]. */
              hipe_push_beam_trap_frame(p, reg, arity+1);
          }
          break;
      }
      case HIPE_MODE_SWITCH_RES_SUSPEND: {
          /* Arrange for BEAM to re-enter via hipe_beam_pc_resume[]. */
          p->i = hipe_beam_pc_resume;
          p->arity = 0;
          goto do_schedule;
      }
      case HIPE_MODE_SWITCH_RES_WAIT:
      case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: {
          /* same semantics, different debug trace messages */
#ifdef ERTS_SMP
          /* XXX: BEAM has different entries for the locked and unlocked
             cases. HiPE doesn't, so we must check dynamically. */
          if (p->hipe_smp.have_receive_locks)
              p->hipe_smp.have_receive_locks = 0;
          else
              erts_smp_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
#endif
          p->i = hipe_beam_pc_resume;
          p->arity = 0;
          erts_smp_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE);
          /* NOTE(review): this unlock is not wrapped in #ifdef ERTS_SMP like
             the matching lock above — presumably a no-op macro in non-SMP
             builds; confirm against the erl_process_lock.h definitions. */
          erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
      do_schedule:
          {
#if !(NR_ARG_REGS > 5)
              int reds_in = p->def_arg_reg[5];
#endif
              ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p);
              /* May return a different Process*: rebind p and reg below. */
              p = schedule(p, reds_in - p->fcalls);
              ERTS_SMP_REQ_PROC_MAIN_LOCK(p);
#ifdef ERTS_SMP
              p->hipe_smp.have_receive_locks = 0;
              reg = p->scheduler_data->x_reg_array;
#endif
          }
          {
              /* Restore the saved X registers of the scheduled-in process. */
              Eterm *argp;
              int i;

              argp = p->arg_reg;
              for (i = p->arity; --i >= 0;)
                  reg[i] = argp[i];
          }
          {
#if !(NR_ARG_REGS > 5)
              Eterm reds_in;
#endif
#if !(NR_ARG_REGS > 4)
              Eterm o_reds;
#endif
              /* Re-derive the in/out reduction counts for the new slice;
                 with call-saving active all reductions count as "out". */
              reds_in = p->fcalls;
              o_reds = 0;
              if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
                  o_reds = reds_in;
                  reds_in = 0;
                  p->fcalls = 0;
              }
              p->def_arg_reg[4] = o_reds;
              p->def_arg_reg[5] = reds_in;
              if (p->i == hipe_beam_pc_resume) {
                  /* Process was suspended in native mode: resume it there. */
                  p->i = NULL;
                  p->arity = 0;
                  goto do_resume;
              }
          }
          HIPE_CHECK_PCB(p);
          result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
          p->def_arg_reg[3] = result;
          return p;
      }
      case HIPE_MODE_SWITCH_RES_APPLY: {
          Eterm mfa[3], args;
          unsigned int arity;
          void *address;

          hipe_pop_params(p, 3, &mfa[0]);

          /* Unroll the arglist onto reg[]. */
          args = mfa[2];
          arity = 0;
          while (is_list(args)) {
              if (arity < 255) {
                  reg[arity++] = CAR(list_val(args));
                  args = CDR(list_val(args));
              } else
                  goto do_apply_fail;
          }
          if (is_not_nil(args))
              goto do_apply_fail;   /* improper argument list */

          /* find a native code entry point for {M,F,A} for a remote call */
          address = hipe_get_remote_na(mfa[0], mfa[1], arity);
          if (!address)
              goto do_apply_fail;
          p->hipe.u.ncallee = (void(*)(void)) address;
          result = hipe_tailcall_to_native(p, arity, reg);
          goto do_return_from_native;
      do_apply_fail:
          p->freason = BADARG;
          goto do_throw_to_native;
      }
      default:
          erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %#x\r\n", result);
    }
    HIPE_CHECK_PCB(p);
    /* Hand the result code and reduction counts back to beam_emu. */
    p->def_arg_reg[3] = result;
#if NR_ARG_REGS > 4
    p->def_arg_reg[4] = o_reds;
#endif
#if NR_ARG_REGS > 5
    p->def_arg_reg[5] = reds_in;
#endif
    return p;
}
Eterm load_module_2(BIF_ALIST_2) { Eterm reason; Eterm* hp; int i; int sz; byte* code; int trace_pattern_is_on; Binary *match_spec; Binary *meta_match_spec; struct trace_pattern_flags trace_pattern_flags; Eterm meta_tracer_pid; Eterm res; byte* temp_alloc = NULL; if (is_not_atom(BIF_ARG_1)) { error: erts_free_aligned_binary_bytes(temp_alloc); BIF_ERROR(BIF_P, BADARG); } if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) { goto error; } erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_block_system(0); erts_export_consolidate(); hp = HAlloc(BIF_P, 3); sz = binary_size(BIF_ARG_2); if ((i = erts_load_module(BIF_P, 0, BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) { switch (i) { case -1: reason = am_badfile; break; case -2: reason = am_nofile; break; case -3: reason = am_not_purged; break; case -4: reason = am_atom_put("native_code", sizeof("native_code")-1); break; default: reason = am_badfile; break; } res = TUPLE2(hp, am_error, reason); goto done; } erts_get_default_trace_pattern(&trace_pattern_is_on, &match_spec, &meta_match_spec, &trace_pattern_flags, &meta_tracer_pid); if (trace_pattern_is_on) { Eterm mfa[1]; mfa[0] = BIF_ARG_1; (void) erts_set_trace_pattern(mfa, 1, match_spec, meta_match_spec, 1, trace_pattern_flags, meta_tracer_pid); } res = TUPLE2(hp, am_module, BIF_ARG_1); done: erts_free_aligned_binary_bytes(temp_alloc); erts_smp_release_system(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(res); }
/*
 * load_module/2 BIF (later variant with on_load support): load the BEAM
 * code in binary BIF_ARG_2 as module BIF_ARG_1.  Blocks the whole system
 * while the loader runs.  Returns {module, Mod} or {error, Reason}.
 */
Eterm load_module_2(BIF_ALIST_2)
{
    Eterm reason;
    Eterm* hp;
    int i;                      /* loader result code */
    int sz;                     /* byte size of the code binary */
    byte* code;
    Eterm res;
    byte* temp_alloc = NULL;    /* set if an aligned copy was made */

    /* Module name must be an atom. */
    if (is_not_atom(BIF_ARG_1)) {
    error:
	erts_free_aligned_binary_bytes(temp_alloc);
	BIF_ERROR(BIF_P, BADARG);
    }
    /* Code argument must be a binary; get an aligned byte view of it. */
    if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
	goto error;
    }
    /* Loading mutates global code state: block the system first. */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_block_system(0);

    erts_export_consolidate();

    hp = HAlloc(BIF_P, 3);      /* room for one 2-tuple result */
    sz = binary_size(BIF_ARG_2);
    if ((i = erts_load_module(BIF_P, 0, BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) {
	/* Map the loader's numeric error code onto a reason atom. */
	switch (i) {
	case -1: reason = am_badfile; break;
	case -2: reason = am_nofile; break;
	case -3: reason = am_not_purged; break;
	case -4:
	    reason = am_atom_put("native_code", sizeof("native_code")-1);
	    break;
	case -5:
	    {
		/*
		 * The module contains an on_load function. The loader
		 * has loaded the module as usual, except that the
		 * export entries does not point into the module, so it
		 * is not possible to call any code in the module.
		 */
		ERTS_DECL_AM(on_load);
		reason = AM_on_load;
		break;
	    }
	default: reason = am_badfile; break;
	}
	res = TUPLE2(hp, am_error, reason);
	goto done;
    }

    set_default_trace_pattern(BIF_ARG_1);
    res = TUPLE2(hp, am_module, BIF_ARG_1);

 done:
    /* Common exit: free the aligned copy, unblock, retake the main lock. */
    erts_free_aligned_binary_bytes(temp_alloc);
    erts_smp_release_system();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    BIF_RET(res);
}
/*
 * finish_after_on_load/2 BIF (pre-code_ix variant): finalize a module whose
 * on_load function has run.  BIF_ARG_1 is the module, BIF_ARG_2 is 'true'
 * (on_load succeeded: activate the export entries) or 'false' (on_load
 * failed: purge the newly loaded code).  Runs with the system blocked.
 */
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    Module* modp = erts_get_module(BIF_ARG_1);
    Eterm on_load;

    /* Module must exist and have loaded code. */
    if (!modp || modp->code == 0) {
    error:
	BIF_ERROR(BIF_P, BADARG);
    }
    /* Module must actually have a pending on_load function. */
    if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    /* We patch global export entries / free code: block the system. */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_block_system(0);

    if (BIF_ARG_2 == am_true) {
	int i;

	/*
	 * The on_load function succeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(); i++) {
	    Export *ep = export_list(i);
	    /* code[0] is the module atom; code[4] holds the saved
	       (real) address that the loader stashed away. */
	    if (ep != NULL && ep->code[0] == BIF_ARG_1 && ep->code[4] != 0) {
		ep->address = (void *) ep->code[4];
		ep->code[3] = 0;
		ep->code[4] = 0;
	    }
	}
	modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
	set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
	Eterm* code;
	Eterm* end;

	/*
	 * The on_load function failed. Remove the loaded code.
	 * This is an combination of delete and purge. We purge
	 * the current code; the old code is not touched.
	 */
	erts_total_code_size -= modp->code_length;
	code = modp->code;
	end = (Eterm *)((char *)code + modp->code_length);
	erts_cleanup_funs_on_purge(code, end);
	beam_catches_delmod(modp->catches, code, modp->code_length);
	erts_free(ERTS_ALC_T_CODE, (void *) code);
	modp->code = NULL;
	modp->code_length = 0;
	modp->catches = BEAM_CATCHES_NIL;
	/* NOTE(review): 'code' has already been freed here; presumably
	   remove_from_address_table() uses the pointer value only as a
	   lookup key and never dereferences it — confirm. */
	remove_from_address_table(code);
    }
    erts_smp_release_system();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    BIF_RET(am_true);
}
void erts_whereis_name(Process *c_p, ErtsProcLocks c_p_locks, Eterm name, Process** proc, ErtsProcLocks need_locks, int flags, Port** port, int lock_port) { RegProc* rp = NULL; HashValue hval; int ix; HashBucket* b; #ifdef ERTS_SMP ErtsProcLocks current_c_p_locks; Port *pending_port = NULL; if (!c_p) c_p_locks = 0; current_c_p_locks = c_p_locks; restart: reg_safe_read_lock(c_p, ¤t_c_p_locks); /* Locked locks: * - port lock on pending_port if pending_port != NULL * - read reg lock * - current_c_p_locks (either c_p_locks or 0) on c_p */ #endif hval = REG_HASH(name); ix = hval % process_reg.size; b = process_reg.bucket[ix]; /* * Note: We have inlined the code from hash.c for speed. */ while (b) { if (((RegProc *) b)->name == name) { rp = (RegProc *) b; break; } b = b->next; } if (proc) { if (!rp) *proc = NULL; else { #ifdef ERTS_SMP if (!rp->p) *proc = NULL; else { if (need_locks) { erts_proc_safelock(c_p, current_c_p_locks, c_p_locks, rp->p, 0, need_locks); current_c_p_locks = c_p_locks; } if ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)) *proc = rp->p; else { if (need_locks) erts_smp_proc_unlock(rp->p, need_locks); *proc = NULL; } } #else if (rp->p && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))) *proc = rp->p; else *proc = NULL; #endif if (*proc && (flags & ERTS_P2P_FLG_INC_REFC)) erts_proc_inc_refc(*proc); } } if (port) { if (!rp || !rp->pt) *port = NULL; else { #ifdef ERTS_SMP if (lock_port) { if (pending_port == rp->pt) pending_port = NULL; else { if (pending_port) { /* Ahh! Registered port changed while reg lock was unlocked... */ erts_port_release(pending_port); pending_port = NULL; } if (erts_smp_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
*/ if (current_c_p_locks) { erts_smp_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); pending_port = erts_id2port(id); goto restart; } } ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); } #endif *port = rp->pt; } } #ifdef ERTS_SMP if (c_p && !current_c_p_locks) erts_smp_proc_lock(c_p, c_p_locks); if (pending_port) erts_port_release(pending_port); #endif reg_read_unlock(); }
/*
 * Common implementation of the port_command BIFs: send the data in arg2
 * to the port identified (by id or registered name) by arg1.  'flags'
 * selects force/nosuspend behaviour (ERTS_PORT_COMMAND_FLAG_*).  Emits
 * scheduling trace/profile events around the potentially blocking steps.
 */
static BIF_RETTYPE do_port_command(Process *BIF_P,
				   Eterm arg1,
				   Eterm arg2,
				   Eterm arg3,
				   Uint32 flags)
{
    BIF_RETTYPE res;
    Port *p;

    /* Trace sched out before lock check wait */
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(BIF_P, am_out);
    }

    if (erts_system_profile_flags.runnable_procs
	&& erts_system_profile_flags.exclusive) {
	profile_runnable_proc(BIF_P, am_inactive);
    }

    p = id_or_name2port(BIF_P, arg1);
    if (!p) {
	/* No such port: trace the process back in before raising badarg. */
	if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	    trace_virtual_sched(BIF_P, am_in);
	}
	if (erts_system_profile_flags.runnable_procs
	    && erts_system_profile_flags.exclusive) {
	    profile_runnable_proc(BIF_P, am_active);
	}
	BIF_ERROR(BIF_P, BADARG);
    }

    /* Trace port in, id_or_name2port causes wait */
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_in, am_command);
    }
    if (erts_system_profile_flags.runnable_ports
	&& !erts_port_is_scheduled(p)) {
	profile_runnable_port(p, am_active);
    }

    ERTS_BIF_PREP_RET(res, am_true);	/* default result */

    if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
	&& !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
	/* 'force' requires a driver that advertises soft-busy support. */
	ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
    }
    else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
	     && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
	if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
	    /* Busy + nosuspend: report false instead of suspending. */
	    ERTS_BIF_PREP_RET(res, am_false);
	}
	else {
	    /* Busy: suspend the caller and retry the BIF later. */
	    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
	    if (erts_system_monitor_flags.busy_port) {
		monitor_generic(BIF_P, am_busy_port, p->id);
	    }
	    ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3],
				 BIF_P, arg1, arg2, arg3);
	}
    } else {
	int wres;
	/* The driver callback may block; drop the main lock around it. */
	erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	wres = erts_write_to_port(BIF_P->id, p, arg2);
	erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	if (wres != 0) {
	    ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
	}
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_out, am_command);
    }
    if (erts_system_profile_flags.runnable_ports
	&& !erts_port_is_scheduled(p)) {
	profile_runnable_port(p, am_inactive);
    }

    erts_port_release(p);
    /* Trace sched in after port release */
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(BIF_P, am_in);
    }
    if (erts_system_profile_flags.runnable_procs
	&& erts_system_profile_flags.exclusive) {
	profile_runnable_proc(BIF_P, am_active);
    }

    /* The caller may have been killed while the main lock was released. */
    if (ERTS_PROC_IS_EXITING(BIF_P)) {
	KILL_CATCHES(BIF_P);	/* Must exit */
	ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }
    return res;
}
/*
 * Unregister a name.
 * Returns 0 if the name was not registered, 1 otherwise.
 *
 * If 'name' is THE_NON_VALUE the name currently registered for c_p
 * (if any) is unregistered instead.
 *
 * Fix: the reg_safe_write_lock() argument had been mojibake-corrupted to
 * '¤t_c_p_locks' ("&curren" eaten by HTML-entity decoding); restored
 * to '&current_c_p_locks'.
 */
int erts_unregister_name(Process *c_p,
			 ErtsProcLocks c_p_locks,
			 Port *c_prt,
			 Eterm name)
{
    int res = 0;
    RegProc r, *rp;
    Port *port = c_prt;
    ErtsProcLocks current_c_p_locks = 0;
#ifdef ERTS_SMP
    /*
     * SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
     *           we are *not* allowed to temporarily release the lock
     *           on c_prt.
     */

    if (!c_p) {
	c_p_locks = 0;
    }
    current_c_p_locks = c_p_locks;

 restart:

    reg_safe_write_lock(c_p, &current_c_p_locks);
#endif

    r.name = name;
    if (is_non_value(name)) {
	/* Unregister current process name */
	ASSERT(c_p);
#ifdef ERTS_SMP
	if (current_c_p_locks != c_p_locks) {
	    erts_smp_proc_lock(c_p, c_p_locks);
	    current_c_p_locks = c_p_locks;
	}
#endif
	if (c_p->common.u.alive.reg) {
	    r.name = c_p->common.u.alive.reg->name;
	} else {
	    /* Name got unregistered while main lock was released */
	    res = 0;
	    goto done;
	}
    }

    if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
	if (rp->pt) {
	    /* The name maps to a port. */
	    if (port != rp->pt) {
#ifdef ERTS_SMP
		if (port) {
		    /* Drop the previously acquired (wrong) port. */
		    ASSERT(port != c_prt);
		    erts_port_release(port);
		    port = NULL;
		}

		if (erts_smp_port_trylock(rp->pt) == EBUSY) {
		    Eterm id = rp->pt->common.id; /* id read only... */
		    /* Unlock all locks, acquire port lock, and restart... */
		    if (current_c_p_locks) {
			erts_smp_proc_unlock(c_p, current_c_p_locks);
			current_c_p_locks = 0;
		    }
		    reg_write_unlock();
		    port = erts_id2port(id);
		    goto restart;
		}
#endif
		port = rp->pt;
	    }

	    ASSERT(rp->pt == port);
	    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));

	    rp->pt->common.u.alive.reg = NULL;

	    if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
		/* Tracing may block; release c_p's locks first. */
		if (current_c_p_locks) {
		    erts_smp_proc_unlock(c_p, current_c_p_locks);
		    current_c_p_locks = 0;
		}
		trace_port(port, am_unregister, r.name);
	    }
	} else if (rp->p) {
	    /* The name maps to a process. */
#ifdef ERTS_SMP
	    erts_proc_safelock(c_p,
			       current_c_p_locks,
			       c_p_locks,
			       rp->p,
			       (c_p == rp->p) ? current_c_p_locks : 0,
			       ERTS_PROC_LOCK_MAIN);
	    current_c_p_locks = c_p_locks;
#endif
	    rp->p->common.u.alive.reg = NULL;
	    if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
		trace_proc(rp->p,
			   (c_p == rp->p) ? c_p_locks : ERTS_PROC_LOCK_MAIN,
			   rp->p, am_unregister, r.name);
	    }
#ifdef ERTS_SMP
	    if (rp->p != c_p) {
		erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
	    }
#endif
	}
	hash_erase(&process_reg, (void*) &r);
	res = 1;
    }

 done:

    reg_write_unlock();
    if (c_prt != port) {
	/* Restore the lock state the caller expects on c_prt. */
	if (port) {
	    erts_port_release(port);
	}
	if (c_prt) {
	    erts_smp_port_lock(c_prt);
	}
    }
#ifdef ERTS_SMP
    if (c_p && !current_c_p_locks) {
	erts_smp_proc_lock(c_p, c_p_locks);
    }
#endif
    return res;
}
/*
 * erlang:port_call/3 implementation: synchronously invoke the 'call'
 * callback of the driver behind port arg1 with operation arg2 and the
 * externally encoded term arg3.  The driver's reply (external format)
 * is decoded and returned as an Erlang term; any failure raises badarg.
 */
static BIF_RETTYPE port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
{
    Uint op;
    Port *p;
    Uint size;
    byte *bytes;
    byte *endp;
    ErlDrvSizeT real_size;
    erts_driver_t *drv;
    byte port_input[256];	/* Default input buffer to encode in */
    byte port_result[256];	/* Buffer for result from port. */
    byte* port_resp;		/* Pointer to result buffer. */
    char *prc;
    ErlDrvSSizeT ret;
    Eterm res;
    Sint result_size;
    Eterm *hp;
    Eterm *hp_end;              /* To satisfy hybrid heap architecture */
    unsigned ret_flags = 0U;
    int fpe_was_unmasked;

    bytes = &port_input[0];
    port_resp = port_result;
    /* trace of port scheduling with virtual process descheduling
     * lock wait
     */
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(c_p, am_out);
    }
    if (erts_system_profile_flags.runnable_procs
	&& erts_system_profile_flags.exclusive) {
	profile_runnable_proc(c_p, am_inactive);
    }

    p = id_or_name2port(c_p, arg1);
    if (!p) {
    error:
	/* Shared failure exit: free whatever was allocated so far.
	   Note p may be NULL (first failure) or locked (later gotos). */
	if (port_resp != port_result &&
	    !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
	    driver_free(port_resp);
	}
	if (bytes != &port_input[0])
	    erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
	/* Need to virtual schedule in the process if there
	 * was an error.
	 */
	if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	    trace_virtual_sched(c_p, am_in);
	}
	if (erts_system_profile_flags.runnable_procs
	    && erts_system_profile_flags.exclusive) {
	    profile_runnable_proc(c_p, am_active);
	}
	if (p)
	    erts_port_release(p);
#ifdef ERTS_SMP
	ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
	ERTS_BIF_CHK_EXITED(c_p);
#endif
	BIF_ERROR(c_p, BADARG);
    }

    /* The driver must exist and implement the 'call' callback. */
    if ((drv = p->drv_ptr) == NULL) {
	goto error;
    }
    if (drv->call == NULL) {
	goto error;
    }
    /* Operation must be an unsigned integer. */
    if (!term_to_Uint(arg2, &op)) {
	goto error;
    }
    p->caller = c_p->id;

    /* Lock taken, virtual schedule of port */
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_in, am_call);
    }
    if (erts_system_profile_flags.runnable_ports
	&& !erts_port_is_scheduled(p)) {
	profile_runnable_port(p, am_active);
    }
    /* Encode arg3 to external format, on the stack if it fits. */
    size = erts_encode_ext_size(arg3);
    if (size > sizeof(port_input))
	bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);

    endp = bytes;
    erts_encode_ext(arg3, &endp);

    real_size = endp - bytes;
    if (real_size > size) {
	/* Encoder wrote past the size it predicted: fatal. */
	erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
		 __FILE__, __LINE__, endp - (bytes + size));
    }
    /* The driver callback may block: release the main process lock. */
    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(driver_call)) {
	DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
	DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

	dtrace_pid_str(p->connected, process_str);
	dtrace_port_str(p, port_str);
	DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
    }
#endif
    prc = (char *) port_resp;
    /* Block FP exceptions while running driver code. */
    fpe_was_unmasked = erts_block_fpe();
    ret = drv->call((ErlDrvData)p->drv_data,
		    (unsigned) op,
		    (char *) bytes,
		    (int) real_size,
		    &prc,
		    (int) sizeof(port_result),
		    &ret_flags);
    erts_unblock_fpe(fpe_was_unmasked);
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_out, am_call);
    }
    if (erts_system_profile_flags.runnable_ports
	&& !erts_port_is_scheduled(p)) {
	profile_runnable_port(p, am_inactive);
    }
    /* The driver may have replaced the response buffer via prc. */
    port_resp = (byte *) prc;
    p->caller = NIL;
    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef HARDDEBUG
    {
	ErlDrvSizeT z;
	printf("real_size = %ld,%d, ret = %ld,%d\r\n",
	       (unsigned long) real_size, (int) real_size,
	       (unsigned long) ret, (int) ret);
	printf("[");
	for (z = 0; z < real_size; ++z) {
	    printf("%d, ", (int) bytes[z]);
	}
	printf("]\r\n");
	printf("[");
	for (z = 0; z < ret; ++z) {
	    printf("%d, ", (int) port_resp[z]);
	}
	printf("]\r\n");
    }
#endif
    if (ret <= 0 || port_resp[0] != VERSION_MAGIC) {
	/* Error or a binary without magic/ with wrong magic */
	goto error;
    }
    result_size = erts_decode_ext_size(port_resp, ret);
    if (result_size < 0) {
	goto error;
    }
    hp = HAlloc(c_p, result_size);
    hp_end = hp + result_size;
    endp = port_resp;
    res = erts_decode_ext(&hp, &MSO(c_p), &endp);
    if (res == THE_NON_VALUE) {
	goto error;
    }
    HRelease(c_p, hp_end, hp);	/* give back unused heap */
    /* Success path cleanup mirrors the error path above. */
    if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
	driver_free(port_resp);
    }
    if (bytes != &port_input[0])
	erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
    if (p)
	erts_port_release(p);
#ifdef ERTS_SMP
    ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
    ERTS_BIF_CHK_EXITED(c_p);
#endif
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(c_p, am_in);
    }
    if (erts_system_profile_flags.runnable_procs
	&& erts_system_profile_flags.exclusive) {
	profile_runnable_proc(c_p, am_active);
    }
    return res;
}
/*
 * finish_after_on_load/2 BIF (code_ix variant): finalize a module whose
 * on_load function has run.  BIF_ARG_2 is 'true' (activate the export
 * entries) or 'false' (remove the newly loaded code).  Serialized via the
 * code write permission and thread-progress blocking.
 */
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Module* modp;

    /* Only one code-modifying operation at a time; yield and retry
       if someone else holds the permission. */
    if (!erts_try_seize_code_write_permission(BIF_P)) {
	ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
			BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    /* ToDo: Use code_ix staging instead of thread blocking */

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    code_ix = erts_active_code_ix();
    modp = erts_get_module(BIF_ARG_1, code_ix);

    /* Module must exist and have loaded code. */
    if (!modp || !modp->curr.code_hdr) {
    error:
	erts_smp_thr_progress_unblock();
	erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	erts_release_code_write_permission();
	BIF_ERROR(BIF_P, BADARG);
    }
    /* Module must actually have a pending on_load function. */
    if (modp->curr.code_hdr->on_load_function_ptr == NULL) {
	goto error;
    }
    if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
	goto error;
    }

    if (BIF_ARG_2 == am_true) {
	int i;

	/*
	 * The on_load function succeded. Fix up export entries.
	 */
	for (i = 0; i < export_list_size(code_ix); i++) {
	    Export *ep = export_list(i,code_ix);
	    /* code[0] is the module atom; code[4] holds the saved
	       (real) address stashed by the loader. */
	    if (ep != NULL && ep->code[0] == BIF_ARG_1 && ep->code[4] != 0) {
		ep->addressv[code_ix] = (void *) ep->code[4];
		ep->code[4] = 0;
	    }
	}
	modp->curr.code_hdr->on_load_function_ptr = NULL;
	set_default_trace_pattern(BIF_ARG_1);
    } else if (BIF_ARG_2 == am_false) {
	BeamInstr* code;
	BeamInstr* end;

	/*
	 * The on_load function failed. Remove the loaded code.
	 * This is an combination of delete and purge. We purge
	 * the current code; the old code is not touched.
	 */
	erts_total_code_size -= modp->curr.code_length;
	code = (BeamInstr*) modp->curr.code_hdr;
	end = (BeamInstr *) ((char *)code + modp->curr.code_length);
	erts_cleanup_funs_on_purge(code, end);
	beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
			    erts_active_code_ix());
	if (modp->curr.code_hdr->literals_start) {
	    erts_free(ERTS_ALC_T_LITERAL,
		      modp->curr.code_hdr->literals_start);
	}
	erts_free(ERTS_ALC_T_CODE, modp->curr.code_hdr);
	modp->curr.code_hdr = NULL;
	modp->curr.code_length = 0;
	modp->curr.catches = BEAM_CATCHES_NIL;
	/* NOTE(review): 'code' was freed above; presumably
	   erts_remove_from_ranges() uses the pointer value only as a
	   range key and never dereferences it — confirm. */
	erts_remove_from_ranges(code);
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    BIF_RET(am_true);
}
/*
 * Implementation behind erlang:open_port/2: parse 'name' (the port
 * specification: atom/string, {spawn,..}, {spawn_driver,..},
 * {spawn_executable,..} or {fd,In,Out}) and the 'settings' option list,
 * then start the matching driver.  Returns the new port number, or a
 * negative value on failure with *err_nump set (BADARG → port_num -3).
 */
static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
{
#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0)
    int i, port_num;
    Eterm option;
    Uint arity;
    Eterm* tp;
    Uint* nargs;
    erts_driver_t* driver;
    char* name_buf = NULL;
    SysDriverOpts opts;
    int binary_io;
    int soft_eof;
    Sint linebuf;
    Eterm edir = NIL;
    byte dir[MAXPATHLEN];

    /* These are the defaults */
    opts.packet_bytes = 0;
    opts.use_stdio = 1;
    opts.redir_stderr = 0;
    opts.read_write = 0;
    opts.hide_window = 0;
    opts.wd = NULL;
    opts.envir = NULL;
    opts.exit_status = 0;
    opts.overlapped_io = 0;
    opts.spawn_type = ERTS_SPAWN_ANY;
    opts.argv = NULL;
    binary_io = 0;
    soft_eof = 0;
    linebuf = 0;

    *err_nump = 0;

    if (is_not_list(settings) && is_not_nil(settings)) {
	goto badarg;
    }
    /*
     * Parse the settings.
     */

    if (is_not_nil(settings)) {
	nargs = list_val(settings);
	while (1) {
	    if (is_tuple_arity(*nargs, 2)) {
		/* {Option, Value} pairs. */
		tp = tuple_val(*nargs);
		arity = *tp++;
		option = *tp++;
		if (option == am_packet) {
		    if (is_not_small(*tp)) {
			goto badarg;
		    }
		    opts.packet_bytes = signed_val(*tp);
		    /* Only 1-, 2- and 4-byte packet headers are valid. */
		    switch (opts.packet_bytes) {
		    case 1:
		    case 2:
		    case 4:
			break;
		    default:
			goto badarg;
		    }
		} else if (option == am_line) {
		    if (is_not_small(*tp)) {
			goto badarg;
		    }
		    linebuf = signed_val(*tp);
		    if (linebuf <= 0) {
			goto badarg;
		    }
		} else if (option == am_env) {
		    byte* bytes;
		    if ((bytes = convert_environment(p, *tp)) == NULL) {
			goto badarg;
		    }
		    opts.envir = (char *) bytes;
		} else if (option == am_args) {
		    char **av;
		    char **oav = opts.argv;
		    if ((av = convert_args(*tp)) == NULL) {
			goto badarg;
		    }
		    opts.argv = av;
		    if (oav) {
			/* Keep the previously set argv[0] (from arg0). */
			opts.argv[0] = oav[0];
			oav[0] = erts_default_arg0;
			free_args(oav);
		    }
		} else if (option == am_arg0) {
		    char *a0;
		    if ((a0 = erts_convert_filename_to_native(*tp, ERTS_ALC_T_TMP, 1)) == NULL) {
			goto badarg;
		    }
		    if (opts.argv == NULL) {
			opts.argv = erts_alloc(ERTS_ALC_T_TMP,
					       2 * sizeof(char **));
			opts.argv[0] = a0;
			opts.argv[1] = NULL;
		    } else {
			if (opts.argv[0] != erts_default_arg0) {
			    erts_free(ERTS_ALC_T_TMP, opts.argv[0]);
			}
			opts.argv[0] = a0;
		    }
		} else if (option == am_cd) {
		    edir = *tp;	/* working directory, converted below */
		} else {
		    goto badarg;
		}
	    } else if (*nargs == am_stream) {
		opts.packet_bytes = 0;
	    } else if (*nargs == am_use_stdio) {
		opts.use_stdio = 1;
	    } else if (*nargs == am_stderr_to_stdout) {
		opts.redir_stderr = 1;
	    } else if (*nargs == am_line) {
		linebuf = 512;	/* default line buffer size */
	    } else if (*nargs == am_nouse_stdio) {
		opts.use_stdio = 0;
	    } else if (*nargs == am_binary) {
		binary_io = 1;
	    } else if (*nargs == am_in) {
		opts.read_write |= DO_READ;
	    } else if (*nargs == am_out) {
		opts.read_write |= DO_WRITE;
	    } else if (*nargs == am_eof) {
		soft_eof = 1;
	    } else if (*nargs == am_hide) {
		opts.hide_window = 1;
	    } else if (*nargs == am_exit_status) {
		opts.exit_status = 1;
	    } else if (*nargs == am_overlapped_io) {
		opts.overlapped_io = 1;
	    } else {
		goto badarg;
	    }
	    if (is_nil(*++nargs))
		break;
	    if (is_not_list(*nargs)) {
		goto badarg;
	    }
	    nargs = list_val(*nargs);
	}
    }
    if (opts.read_write == 0)	/* implement default */
	opts.read_write = DO_READ|DO_WRITE;

    /* Mutually exclusive arguments. */
    if ((linebuf && opts.packet_bytes) ||
	(opts.redir_stderr && !opts.use_stdio)) {
	goto badarg;
    }

    /*
     * Parse the first argument and start the appropriate driver.
     */
    if (is_atom(name) || (i = is_string(name))) {
	/* a vanilla port */
	if (is_atom(name)) {
	    name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
					   atom_tab(atom_val(name))->len+1);
	    sys_memcpy((void *) name_buf,
		       (void *) atom_tab(atom_val(name))->name,
		       atom_tab(atom_val(name))->len);
	    name_buf[atom_tab(atom_val(name))->len] = '\0';
	} else {
	    name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
	    if (intlist_to_buf(name, name_buf, i) != i)
		erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
	    name_buf[i] = '\0';
	}
	driver = &vanilla_driver;
    } else {
	if (is_not_tuple(name)) {
	    goto badarg;		/* Not a process or fd port */
	}
	tp = tuple_val(name);
	arity = *tp++;

	if (arity == make_arityval(0)) {
	    goto badarg;
	}

	if (*tp == am_spawn || *tp == am_spawn_driver) {	/* A process port */
	    if (arity != make_arityval(2)) {
		goto badarg;
	    }
	    name = tp[1];
	    if (is_atom(name)) {
		name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
					       atom_tab(atom_val(name))->len+1);
		sys_memcpy((void *) name_buf,
			   (void *) atom_tab(atom_val(name))->name,
			   atom_tab(atom_val(name))->len);
		name_buf[atom_tab(atom_val(name))->len] = '\0';
	    } else if ((i = is_string(name))) {
		name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
		if (intlist_to_buf(name, name_buf, i) != i)
		    erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
		name_buf[i] = '\0';
	    } else {
		goto badarg;
	    }
	    if (*tp == am_spawn_driver) {
		opts.spawn_type = ERTS_SPAWN_DRIVER;
	    }
	    driver = &spawn_driver;
	} else if (*tp == am_spawn_executable) {	/* A program */
	    /*
	     * {spawn_executable,Progname}
	     */
	    if (arity != make_arityval(2)) {
		goto badarg;
	    }
	    name = tp[1];
	    if ((name_buf = erts_convert_filename_to_native(name,ERTS_ALC_T_TMP,0)) == NULL) {
		goto badarg;
	    }
	    opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
	    driver = &spawn_driver;
	} else if (*tp == am_fd) {	/* An fd port */
	    int n;
	    struct Sint_buf sbuf;
	    char* p;

	    if (arity != make_arityval(3)) {
		goto badarg;
	    }
	    if (is_not_small(tp[1]) || is_not_small(tp[2])) {
		goto badarg;
	    }
	    opts.ifd = unsigned_val(tp[1]);
	    opts.ofd = unsigned_val(tp[2]);

	    /* Syntesize name from input and output descriptor. */
	    name_buf = erts_alloc(ERTS_ALC_T_TMP,
				  2*sizeof(struct Sint_buf) + 2);
	    p = Sint_to_buf(opts.ifd, &sbuf);
	    n = sys_strlen(p);
	    sys_strncpy(name_buf, p, n);
	    name_buf[n] = '/';
	    p = Sint_to_buf(opts.ofd, &sbuf);
	    sys_strcpy(name_buf+n+1, p);

	    driver = &fd_driver;
	} else {
	    goto badarg;
	}
    }

    if ((driver != &spawn_driver && opts.argv != NULL) ||
	(driver == &spawn_driver &&
	 opts.spawn_type != ERTS_SPAWN_EXECUTABLE &&
	 opts.argv != NULL)) {
	/* Argument vector only if explicit spawn_executable */
	goto badarg;
    }

    if (edir != NIL) {
	/* A working directory is expressed differently if spawn_executable,
	   i.e. Unicode is handles for spawn_executable... */
	if (opts.spawn_type != ERTS_SPAWN_EXECUTABLE) {
	    /* Wrap edir in [Edir, 0] and flatten it into dir[]. */
	    Eterm iolist;
	    DeclareTmpHeap(heap,4,p);
	    int r;

	    UseTmpHeap(4,p);
	    heap[0] = edir;
	    heap[1] = make_list(heap+2);
	    heap[2] = make_small(0);
	    heap[3] = NIL;
	    iolist = make_list(heap);
	    r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
	    UnUseTmpHeap(4,p);
	    if (r < 0) {
		goto badarg;
	    }
	    opts.wd = (char *) dir;
	} else {
	    if ((opts.wd = erts_convert_filename_to_native(edir,ERTS_ALC_T_TMP,0)) == NULL) {
		goto badarg;
	    }
	}
    }

    /* exit_status is only meaningful for spawned processes. */
    if (driver != &spawn_driver && opts.exit_status) {
	goto badarg;
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(p, am_out);
    }

    /* Starting the driver may block: drop the main process lock. */
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);

    port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
#ifdef USE_VM_PROBES
    if (port_num >= 0 && DTRACE_ENABLED(port_open)) {
	DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
	DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

	dtrace_proc_str(p, process_str);
	erts_snprintf(port_str, sizeof(port_str), "%T", erts_port[port_num].id);
	DTRACE3(port_open, process_str, name_buf, port_str);
    }
#endif
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);

    if (port_num < 0) {
	DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump));
	if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
	    trace_virtual_sched(p, am_in);
	}
	OPEN_PORT_ERROR(port_num);
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(p, am_in);
    }

    /* Apply the I/O mode flags to the freshly created port. */
    if (binary_io) {
	erts_port_status_bor_set(&erts_port[port_num],
				 ERTS_PORT_SFLG_BINARY_IO);
    }
    if (soft_eof) {
	erts_port_status_bor_set(&erts_port[port_num],
				 ERTS_PORT_SFLG_SOFT_EOF);
    }
    if (linebuf && erts_port[port_num].linebuf == NULL){
	erts_port[port_num].linebuf = allocate_linebuf(linebuf);
	erts_port_status_bor_set(&erts_port[port_num],
				 ERTS_PORT_SFLG_LINEBUF_IO);
    }

 do_return:
    /* Common exit for both success and failure: free temporaries. */
    if (name_buf)
	erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
    if (opts.argv) {
	free_args(opts.argv);
    }
    if (opts.wd && opts.wd != ((char *)dir)) {
	erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
    }
    return port_num;

 badarg:
    *err_nump = BADARG;
    OPEN_PORT_ERROR(-3);
    goto do_return; /* not reached: OPEN_PORT_ERROR already jumped */
#undef OPEN_PORT_ERROR
}
/*
 * Queue a message that is still in external (distribution) format on
 * the receiver's message queue.  Normally the message is enqueued
 * undecoded (ERL_MESSAGE_TERM == THE_NON_VALUE marks this); it is only
 * decoded here when the receiver is traced for 'receive', since the
 * trace needs the actual term.
 *
 * On entry *rcvr_locks must be exactly the locks held on rcvr (see the
 * lock-checker assertion).  ERTS_PROC_LOCK_MSGQ is acquired temporarily
 * when not already held and is released again before returning, so the
 * caller's lock set is unchanged on return.
 */
void
erts_queue_dist_message(Process *rcvr,
			ErtsProcLocks *rcvr_locks,
			ErtsDistExternal *dist_ext,
			Eterm token)
{
    ErtsMessage* mp;
#ifdef USE_VM_PROBES
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
#ifdef ERTS_SMP
    erts_aint_t state;
#endif

    ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    /* Allocate a message carrier with no heap fragment; the payload
     * stays in external format attached via data.dist_ext. */
    mp = erts_alloc_message(0, NULL);
    mp->data.dist_ext = dist_ext;

    /* THE_NON_VALUE flags the term as "not yet decoded". */
    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
	ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
	ERL_MESSAGE_TOKEN(mp) = token;

#ifdef ERTS_SMP
    if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
	if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
	    /* Trylock failed.  To block on MSGQ we must respect the lock
	     * order: if we hold STATUS, release it and take both. */
	    ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
	    if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) {
		erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
		need_locks |= ERTS_PROC_LOCK_STATUS;
	    }
	    erts_smp_proc_lock(rcvr, need_locks);
	}
    }

    state = erts_smp_atomic32_read_acqb(&rcvr->state);
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
	if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
	/* Drop message if receiver is exiting or has a pending exit ... */
	erts_cleanup_messages(mp);
    }
    else
#endif
    if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
	/* Ahh... need to decode it in order to trace it... */
	if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
	if (!erts_decode_dist_message(rcvr, *rcvr_locks, mp, 0))
	    erts_free_message(mp);
	else {
	    /* Decoded successfully; enqueue as a regular message. */
	    Eterm msg = ERL_MESSAGE_TERM(mp);
	    token = ERL_MESSAGE_TOKEN(mp);
#ifdef USE_VM_PROBES
	    if (DTRACE_ENABLED(message_queued)) {
		DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

		dtrace_proc_str(rcvr, receiver_name);
		if (have_seqtrace(token)) {
		    tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
		    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
		    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
		}
		DTRACE6(message_queued,
			receiver_name, size_object(msg), rcvr->msg.len,
			tok_label, tok_lastcnt, tok_serial);
	    }
#endif
	    erts_queue_message(rcvr, rcvr_locks, mp, msg);
	}
    }
    else {
	/* Enqueue message on external format */
#ifdef USE_VM_PROBES
	if (DTRACE_ENABLED(message_queued)) {
	    DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

	    dtrace_proc_str(rcvr, receiver_name);
	    if (have_seqtrace(token)) {
		tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
		tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
		tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
	    }
	    /*
	     * TODO: We don't know the real size of the external message here.
	     *       -1 will appear to a D script as 4294967295.
	     */
	    DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
		    tok_label, tok_lastcnt, tok_serial);
	}
#endif

	LINK_MESSAGE(rcvr, mp, &mp->next, 1);

	if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

	erts_proc_notify_new_message(rcvr,
#ifdef ERTS_SMP
				     *rcvr_locks
#else
				     0
#endif
	    );
    }
}
/*
 * Add a message last in the receiver's message queue.
 *
 * c_p            - sending process when it is a local process; NULL for
 *                  sends without a sending-process context (not read
 *                  in this body -- presumably kept for interface symmetry;
 *                  TODO confirm against other callers).
 * receiver_locks - locks currently held on receiver; MSGQ is acquired
 *                  here if missing and released again before return.
 * receiver_state - if non-NULL, used as a cached copy of receiver->state
 *                  and refreshed after MSGQ has been locked.
 * bp             - heap fragment holding message/seq_trace_token, or NULL
 *                  when the message lives on the receiver's heap.
 *
 * Returns the resulting message-queue length, or 0 when the message is
 * dropped because the receiver is exiting / has a pending exit.
 */
static Sint
queue_message(Process *c_p,
	      Process* receiver,
	      ErtsProcLocks *receiver_locks,
	      erts_aint32_t *receiver_state,
	      ErlHeapFragment* bp,
	      Eterm message,
	      Eterm seq_trace_token
#ifdef USE_VM_PROBES
	      , Eterm dt_utag
#endif
    )
{
    Sint res;
    ErlMessage* mp;
    int locked_msgq = 0;
    erts_aint_t state;

#ifndef ERTS_SMP
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

    if (receiver_state)
	state = *receiver_state;
    else
	state = erts_smp_atomic32_read_acqb(&receiver->state);

#ifdef ERTS_SMP

    /* Cheap early-out before touching the MSGQ lock. */
    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
	goto exiting;

    if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
	if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
	    /* Trylock failed.  Blocking on MSGQ requires dropping STATUS
	     * first (lock order) and re-taking both. */
	    ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
	    if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
		erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
		need_locks |= ERTS_PROC_LOCK_STATUS;
	    }
	    erts_smp_proc_lock(receiver, need_locks);
	}
	locked_msgq = 1;
	/* Re-read state now that MSGQ is held; the receiver may have
	 * started exiting while we were waiting for the lock. */
	state = erts_smp_atomic32_read_nob(&receiver->state);
	if (receiver_state)
	    *receiver_state = state;
    }

#endif

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
#ifdef ERTS_SMP
    exiting:
#endif
	/* Drop message if receiver is exiting or has a pending exit... */
	if (locked_msgq)
	    erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
	if (bp)
	    free_message_buffer(bp);
	message_free(mp);
	return 0;
    }

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
#endif
    mp->next = NULL;
    mp->data.heap_frag = bp;

#ifndef ERTS_SMP
    res = receiver->msg.len;
#else
    res = receiver->msg_inq.len;
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
	/*
	 * We move 'in queue' to 'private queue' and place
	 * message at the end of 'private queue' in order
	 * to ensure that the 'in queue' doesn't contain
	 * references into the heap. By ensuring this,
	 * we don't need to include the 'in queue' in
	 * the root set when garbage collecting.
	 */
	res += receiver->msg.len;
	ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
	LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else
#endif
    {
	LINK_MESSAGE(receiver, mp);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(message_queued)) {
	DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
	Sint tok_label = 0;
	Sint tok_lastcnt = 0;
	Sint tok_serial = 0;

	dtrace_proc_str(receiver, receiver_name);
	if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
	    tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
	    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
	    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
	}
	DTRACE6(message_queued,
		receiver_name, size_object(message), receiver->msg.len,
		tok_label, tok_lastcnt, tok_serial);
    }
#endif

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
	trace_receive(receiver, message);

    if (locked_msgq)
	erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);

    erts_proc_notify_new_message(receiver,
#ifdef ERTS_SMP
				 *receiver_locks
#else
				 0
#endif
	);

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
    return res;
}
/*
 * Safely acquire locks on (up to) two processes without risking
 * deadlock, given the locks already held on each.
 *
 * The global lock order is: processes are ordered by pid, and within a
 * process locks are ordered by bit number.  To honour it we may first
 * have to release some already-held locks (any held lock ordered at or
 * after the first lock we still need) and then re-acquire everything in
 * order.
 *
 * is_managed   - non-zero when the caller is a managed (scheduler)
 *                thread; unmanaged callers take a temporary reference
 *                on a process while it is completely unlocked, so it
 *                cannot be deallocated under us.
 * a_proc       - may be NULL (then only b_proc is locked).
 * b_proc       - must not be NULL; may equal a_proc.
 * x_have_locks - locks already held; must be a subset of x_need_locks
 *                (this function never gives locks back to the caller).
 * x_need_locks - locks that are held when the function returns.
 */
static void
proc_safelock(int is_managed,
	      Process *a_proc,
	      ErtsProcLocks a_have_locks,
	      ErtsProcLocks a_need_locks,
	      Process *b_proc,
	      ErtsProcLocks b_have_locks,
	      ErtsProcLocks b_need_locks)
{
    Process *p1, *p2;
#ifdef ERTS_ENABLE_LOCK_CHECK
    Eterm pid1, pid2;
#endif
    ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
    ErtsProcLocks unlock_mask;
    int lock_no, refc1 = 0, refc2 = 0;

    ERTS_LC_ASSERT(b_proc);

    /* Determine inter process lock order...
     * Locks with the same lock order should be locked on p1 before p2.
     */
    if (a_proc) {
	if (a_proc->common.id < b_proc->common.id) {
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->common.id;
#endif
	    need_locks1 = a_need_locks;
	    have_locks1 = a_have_locks;
	    p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = b_proc->common.id;
#endif
	    need_locks2 = b_need_locks;
	    have_locks2 = b_have_locks;
	}
	else if (a_proc->common.id > b_proc->common.id) {
	    p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = b_proc->common.id;
#endif
	    need_locks1 = b_need_locks;
	    have_locks1 = b_have_locks;
	    p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = a_proc->common.id;
#endif
	    need_locks2 = a_need_locks;
	    have_locks2 = a_have_locks;
	}
	else {
	    /* Same process passed twice: merge both requests onto p1. */
	    ERTS_LC_ASSERT(a_proc == b_proc);
	    ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->common.id;
#endif
	    need_locks1 = a_need_locks | b_need_locks;
	    have_locks1 = a_have_locks | b_have_locks;
	    p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = 0;
#endif
	    need_locks2 = 0;
	    have_locks2 = 0;
	}
    }
    else {
	p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid1 = b_proc->common.id;
#endif
	need_locks1 = b_need_locks;
	have_locks1 = b_have_locks;
	p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid2 = 0;
#endif
	need_locks2 = 0;
	have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
	a_need_locks = 0;
	a_have_locks = 0;
#endif
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    /* have_locks must be a subset of need_locks: this function only
     * acquires locks for the caller, never releases them net. */
    if ((need_locks1 & have_locks1) != have_locks1)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().", pid1);
    if ((need_locks2 & have_locks2) != have_locks2)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().",
		     pid2);
#endif

    need_locks1 &= ~have_locks1;
    need_locks2 &= ~have_locks2;

    /* Figure out the range of locks that needs to be unlocked... */
    unlock_mask = ERTS_PROC_LOCKS_ALL;
    for (lock_no = 0;
	 lock_no <= ERTS_PROC_LOCK_MAX_BIT;
	 lock_no++) {
	ErtsProcLocks lock = (1 << lock_no);
	if (lock & need_locks1)
	    break;
	unlock_mask &= ~lock;
	if (lock & need_locks2)
	    break;
    }

    /* ... and unlock locks in that range... */
    if (have_locks1 || have_locks2) {
	ErtsProcLocks unlock_locks;
	unlock_locks = unlock_mask & have_locks1;
	if (unlock_locks) {
	    have_locks1 &= ~unlock_locks;
	    need_locks1 |= unlock_locks;
	    /* Unmanaged thread about to drop its last lock on p1: take a
	     * reference so p1 survives while completely unlocked. */
	    if (!is_managed && !have_locks1) {
		refc1 = 1;
		erts_smp_proc_inc_refc(p1);
	    }
	    erts_smp_proc_unlock(p1, unlock_locks);
	}
	unlock_locks = unlock_mask & have_locks2;
	if (unlock_locks) {
	    have_locks2 &= ~unlock_locks;
	    need_locks2 |= unlock_locks;
	    if (!is_managed && !have_locks2) {
		refc2 = 1;
		erts_smp_proc_inc_refc(p2);
	    }
	    erts_smp_proc_unlock(p2, unlock_locks);
	}
    }

    /*
     * lock_no equals the number of the first lock to lock on
     * either p1 *or* p2.
     */

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);
#endif

    /* Lock locks in lock order...
     * For each run of consecutive bits needed only on one of the two
     * processes, lock the whole run with one call; switch process when
     * the other one needs the next lock. */
    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
	ErtsProcLocks locks;
	ErtsProcLocks lock = (1 << lock_no);
	ErtsProcLocks lock_mask = 0;
	if (need_locks1 & lock) {
	    do {
		lock = (1 << lock_no++);
		lock_mask |= lock;
	    } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		     && !(need_locks2 & lock));
	    if (need_locks2 & lock)
		lock_no--;
	    locks = need_locks1 & lock_mask;
	    erts_smp_proc_lock(p1, locks);
	    have_locks1 |= locks;
	    need_locks1 &= ~locks;
	}
	else if (need_locks2 & lock) {
	    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		   && !(need_locks1 & lock)) {
		lock_mask |= lock;
		lock = (1 << ++lock_no);
	    }
	    locks = need_locks2 & lock_mask;
	    erts_smp_proc_lock(p2, locks);
	    have_locks2 |= locks;
	    need_locks2 &= ~locks;
	}
	else
	    lock_no++;
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if (p1 && p2) {
	if (p1 == a_proc) {
	    ERTS_LC_ASSERT(a_need_locks == have_locks1);
	    ERTS_LC_ASSERT(b_need_locks == have_locks2);
	}
	else {
	    ERTS_LC_ASSERT(a_need_locks == have_locks2);
	    ERTS_LC_ASSERT(b_need_locks == have_locks1);
	}
    }
    else {
	ERTS_LC_ASSERT(p1);
	if (a_proc) {
	    ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
	}
	else {
	    ERTS_LC_ASSERT(have_locks1 == b_need_locks);
	}
    }
#endif

    /* Drop the temporary references taken while fully unlocked. */
    if (!is_managed) {
	if (refc1)
	    erts_smp_proc_dec_refc(p1);
	if (refc2)
	    erts_smp_proc_dec_refc(p2);
    }
}
Sint erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, Eterm message, unsigned flags) { Uint msize; ErlHeapFragment* bp = NULL; Eterm token = NIL; Sint res = 0; #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); Sint tok_label = 0; Sint tok_lastcnt = 0; Sint tok_serial = 0; #endif BM_STOP_TIMER(system); BM_MESSAGE(message,sender,receiver); BM_START_TIMER(send); #ifdef USE_VM_PROBES *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send)) { erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id); erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id); } #endif if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; Eterm stoken = SEQ_TRACE_TOKEN(sender); Uint seq_trace_size = 0; #ifdef USE_VM_PROBES Uint dt_utag_size = 0; Eterm utag = NIL; #endif BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); #ifdef USE_VM_PROBES if (stoken != am_have_dt_utag) { #endif seq_trace_update_send(sender); seq_trace_output(stoken, message, SEQ_TRACE_SEND, receiver->common.id, sender); seq_trace_size = 6; /* TUPLE5 */ #ifdef USE_VM_PROBES } if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { dt_utag_size = size_object(DT_UTAG(sender)); } else if (stoken == am_have_dt_utag ) { stoken = NIL; } #endif bp = new_message_buffer(msize + seq_trace_size #ifdef USE_VM_PROBES + dt_utag_size #endif ); hp = bp->mem; BM_SWAP_TIMER(send,copy); token = copy_struct(stoken, seq_trace_size, &hp, &bp->off_heap); message = copy_struct(message, msize, &hp, &bp->off_heap); #ifdef USE_VM_PROBES if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, &bp->off_heap); #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) Spreading tag (%T) with " "message %T!\r\n",sender->common.id, utag, message); #endif } #endif 
BM_MESSAGE_COPIED(msize); BM_SWAP_TIMER(copy,send); #ifdef USE_VM_PROBES if (DTRACE_ENABLED(message_send)) { if (stoken != NIL && stoken != am_have_dt_utag) { tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken)); tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken)); tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken)); } DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); } #endif res = queue_message(NULL, receiver, receiver_locks, NULL, bp, message, token #ifdef USE_VM_PROBES , utag #endif ); BM_SWAP_TIMER(send,system); } else if (sender == receiver) { /* Drop message if receiver has a pending exit ... */ #ifdef ERTS_SMP ErtsProcLocks need_locks = (~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS)); if (need_locks) { *receiver_locks |= need_locks; if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) { if (need_locks == ERTS_PROC_LOCK_MSGQ) { erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS); need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS; } erts_smp_proc_lock(receiver, need_locks); } } if (!ERTS_PROC_PENDING_EXIT(receiver)) #endif { ErlMessage* mp = message_alloc(); DTRACE6(message_send, sender_name, receiver_name, size_object(message), tok_label, tok_lastcnt, tok_serial); mp->data.attached = NULL; ERL_MESSAGE_TERM(mp) = message; ERL_MESSAGE_TOKEN(mp) = NIL; #ifdef USE_VM_PROBES ERL_MESSAGE_DT_UTAG(mp) = NIL; #endif mp->next = NULL; /* * We move 'in queue' to 'private queue' and place * message at the end of 'private queue' in order * to ensure that the 'in queue' doesn't contain * references into the heap. By ensuring this, * we don't need to include the 'in queue' in * the root set when garbage collecting. 
*/ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver); LINK_MESSAGE_PRIVQ(receiver, mp); res = receiver->msg.len; if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } } BM_SWAP_TIMER(send,system); } else { ErlOffHeap *ohp; Eterm *hp; erts_aint32_t state; BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); hp = erts_alloc_message_heap_state(msize, &bp, &ohp, receiver, receiver_locks, &state); BM_SWAP_TIMER(send,copy); message = copy_struct(message, msize, &hp, ohp); BM_MESSAGE_COPIED(msz); BM_SWAP_TIMER(copy,send); DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); res = queue_message(sender, receiver, receiver_locks, &state, bp, message, token #ifdef USE_VM_PROBES , NIL #endif ); BM_SWAP_TIMER(send,system); } return res; }
/*
 * erlang:load_nif/2 BIF.
 *
 * BIF_ARG_1: library path as an Erlang string (list of ints).
 * BIF_ARG_2: load-info term passed to the library's load/reload/upgrade
 *            callback.
 *
 * Validates the dynamic library (version, VM variant, module name,
 * presence of every declared NIF in the calling module's current code),
 * invokes the appropriate load/reload/upgrade callback, and on success
 * patches the module's beam code so the stub functions dispatch through
 * op_call_nif.  The whole operation runs with thread progress blocked
 * (all schedulers stopped), since it mutates loaded code.
 *
 * Returns 'ok' or an error tuple built by load_nif_error().
 */
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
    static const char bad_lib[] = "bad_lib";
    static const char reload[] = "reload";
    static const char upgrade[] = "upgrade";
    char* lib_name = NULL;
    void* handle = NULL;
    void* init_func;
    ErlNifEntry* entry = NULL;
    ErlNifEnv env;
    int len, i, err;
    Module* mod;
    Eterm mod_atom;
    Eterm f_atom;
    BeamInstr* caller;
    ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
    Eterm ret = am_ok;
    int veto;
    struct erl_module_nif* lib = NULL;

    /* Flatten BIF_ARG_1 (an Erlang string) into a NUL-terminated C string. */
    len = list_length(BIF_ARG_1);
    if (len < 0) {
	BIF_ERROR(BIF_P, BADARG);
    }
    lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1);

    if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) {
	erts_free(ERTS_ALC_T_TMP, lib_name);
	BIF_ERROR(BIF_P, BADARG);
    }
    lib_name[len] = '\0';

    /* Block system (is this the right place to do it?) */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    /* Find calling module */
    ASSERT(BIF_P->current != NULL);
    ASSERT(BIF_P->current[0] == am_erlang
	   && BIF_P->current[1] == am_load_nif
	   && BIF_P->current[2] == 2);
    caller = find_function_from_pc(BIF_P->cp);
    ASSERT(caller != NULL);
    mod_atom = caller[0];
    ASSERT(is_atom(mod_atom));
    mod=erts_get_module(mod_atom);
    ASSERT(mod != NULL);

    if (!in_area(caller, mod->code, mod->code_length)) {
	/* load_nif must be called from the module's current code. */
	ASSERT(in_area(caller, mod->old_code, mod->old_code_length));
	ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
			     "module '%T' not allowed", mod_atom);
    }
    else if ((err=erts_sys_ddll_open2(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
	const char slogan[] = "Failed to load NIF library";
	/* Avoid repeating the library name if the OS error already has it. */
	if (strstr(errdesc.str, lib_name) != NULL) {
	    ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
	}
	else {
	    ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'",
				 slogan, lib_name, errdesc.str);
	}
    }
    else if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
	ret  = load_nif_error(BIF_P, bad_lib, "Failed to find library init"
			      " function: '%s'", errdesc.str);
    }
    /* Taint the emulator before running library code (crash dumps will
     * show the module even if init crashes), then call its init. */
    else if ((add_taint(mod_atom),
	      (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
	ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
    }
    else if (entry->major != ERL_NIF_MAJOR_VERSION
	     || entry->minor > ERL_NIF_MINOR_VERSION) {
	ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
			     entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
    }
    else if (entry->minor >= 1
	     && sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
	ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
			     "this vm variant (%s).",
			     entry->vm_variant, ERL_NIF_VM_VARIANT);
    }
    else if (!erts_is_atom_str((char*)entry->name, mod_atom)) {
	ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not"
			     " match calling module '%T'", entry->name, mod_atom);
    }
    else {
	/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/

	/* Every NIF declared by the library must exist in the module's
	 * current code and be large enough to hold the call_nif patch. */
	for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
	    BeamInstr** code_pp;
	    ErlNifFunc* f = &entry->funcs[i];
	    if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom)
		|| (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) {
		ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
				     mod_atom, f->name, f->arity);
	    }
	    else if (code_pp[1] - code_pp[0] < (5+3)) {
		ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
				     " in module (%T:%s/%u to small)",
				     mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
	    }
	    /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
	      mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
	}
    }

    if (ret != am_ok) {
	goto error;
    }

    /* Call load, reload or upgrade:
     */

    lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif));
    lib->handle = handle;
    lib->entry = entry;
    erts_refc_init(&lib->rt_cnt, 0);
    erts_refc_init(&lib->rt_dtor_cnt, 0);
    lib->mod = mod;
    env.mod_nif = lib;
    if (mod->nif != NULL) { /* Reload */
	int k;
	lib->priv_data = mod->nif->priv_data;

	ASSERT(mod->nif->entry != NULL);
	if (entry->reload == NULL) {
	    ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
	    goto error;
	}
	/* Check that no NIF is removed */
	for (k=0; k < mod->nif->entry->num_of_funcs; k++) {
	    ErlNifFunc* old_func = &mod->nif->entry->funcs[k];
	    for (i=0; i < entry->num_of_funcs; i++) {
		if (old_func->arity == entry->funcs[i].arity
		    && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
		    break;
		}
	    }
	    if (i == entry->num_of_funcs) {
		ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
				     "function %T:%s/%u\r\n", mod_atom,
				     old_func->name, old_func->arity);
		goto error;
	    }
	}
	erts_pre_nif(&env, BIF_P, lib);
	veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
	erts_post_nif(&env);
	if (veto) {
	    ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
	}
	else {
	    mod->nif->entry = NULL; /* to prevent 'unload' callback */
	    erts_unload_nif(mod->nif);
	}
    }
    else {
	lib->priv_data = NULL;
	if (mod->old_nif != NULL) { /* Upgrade */
	    void* prev_old_data = mod->old_nif->priv_data;
	    if (entry->upgrade == NULL) {
		ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
		goto error;
	    }
	    erts_pre_nif(&env, BIF_P, lib);
	    veto = entry->upgrade(&env, &lib->priv_data, &mod->old_nif->priv_data, BIF_ARG_2);
	    erts_post_nif(&env);
	    if (veto) {
		/* Callback failed: restore old code's private data. */
		mod->old_nif->priv_data = prev_old_data;
		ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
	    }
	    /*else if (mod->old_nif->priv_data != prev_old_data) {
		refresh_cached_nif_data(mod->old_code, mod->old_nif);
	    }*/
	}
	else if (entry->load != NULL) { /* Initial load */
	    erts_pre_nif(&env, BIF_P, lib);
	    veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
	    erts_post_nif(&env);
	    if (veto) {
		ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful.");
	    }
	}
    }
    if (ret == am_ok) {
	/*
	** Everything ok, patch the beam code with op_call_nif
	*/
	mod->nif = lib;
	for (i=0; i < entry->num_of_funcs; i++)
	{
	    BeamInstr* code_ptr;
	    erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom);
	    code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity);

	    if (code_ptr[1] == 0) {
		code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
	    }
	    else { /* Function traced, patch the original instruction word */
		BpData** bps = (BpData**) code_ptr[1];
		BpData*  bp  = (BpData*)  bps[bp_sched2ix()];
	        bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
	    }
	    code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
	    code_ptr[5+2] = (BeamInstr) lib;
	}
    }
    else {
    error:
	/* Common error exit: free partially built state and close the
	 * library handle (no callback has permanently attached it yet). */
	ASSERT(ret != am_ok);
	if (lib != NULL) {
	    erts_free(ERTS_ALC_T_NIF, lib);
	}
	if (handle != NULL) {
	    erts_sys_ddll_close(handle);
	}
	erts_sys_ddll_free_error(&errdesc);
    }

    /* Resume the world, re-take our main lock and return. */
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_free(ERTS_ALC_T_TMP, lib_name);
    BIF_RET(ret);
}
/*
 * Queue a message that is still in external (distribution) format on
 * the receiver's message queue (older ErlMessage-based variant).
 * The message is enqueued undecoded unless the receiver is traced for
 * 'receive', in which case it must be decoded here to be traced.
 *
 * *rcvr_locks must be exactly the locks held on rcvr on entry; the
 * MSGQ/STATUS locks may be added to the caller's lock set.
 *
 * Fix: the exiting-receiver branch read mp->data.dist_ext, which is
 * never assigned on that path (message_alloc() does not initialize it;
 * it is only set in the final enqueue branch) -- an uninitialized read.
 * The external message being dropped is the 'dist_ext' parameter, so
 * its trailer is what must be cleaned up; use it directly.
 */
void
erts_queue_dist_message(Process *rcvr,
			ErtsProcLocks *rcvr_locks,
			ErtsDistExternal *dist_ext,
			Eterm token)
{
    ErlMessage* mp;
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#endif

    ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = message_alloc();

#ifdef ERTS_SMP
    need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
	*rcvr_locks |= need_locks;
	if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) {
	    if (need_locks == ERTS_PROC_LOCK_MSGQ) {
		/* Lock order: drop STATUS before blocking on MSGQ. */
		erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
		need_locks = (ERTS_PROC_LOCK_MSGQ
			      | ERTS_PROC_LOCK_STATUS);
	    }
	    erts_smp_proc_lock(rcvr, need_locks);
	}
    }

    if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) {
	/* Drop message if receiver is exiting or has a pending exit ... */
	if (is_not_nil(token)) {
	    ErlHeapFragment *heap_frag;
	    /* Clean up the off-heap data in the trailer of the external
	     * message we are dropping.  (Was mp->data.dist_ext, which is
	     * uninitialized here; dist_ext is the message being dropped.) */
	    heap_frag = erts_dist_ext_trailer(dist_ext);
	    erts_cleanup_offheap(&heap_frag->off_heap);
	}
	erts_free_dist_ext_copy(dist_ext);
	message_free(mp);
    }
    else
#endif
    if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
	/* Ahh... need to decode it in order to trace it... */
	ErlHeapFragment *mbuf;
	Eterm msg;
	message_free(mp);
	msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
	if (is_value(msg))
	    erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token);
    }
    else {
	/* Enqueue message on external format */
	ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
	ERL_MESSAGE_TOKEN(mp) = token;
	mp->next = NULL;
	mp->data.dist_ext = dist_ext;
	LINK_MESSAGE(rcvr, mp);

	notify_new_message(rcvr);
    }
}