/**
 * @brief Send one message from something that is *not* a local process.
 *
 * seq_trace does not work with this type of message, so the token is
 * set to am_undefined, which means that the receiving process will not
 * remove the seq_trace token when it gets this message.
 */
void
erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
                   ErtsMessage* mp, Eterm msg, Eterm from)
{
    ASSERT(is_not_internal_pid(from));
    ERL_MESSAGE_TERM(mp) = msg;
    ERL_MESSAGE_FROM(mp) = from;
    ERL_MESSAGE_TOKEN(mp) = am_undefined;
    queue_messages(receiver, receiver_locks, mp, &mp->next, 1);
}
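/*
 * Illustrative usage sketch (not part of the original file): build a
 * small tuple on a message heap fragment and queue it. Assumes
 * erts_alloc_message() as declared in erl_message.h; `receiver' and
 * `receiver_locks' are hypothetical placeholders supplied by the caller.
 */
#if 0
static void
example_queue_message(Process *receiver, ErtsProcLocks receiver_locks)
{
    ErtsMessage *mp;
    Eterm *hp;
    Eterm msg;

    /* Three heap words: arity word plus two tuple elements. */
    mp = erts_alloc_message(3, &hp);
    msg = TUPLE2(hp, am_ok, am_undefined);

    /* `from' is not an internal pid, so the token stays am_undefined. */
    erts_queue_message(receiver, receiver_locks, mp, msg, am_system);
}
#endif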
BIF_RETTYPE port_connect_2(BIF_ALIST_2)
{
    Port* prt;
    Process* rp;
    Eterm pid = BIF_ARG_2;

    if (is_not_internal_pid(pid)) {
    error:
        BIF_ERROR(BIF_P, BADARG);
    }
    prt = id_or_name2port(BIF_P, BIF_ARG_1);
    if (!prt) {
        goto error;
    }

    rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                       pid, ERTS_PROC_LOCK_LINK);
    if (!rp) {
        erts_smp_port_unlock(prt);
        ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
        goto error;
    }

    erts_add_link(&(rp->nlinks), LINK_PID, prt->id);
    erts_add_link(&(prt->nlinks), LINK_PID, pid);

    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);

    prt->connected = pid; /* internal pid */

    erts_smp_port_unlock(prt);

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(port_connect)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);

        dtrace_pid_str(prt->connected, process_str);
        erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
        dtrace_proc_str(rp, newprocess_str);
        DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
    }
#endif
    BIF_RET(am_true);
}
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
#if !defined(ERTS_DIRTY_SCHEDULERS)
    BIF_ERROR(BIF_P, EXC_NOTSUP);
#else
    Process *rp;
    int reds = 0;
    Eterm res;

    if (BIF_P != erts_dirty_process_code_checker)
        BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_internal_pid(BIF_ARG_1))
        BIF_ERROR(BIF_P, BADARG);

    if (is_not_atom(BIF_ARG_2))
        BIF_ERROR(BIF_P, BADARG);

    rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
    if (rp == ERTS_PROC_LOCK_BUSY)
        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    if (!rp)
        BIF_RET(am_false);

    res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);

    if (BIF_P != rp)
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);

    ASSERT(is_value(res));

    BIF_RET2(res, reds);
#endif
}
Eterm erts_instr_get_memory_map(Process *proc)
{
    MapStatBlock_t *org_mem_anchor;
    Eterm hdr_tuple, md_list, res;
    Eterm *hp;
    Uint hsz;
    MapStatBlock_t *bp;
#ifdef DEBUG
    Eterm *end_hp;
#endif

    if (!erts_instr_memory_map)
        return am_false;

    if (!atoms_initialized)
        init_atoms();
    if (!am_n)
        init_am_n();
    if (!am_c)
        init_am_c();
    if (!am_a)
        init_am_a();

    erts_mtx_lock(&instr_x_mutex);
    erts_mtx_lock(&instr_mutex);

    /* Header size */
    hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4);

    /* Memory data list */
    for (bp = mem_anchor; bp; bp = bp->next) {
        if (is_internal_pid(bp->pid)) {
#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
            if (internal_pid_number(bp->pid) > MAX_SMALL)
                hsz += BIG_UINT_HEAP_SIZE;
#endif
#if (_PID_SER_SIZE - 1 > MAX_SMALL)
            if (internal_pid_serial(bp->pid) > MAX_SMALL)
                hsz += BIG_UINT_HEAP_SIZE;
#endif
            hsz += 4;
        }

        if ((UWord) bp->mem > MAX_SMALL)
            hsz += BIG_UINT_HEAP_SIZE;
        if (bp->size > MAX_SMALL)
            hsz += BIG_UINT_HEAP_SIZE;

        hsz += 5 + 2;
    }

    hsz += 3; /* Root tuple */

    org_mem_anchor = mem_anchor;
    mem_anchor = NULL;

    erts_mtx_unlock(&instr_mutex);

    hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */

    erts_mtx_lock(&instr_mutex);

#ifdef DEBUG
    end_hp = hp + hsz;
#endif

    {   /* Build header */
        ErtsAlcType_t n;
        Eterm type_map;
        Uint *hp2 = hp;
#ifdef DEBUG
        Uint *hp2_end;
#endif

        hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4;

#ifdef DEBUG
        hp2_end = hp;
#endif

        type_map = make_tuple(hp);
        *(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN);

        for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
            ErtsAlcType_t t = ERTS_ALC_N2T(n);
            ErtsAlcType_t a = ERTS_ALC_T2A(t);
            ErtsAlcType_t c = ERTS_ALC_T2C(t);

            if (!erts_allctrs_info[a].enabled)
                a = ERTS_ALC_A_SYSTEM;

            *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]);
            hp2 += 4;
        }

        ASSERT(hp2 == hp2_end);

        hdr_tuple = TUPLE4(hp,
                           am.instr_hdr,
                           make_small(ERTS_INSTR_VSN),
                           make_small(MAP_STAT_BLOCK_HEADER_SIZE),
                           type_map);

        hp += 5;
    }

    /* Build memory data list */

    for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) {
        Eterm tuple;
        Eterm type;
        Eterm ptr;
        Eterm size;
        Eterm pid;

        if (is_not_internal_pid(bp->pid))
            pid = am_undefined;
        else {
            Eterm c;
            Eterm n;
            Eterm s;

#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL)
#error Oversized internal channel number
#endif
            c = make_small(ERST_INTERNAL_CHANNEL_NO);

#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
            if (internal_pid_number(bp->pid) > MAX_SMALL) {
                n = uint_to_big(internal_pid_number(bp->pid), hp);
                hp += BIG_UINT_HEAP_SIZE;
            }
            else
#endif
                n = make_small(internal_pid_number(bp->pid));

#if (_PID_SER_SIZE - 1 > MAX_SMALL)
            if (internal_pid_serial(bp->pid) > MAX_SMALL) {
                s = uint_to_big(internal_pid_serial(bp->pid), hp);
                hp += BIG_UINT_HEAP_SIZE;
            }
            else
#endif
                s = make_small(internal_pid_serial(bp->pid));

            pid = TUPLE3(hp, c, n, s);
            hp += 4;
        }

#if ERTS_ALC_N_MAX > MAX_SMALL
#error Oversized memory type number
#endif
        type = make_small(bp->type_no);

        if ((UWord) bp->mem > MAX_SMALL) {
            ptr = uint_to_big((UWord) bp->mem, hp);
            hp += BIG_UINT_HEAP_SIZE;
        }
        else
            ptr = make_small((UWord) bp->mem);

        if (bp->size > MAX_SMALL) {
            size = uint_to_big(bp->size, hp);
            hp += BIG_UINT_HEAP_SIZE;
        }
        else
            size = make_small(bp->size);

        tuple = TUPLE4(hp, type, ptr, size, pid);
        hp += 5;

        md_list = CONS(hp, tuple, md_list);
        hp += 2;
    }

    res = TUPLE2(hp, hdr_tuple, md_list);

    ASSERT(hp + 3 == end_hp);

    if (mem_anchor) {
        for (bp = mem_anchor; bp->next; bp = bp->next)
            ;
        ASSERT(org_mem_anchor);
        org_mem_anchor->prev = bp;
        bp->next = org_mem_anchor;
    }
    else {
        mem_anchor = org_mem_anchor;
    }

    erts_mtx_unlock(&instr_mutex);
    erts_mtx_unlock(&instr_x_mutex);

    return res;
}
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy,
                                     __FILE__, __LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;

                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
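/*
 * Illustrative usage sketch (not part of the original file): a typical
 * lookup that pins the target with a reference count so the pointer
 * stays valid, then drops the lock and the reference when done. `c_p'
 * and `pid' are hypothetical placeholders supplied by the caller.
 */
#if 0
static void
example_lookup(Process *c_p, Eterm pid)
{
    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                    pid, ERTS_PROC_LOCK_STATUS,
                                    ERTS_P2P_FLG_INC_REFC);
    if (rp) {
        /* ... inspect rp while holding its status lock ... */
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
        erts_proc_dec_refc(rp); /* matches ERTS_P2P_FLG_INC_REFC */
    }
}
#endif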
/*
 * Register a process or port under a name (a process or port cannot be
 * registered twice). Returns 0 if the name, process, or port is already
 * registered.
 *
 * When SMP support is enabled:
 *   * Assumes that the main lock (and only the main lock) is held
 *     on c_p.
 */
int erts_register_name(Process *c_p, Eterm name, Eterm id)
{
    int res = 0;
    Process *proc = NULL;
    Port *port = NULL;
    RegProc r, *rp;
    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    if (is_not_atom(name) || name == am_undefined)
        return res;

    if (c_p->common.id == id) /* A very common case I think... */
        proc = c_p;
    else {
        if (is_not_internal_pid(id) && is_not_internal_port(id))
            return res;
        erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
        if (is_internal_port(id)) {
            port = erts_id2port(id);
            if (!port)
                goto done;
        }
    }

#ifdef ERTS_SMP
    {
        ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
        reg_safe_write_lock(proc, &proc_locks);

        if (proc && !proc_locks)
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
#endif

    if (is_internal_pid(id)) {
        if (!proc)
            proc = erts_pid2proc(NULL, 0, id, ERTS_PROC_LOCK_MAIN);
        r.p = proc;
        if (!proc)
            goto done;
        if (proc->common.u.alive.reg)
            goto done;
        r.pt = NULL;
    }
    else {
        ASSERT(!INVALID_PORT(port, id));
        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
        r.pt = port;
        if (r.pt->common.u.alive.reg)
            goto done;
        r.p = NULL;
    }

    r.name = name;

    rp = (RegProc*) hash_put(&process_reg, (void*) &r);
    if (proc && rp->p == proc) {
        if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
            trace_proc(proc, ERTS_PROC_LOCK_MAIN,
                       proc, am_register, name);
        }
        proc->common.u.alive.reg = rp;
    }
    else if (port && rp->pt == port) {
        if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
            trace_port(port, am_register, name);
        }
        port->common.u.alive.reg = rp;
    }

    if ((rp->p && rp->p->common.id == id)
        || (rp->pt && rp->pt->common.id == id)) {
        res = 1;
    }

 done:
    reg_write_unlock();
    if (port)
        erts_port_release(port);
    if (c_p != proc) {
        if (proc)
            erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
    return res;
}
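/*
 * Illustrative usage sketch (not part of the original file): how a
 * register/2-style BIF might call erts_register_name(). Per the
 * contract above, BIF_P holds only its main lock on entry. The name
 * `example_register_2' is hypothetical; the real BIF performs
 * additional argument checks.
 */
#if 0
BIF_RETTYPE example_register_2(BIF_ALIST_2)
{
    if (!erts_register_name(BIF_P, BIF_ARG_1, BIF_ARG_2))
        BIF_ERROR(BIF_P, BADARG);
    BIF_RET(am_true);
}
#endif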
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
        ASSERT(c_p->id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(c_p);
            return c_p;
        }
    }

    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
        erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
        if (proc->id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                        erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif
                    if (need_ptl) {
                        erts_smp_proc_inc_refc(proc);
                        dec_refc_proc = proc;
                        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
                        need_ptl = 0;
                    }

                    proc_safelock(!need_ptl,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (need_ptl)
        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
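/*
 * Illustrative usage sketch (not part of the original file): the
 * non-blocking ERTS_P2P_FLG_TRY_LOCK path. ERTS_PROC_LOCK_BUSY tells
 * the caller to back off (typically yield and retry) instead of
 * blocking and risking a lock-order violation. `c_p' and `pid' are
 * hypothetical placeholders supplied by the caller.
 */
#if 0
static int
example_trylock(Process *c_p, Eterm pid)
{
    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                    pid, ERTS_PROC_LOCK_MAIN,
                                    ERTS_P2P_FLG_TRY_LOCK);
    if (rp == ERTS_PROC_LOCK_BUSY)
        return 0;  /* busy; caller should yield and retry */
    if (!rp)
        return -1; /* no such (live) process */
    /* ... use rp under its main lock ... */
    if (rp != c_p) /* c_p already held its own main lock */
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
    return 1;
}
#endif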