/*
 * Write a detailed dump of every live process to 'to'/'to_arg' (crash-dump
 * style), followed by persistent terms, literal areas and all binaries
 * collected (via the file-scope 'all_binaries' list) while dumping.
 *
 * Exiting processes are skipped.  A process that is being garbage
 * collected is also skipped, UNLESS it is the current process of the
 * calling scheduler — i.e. the garbing process that triggered this dump.
 */
void
erts_deep_process_dump(fmtfn_t to, void *to_arg)
{
    int i, max = erts_ptab_max(&erts_proc);
    all_binaries = NULL;
    init_literal_areas();
    erts_init_persistent_dumping();
    for (i = 0; i < max; i++) {
        Process *p = erts_pix2proc(i);
        /* p->i == ENULL presumably marks a slot without a runnable
         * process — TODO confirm against process-table docs. */
        if (p && p->i != ENULL) {
            erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
            if (state & ERTS_PSFLG_EXITING)
                continue;
            if (state & ERTS_PSFLG_GC) {
                ErtsSchedulerData *sdp = erts_get_scheduler_data();
                if (!sdp || p != sdp->current_process)
                    continue;
                /* We want to dump the garbing process that caused the dump */
            }
            dump_process_info(to, to_arg, p);
        }
    }
    dump_persistent_terms(to, to_arg);
    dump_literals(to, to_arg);
    dump_binaries(to, to_arg, all_binaries);
}
static void bin_check(void) { Process *rp; struct erl_off_heap_header* hdr; int i, printed = 0, max = erts_ptab_max(&erts_proc); for (i=0; i < max; i++) { rp = erts_pix2proc(i); if (!rp) continue; for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) { if (hdr->thing_word == HEADER_PROC_BIN) { ProcBin *bp = (ProcBin*) hdr; if (!printed) { erts_printf("Process %T holding binary data \n", rp->common.id); printed = 1; } erts_printf("%p orig_size: %bpd, norefs = %bpd\n", bp->val, bp->val->orig_size, erts_smp_atomic_read_nob(&bp->val->refc)); } } if (printed) { erts_printf("--------------------------------------\n"); printed = 0; } } /* db_bin_check() has to be rewritten for the AVL trees... */ /*db_bin_check();*/ }
/*
 * Interactive debug routine: walk the process table backwards, print each
 * live process and prompt on stdin for a single key:
 *   'k' - try to kill the process (only allowed for purely WAITING
 *         processes; anything runnable/running/exiting is refused),
 *         then advance to the next process,
 *   'n' - advance to the next process,
 *   'r' - return (any other key also returns).
 */
static void process_killer(void)
{
    int i, j, max = erts_ptab_max(&erts_proc);
    Process* rp;

    erts_printf("\n\nProcess Information\n\n");
    erts_printf("--------------------------------------------------\n");
    for (i = max-1; i >= 0; i--) {
        rp = erts_pix2proc(i);
        if (rp && rp->i != ENULL) {
            int br;
            print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
            erts_printf("(k)ill (n)ext (r)eturn:\n");
            while(1) {
                /* sys_get_key() <= 0 presumably means EOF/error on the
                 * console — terminate the emulator. TODO confirm. */
                if ((j = sys_get_key(0)) <= 0)
                    erts_exit(0, "");
                switch(j) {
                case 'k':
                {
                    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
                    erts_aint32_t state;
                    /* Hold a reference so the process cannot be freed
                     * while we inspect and signal it. */
                    erts_proc_inc_refc(rp);
                    erts_smp_proc_lock(rp, rp_locks);
                    state = erts_smp_atomic32_read_acqb(&rp->state);
                    if (state & (ERTS_PSFLG_FREE
                                 | ERTS_PSFLG_EXITING
                                 | ERTS_PSFLG_ACTIVE
                                 | ERTS_PSFLG_ACTIVE_SYS
                                 | ERTS_PSFLG_IN_RUNQ
                                 | ERTS_PSFLG_RUNNING
                                 | ERTS_PSFLG_RUNNING_SYS
                                 | ERTS_PSFLG_DIRTY_RUNNING
                                 | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
                        erts_printf("Can only kill WAITING processes this way\n");
                    }
                    else {
                        (void) erts_send_exit_signal(NULL,
                                                     NIL,
                                                     rp,
                                                     &rp_locks,
                                                     am_kill,
                                                     NIL,
                                                     NULL,
                                                     0);
                    }
                    erts_smp_proc_unlock(rp, rp_locks);
                    erts_proc_dec_refc(rp);
                }
                /* Intentional fall-through: after a kill attempt we
                 * proceed to the next process, just like 'n'. */
                case 'n': br = 1; break;
                case 'r': return;
                default: return;
                }
                if (br == 1) break;
            }
        }
    }
}
void process_info(int to, void *to_arg) { int i; for (i = 0; i < erts_max_processes; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { if (!ERTS_PROC_IS_EXITING(p)) print_process_info(to, to_arg, p); } } port_info(to, to_arg); }
/*
 * Print a summary of every live, non-exiting process to 'to'/'to_arg',
 * followed by the port table.
 */
void
process_info(int to, void *to_arg)
{
    int ix;
    int table_max = erts_ptab_max(&erts_proc);

    for (ix = 0; ix < table_max; ix++) {
        Process *proc = erts_pix2proc(ix);

        if (!proc || proc->i == ENULL)
            continue;
        /* Do not include processes with no heap,
         * they are most likely just created and has invalid data */
        if (ERTS_PROC_IS_EXITING(proc) || proc->heap == NULL)
            continue;

        print_process_info(to, to_arg, proc);
    }

    port_info(to, to_arg);
}
void erts_deep_process_dump(int to, void *to_arg) { int i, max = erts_ptab_max(&erts_proc); all_binaries = NULL; for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) dump_process_info(to, to_arg, p); } } dump_binaries(to, to_arg, all_binaries); }
/*
 * Enable or disable lock counting on the main lock of every existing
 * process.  Initialization/destruction is only performed when the
 * current lcnt state of the lock differs from the requested one.
 */
void
erts_lcnt_enable_proc_lock_count(int enable)
{
    int ix;

    for (ix = 0; ix < erts_max_processes; ++ix) {
        Process *proc = erts_pix2proc(ix);
        int counted;

        if (!proc)
            continue;

        /* Non-zero lock type means lcnt is already set up for this lock. */
        counted = ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main)) != 0;

        if (enable && !counted)
            erts_lcnt_proc_lock_init(proc);
        else if (!enable && counted)
            erts_lcnt_proc_lock_destroy(proc);
    }
}
/*
 * Look up the process identified by 'pid' and acquire 'pid_need_locks'
 * on it, given that the caller 'c_p' (may be NULL) already holds
 * 'c_p_have_locks' on itself.
 *
 * Returns:
 *   - the locked process,
 *   - NULL if no such process exists (or it is exiting and
 *     ERTS_P2P_FLG_ALLOW_OTHER_X was not given),
 *   - ERTS_PROC_LOCK_BUSY if ERTS_P2P_FLG_TRY_LOCK was given and the
 *     locks could not be taken immediately.
 *
 * With ERTS_P2P_FLG_INC_REFC the returned process has its reference
 * count incremented on behalf of the caller.
 *
 * NOTE(review): the thread-progress "unmanaged delay" handle protects
 * the Process struct from being freed while we operate on it before
 * the locks are taken — confirm against the thread-progress docs.
 */
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    /* Fast path: looking up ourselves with all needed locks held. */
    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;    /* slot has been reused by another process */
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy,
                                     __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        /* Unmanaged thread: take an extra reference so
                         * the process survives after we let thread
                         * progress continue, and remember to drop it
                         * before returning. */
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;

                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    /* Slow path: acquire the locks in a deadlock-safe
                     * order (may temporarily release c_p's locks). */
                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    /* Re-validate after locking: the process may have started exiting,
     * or (when exiting processes are allowed) its table slot may no
     * longer point at it. */
    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
/*
 * Build the global tables of all references to ErlNode and DistEntry
 * structures by sweeping the whole system: delayed-delete timers, the
 * local node, every process (heaps, heap fragments, message queues,
 * links, monitors, distribution controller), system messages, every
 * live port, globally stored trace-pattern binaries, all distribution
 * entries' links/monitors, ETS tables, BIF timers and the node table.
 *
 * Allocates 'referred_nodes'/'referred_dists' (ERTS_ALC_T_NC_TMP);
 * the caller is presumably responsible for freeing them — confirm
 * against the corresponding teardown function.
 */
static void
setup_reference_table(void)
{
    ErlHeapFragment *hfp;
    DistEntry *dep;
    HashInfo hi;
    int i, max;
    DeclareTmpHeapNoproc(heap,3);

    inserted_bins = NULL;

    /* One ReferredNode per entry currently in the node table. */
    hash_get_info(&hi, &erts_node_table);
    referred_nodes = erts_alloc(ERTS_ALC_T_NC_TMP,
                                hi.objs*sizeof(ReferredNode));
    no_referred_nodes = 0;
    hash_foreach(&erts_node_table, init_referred_node, NULL);
    ASSERT(no_referred_nodes == hi.objs);

    /* One ReferredDist per entry currently in the dist table. */
    hash_get_info(&hi, &erts_dist_table);
    referred_dists = erts_alloc(ERTS_ALC_T_NC_TMP,
                                hi.objs*sizeof(ReferredDist));
    no_referred_dists = 0;
    hash_foreach(&erts_dist_table, init_referred_dist, NULL);
    ASSERT(no_referred_dists == hi.objs);

    /* Go through the hole system, and build a table of all references
       to ErlNode and DistEntry structures */

    erts_debug_callback_timer_foreach(try_delete_node,
                                      insert_delayed_delete_node,
                                      NULL);
    erts_debug_callback_timer_foreach(try_delete_dist_entry,
                                      insert_delayed_delete_dist_entry,
                                      NULL);

    UseTmpHeapNoproc(3);
    insert_node(erts_this_node,
                SYSTEM_REF,
                TUPLE2(&heap[0], AM_system, am_undefined));
    UnUseTmpHeapNoproc(3);

    max = erts_ptab_max(&erts_proc);
    /* Insert all processes */
    for (i = 0; i < max; i++) {
        Process *proc = erts_pix2proc(i);
        if (proc) {
            int mli;
            /* All message queues that may carry off-heap references. */
            ErtsMessage *msg_list[] = {
                proc->msg.first,
#ifdef ERTS_SMP
                proc->msg_inq.first,
#endif
                proc->msg_frag};

            /* Insert Heap */
            insert_offheap(&(proc->off_heap),
                           HEAP_REF,
                           proc->common.id);
            /* Insert heap fragments buffers */
            for(hfp = proc->mbuf; hfp; hfp = hfp->next)
                insert_offheap(&(hfp->off_heap),
                               HEAP_REF,
                               proc->common.id);

            /* Insert msg buffers */
            for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
                ErtsMessage *msg;
                for (msg = msg_list[mli]; msg; msg = msg->next) {
                    ErlHeapFragment *heap_frag = NULL;
                    if (msg->data.attached) {
                        if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
                            heap_frag = &msg->hfrag;
                        else if (is_value(ERL_MESSAGE_TERM(msg)))
                            heap_frag = msg->data.heap_frag;
                        else {
                            /* Undecoded external message: its dist
                             * entry (if any) and the token trailer
                             * fragment still reference nodes. */
                            if (msg->data.dist_ext->dep)
                                insert_dist_entry(msg->data.dist_ext->dep,
                                                  HEAP_REF,
                                                  proc->common.id,
                                                  0);
                            if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
                                heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
                        }
                    }
                    while (heap_frag) {
                        insert_offheap(&(heap_frag->off_heap),
                                       HEAP_REF,
                                       proc->common.id);
                        heap_frag = heap_frag->next;
                    }
                }
            }
            /* Insert links */
            if (ERTS_P_LINKS(proc))
                insert_links(ERTS_P_LINKS(proc), proc->common.id);
            if (ERTS_P_MONITORS(proc))
                insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
            /* Insert controller */
            {
                DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
                if (dep)
                    insert_dist_entry(dep, CTRL_REF, proc->common.id, 0);
            }
        }
    }

#ifdef ERTS_SMP
    erts_foreach_sys_msg_in_q(insert_sys_msg);
#endif

    /* Insert all ports */
    max = erts_ptab_max(&erts_port);
    for (i = 0; i < max; i++) {
        ErlOffHeap *ohp;
        erts_aint32_t state;
        Port *prt;

        prt = erts_pix2port(i);
        if (!prt)
            continue;

        state = erts_atomic32_read_nob(&prt->state);
        if (state & ERTS_PORT_SFLGS_DEAD)
            continue;

        /* Insert links */
        if (ERTS_P_LINKS(prt))
            insert_links(ERTS_P_LINKS(prt), prt->common.id);
        /* Insert monitors */
        if (ERTS_P_MONITORS(prt))
            insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
        /* Insert port data */
        ohp = erts_port_data_offheap(prt);
        if (ohp)
            insert_offheap(ohp, HEAP_REF, prt->common.id);
        /* Insert controller */
        if (prt->dist_entry)
            insert_dist_entry(prt->dist_entry,
                              CTRL_REF,
                              prt->common.id,
                              0);
    }

    { /* Add binaries stored elsewhere ... */
        ErlOffHeap oh;
        ProcBin pb[2];
        int i = 0;
        Binary *default_match_spec;
        Binary *default_meta_match_spec;

        oh.first = NULL;
        /* Only the ProcBin members thing_word, val and next will be inspected
           (by insert_offheap()) */
#undef  ADD_BINARY
#define ADD_BINARY(Bin)                                      \
        if ((Bin)) {                                         \
            pb[i].thing_word = REFC_BINARY_SUBTAG;           \
            pb[i].val = (Bin);                               \
            pb[i].next = oh.first;                           \
            oh.first = (struct erl_off_heap_header*) &pb[i]; \
            i++;                                             \
        }

        erts_get_default_trace_pattern(NULL,
                                       &default_match_spec,
                                       &default_meta_match_spec,
                                       NULL,
                                       NULL);

        ADD_BINARY(default_match_spec);
        ADD_BINARY(default_meta_match_spec);

        insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef ADD_BINARY
    }

    /* Insert all dist links */

    for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
        if(dep->nlinks)
            insert_links2(dep->nlinks, dep->sysname);
        if(dep->node_links)
            insert_links(dep->node_links, dep->sysname);
        if(dep->monitors)
            insert_monitors(dep->monitors, dep->sysname);
    }

    for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
        if(dep->nlinks)
            insert_links2(dep->nlinks, dep->sysname);
        if(dep->node_links)
            insert_links(dep->node_links, dep->sysname);
        if(dep->monitors)
            insert_monitors(dep->monitors, dep->sysname);
    }

    /* Not connected dist entries should not have any links,
       but inspect them anyway */
    for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
        if(dep->nlinks)
            insert_links2(dep->nlinks, dep->sysname);
        if(dep->node_links)
            insert_links(dep->node_links, dep->sysname);
        if(dep->monitors)
            insert_monitors(dep->monitors, dep->sysname);
    }

    /* Insert all ets tables */
    erts_db_foreach_table(insert_ets_table, NULL);

    /* Insert all bif timers */
    erts_debug_bif_timer_foreach(insert_bif_timer, NULL);

    /* Insert node table (references to dist) */
    hash_foreach(&erts_node_table, insert_erl_node, NULL);
}
/*
 * Older (rwmtx-protected process table) variant of erts_pid2proc_opt():
 * look up the process identified by 'pid' and acquire 'pid_need_locks'
 * on it, given that the caller 'c_p' (may be NULL) already holds
 * 'c_p_have_locks' on itself.
 *
 * Returns the locked process, NULL (no such process, or exiting without
 * ERTS_P2P_FLG_ALLOW_OTHER_X), or ERTS_PROC_LOCK_BUSY when
 * ERTS_P2P_FLG_TRY_LOCK was given and the locks were contended.
 * With ERTS_P2P_FLG_SMP_INC_REFC the returned process has its reference
 * count incremented on behalf of the caller.
 */
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    /* Fast path: looking up ourselves with all needed locks held. */
    if (c_p && c_p->id == pid) {
        ASSERT(c_p->id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(c_p);
            return c_p;
        }
    }

    /* Non-scheduler threads (scheduler id 0) must hold the process
     * table lock while dereferencing table slots. */
    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
        erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
        if (proc->id != pid)
            proc = NULL;    /* slot has been reused by another process */
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                        erts_smp_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif
                    if (need_ptl) {
                        /* We may block in proc_safelock(); take a
                         * reference so the process survives, drop the
                         * table lock, and remember to release the
                         * reference before returning. */
                        erts_smp_proc_inc_refc(proc);
                        dec_refc_proc = proc;
                        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
                        need_ptl = 0;
                    }
                    /* Slow path: acquire the locks in a deadlock-safe
                     * order (may temporarily release c_p's locks). */
                    proc_safelock(!need_ptl,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (need_ptl)
        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    /* Re-validate after locking: the process may have started exiting,
     * or (when exiting processes are allowed) its table slot may no
     * longer point at it. */
    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}