/*
 * Report a lock-checker failure: print the formatted message to stderr,
 * dump the locks currently held by the calling thread, and abort the
 * emulator.  Never returns.
 */
void
erts_lc_fail(char *fmt, ...)
{
    va_list ap;

    erts_fprintf(stderr, "Lock check failed: ");
    va_start(ap, fmt);
    erts_vfprintf(stderr, fmt, ap);
    va_end(ap);
    erts_fprintf(stderr, "\n");
    print_curr_locks(get_my_locked_locks());
    abort();
}
/*
 * Report a mismatch between the locks this thread actually holds and
 * the set it is required to hold (or not hold), then abort.
 *
 * l_lcks          locks currently held by the thread (passed through to
 *                 print_curr_locks)
 * exact           non-zero: 'have' is the complete required set;
 *                 zero: 'have'/'have_not' are minimum requirements only
 * failed_have     index into 'have' of a lock that should have been
 *                 held but was not, or < 0 if none
 * failed_have_not index into 'have_not' of a lock that was held but
 *                 should not have been, or < 0 if none
 */
static void lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
                          int failed_have, erts_lc_lock_t *have, int have_len,
                          int failed_have_not, erts_lc_lock_t *have_not,
                          int have_not_len)
{
    int i;
    erts_fprintf(stderr, "Lock mismatch found!\n");
    if (failed_have >= 0) {
        ASSERT(have && have_len > failed_have);
        print_lock2("At least the ",
                    have[failed_have].id, have[failed_have].extra, 0,
                    " lock is not locked when it should have been\n");
    }
    else if (failed_have_not >= 0) {
        ASSERT(have_not && have_not_len > failed_have_not);
        print_lock2("At least the ",
                    have_not[failed_have_not].id,
                    have_not[failed_have_not].extra, 0,
                    " lock is locked when it should not have been\n");
    }
    if (exact) {
        /* exact mode: list the full expected lock set (possibly empty) */
        if (!have || have_len <= 0)
            erts_fprintf(stderr,
                         "Thread should not have any locks locked at all\n");
        else {
            erts_fprintf(stderr,
                         "Thread should have these and only these locks "
                         "locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
        }
    }
    else {
        /* subset mode: list required locks and forbidden locks separately */
        if (have && have_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least have these locks locked:\n");
            for (i = 0; i < have_len; i++)
                print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
        }
        if (have_not && have_not_len > 0) {
            erts_fprintf(stderr,
                         "Thread should at least not have these locks "
                         "locked:\n");
            for (i = 0; i < have_not_len; i++)
                print_lock2(" ", have_not[i].id, have_not[i].extra, 0, "\n");
        }
    }
    print_curr_locks(l_lcks);
    lc_abort();
}
/*
 * Dump the global lock order table to stderr, one lock per line,
 * starting at index 1.  Locks that also carry an internal order are
 * printed as "name:internal_order".
 */
static void
print_lock_order(void)
{
    int ix;

    erts_fprintf(stderr, "Lock order:\n");
    for (ix = 1; ix < ERTS_LOCK_ORDER_SIZE; ix++) {
        if (!erts_lock_order[ix].internal_order)
            erts_fprintf(stderr, " %s\n", erts_lock_order[ix].name);
        else
            erts_fprintf(stderr, " %s:%s\n",
                         erts_lock_order[ix].name,
                         erts_lock_order[ix].internal_order);
    }
}
/*
 * Debug dump of the thread-progress state: the global current value
 * followed by one line per managed thread.  With
 * ERTS_THR_PROGRESS_STATE_DEBUG defined, each line also shows the
 * thread's active/leader debug flags.
 */
void erts_thr_progress_dbg_print_state(void)
{
    int id;
    int sz = intrnl->managed.no;

    erts_fprintf(stderr, "--- thread progress ---\n");
    erts_fprintf(stderr,"current=%b64u\n", erts_thr_progress_current());
    for (id = 0; id < sz; id++) {
        /* unsynchronized read; values may be stale (debug output only) */
        ErtsThrPrgrVal current = read_nob(&intrnl->thr[id].data.current);
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
        erts_aint32_t state_debug;
        char *active, *leader;

        state_debug = erts_atomic32_read_nob(&intrnl->thr[id].data.state_debug);
        active = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE
                  ? "true"
                  : "false");
        leader = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_LEADER
                  ? "true"
                  : "false");
#endif
        if (current == ERTS_THR_PRGR_VAL_WAITING)
            /* thread has no current value; print WAITING instead */
            erts_fprintf(stderr,
                         " id=%d, current=WAITING"
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
                         ", active=%s, leader=%s"
#endif
                         "\n", id
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
                         , active, leader
#endif
                );
        else
            erts_fprintf(stderr,
                         " id=%d, current=%b64u"
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
                         ", active=%s, leader=%s"
#endif
                         "\n", id, current
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
                         , active, leader
#endif
                );
    }
    erts_fprintf(stderr, "-----------------------\n");
}
/*
 * Enqueue async job 'a' on async queue 'q'.  For jobs belonging to an
 * internal port this also prepares the later enqueue on the scheduler's
 * ready queue and pins the issuing driver so it cannot be unloaded
 * while the job is in flight.
 */
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
#ifdef USE_VM_PROBES
    int len;
#endif

    if (is_internal_port(a->port)) {
#if ERTS_USE_ASYNC_READY_Q
        /* pre-allocate enqueue state for the reply on the ready queue,
         * done here so async_reply() cannot fail on allocation */
        ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
        a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
#endif
        /* make sure the driver will stay around */
        if (a->hndl)
            erts_ddll_reference_referenced_driver(a->hndl);
    }

#if ERTS_ASYNC_PRINT_JOB
    erts_fprintf(stderr, "-> %ld\n", a->async_id);
#endif

    erts_thr_q_enqueue(&q->thr_q, a);
#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(aio_pool_add)) {
        DTRACE_CHARBUF(port_str, 16);

        erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
        /* DTRACE TODO: Get the queue length from erts_thr_q_enqueue() ? */
        len = -1;
        DTRACE2(aio_pool_add, port_str, len);
    }
    gcc_optimizer_hack++;
#endif
}
/*
 * Deliver a completed async job back toward its scheduler.
 * With ERTS_USE_ASYNC_READY_Q the job is enqueued on the originating
 * scheduler's ready queue using the enqueue state prepared in
 * async_add(); otherwise the ready callback is invoked directly and
 * the job is freed here.
 */
static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
#if ERTS_USE_ASYNC_READY_Q
    ErtsAsyncReadyQ *arq;

#if ERTS_ASYNC_PRINT_JOB
    erts_fprintf(stderr, "=>> %ld\n", a->async_id);
#endif

    arq = async_ready_q(a->sched_id);

#if ERTS_USE_ASYNC_READY_ENQ_MTX
    /* enqueue must be serialized on this configuration */
    erts_mtx_lock(&arq->x.data.enq_mtx);
#endif

    erts_thr_q_enqueue_prepared(&arq->thr_q, (void *) a, prep_enq);

#if ERTS_USE_ASYNC_READY_ENQ_MTX
    erts_mtx_unlock(&arq->x.data.enq_mtx);
#endif

#else /* ERTS_USE_ASYNC_READY_Q */

    call_async_ready(a);
    erts_free(ERTS_ALC_T_ASYNC, (void *) a);

#endif /* ERTS_USE_ASYNC_READY_Q */
}
/*
 * Print the locks currently held by the given thread to stderr.
 *
 * Fix: the original evaluated l_lcks->thread_name in the "no locks"
 * branch even when l_lcks itself was NULL (the `!l_lcks ||` test
 * proves NULL is an expected input), dereferencing a null pointer in
 * the error-reporting path.  Fall back to "unknown" in that case.
 */
static void print_curr_locks(erts_lc_locked_locks_t *l_lcks)
{
    erts_lc_locked_lock_t *l_lck;
    if (!l_lcks || !l_lcks->locked.first)
        erts_fprintf(stderr,
                     "Currently no locks are locked by the %s thread.\n",
                     l_lcks ? l_lcks->thread_name : "unknown");
    else {
        erts_fprintf(stderr,
                     "Currently these locks are locked by the %s thread:\n",
                     l_lcks->thread_name);
        for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
            print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
    }
}
/*
 * Allocate a fresh chunk of ERTS_LC_FB_CHUNK_SIZE lock-checker free
 * blocks.  Block 0 is returned to the caller; blocks 1..N-1 are linked
 * together and spliced onto the global free list.  On allocation
 * failure the lock checker aborts.
 */
static void *lc_core_alloc(void)
{
    int i;
    erts_lc_free_block_t *fbs;
    /* drop the checker lock while calling malloc(); re-taken below */
    lc_unlock();
    fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
                                          * ERTS_LC_FB_CHUNK_SIZE);
    if (!fbs) {
        erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
        lc_abort();
    }
    /* link blocks 1..N-2 into a chain; debug builds poison each block
     * with 0xdf before use */
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    /* splice the chain (blocks 1..N-1) onto the global free list */
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
    free_blocks = &fbs[1];
    return (void *) &fbs[0];
}
/*
 * Fatal error path: an operation was attempted on a lock that has not
 * been initialized.  Dump the caller's held locks and abort.
 */
static void
uninitialized_lock(void)
{
    erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
    print_curr_locks(get_my_locked_locks());
    lc_abort();
}
/* Empty loop body */ } for (i = specified; i < 3; i++) { if (tp[i+1] != am_Underscore) { goto error; } } mfa.module = tp[1]; mfa.function = tp[2]; if (is_small(tp[3])) { mfa.arity = signed_val(tp[3]); } if (!erts_try_seize_code_write_permission(BIF_P)) { ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_thr_progress_block(); erts_bp_match_functions(&f, &mfa, specified); if (boolean == am_true) { erts_set_debug_break(&f); erts_install_breakpoints(&f); erts_commit_staged_bp(); } else { erts_clear_debug_break(&f); erts_commit_staged_bp(); erts_uninstall_breakpoints(&f); } erts_consolidate_bp_data(&f, 1); res = make_small(f.matched); erts_bp_free_matched_functions(&f); erts_thr_progress_unblock(); erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; error: BIF_ERROR(p, BADARG); } #if 0 /* Kept for conveninence when hard debugging. */ void debug_dump_code(BeamInstr *I, int num) { BeamInstr *code_ptr = I; BeamInstr *end = code_ptr + num; erts_dsprintf_buf_t *dsbufp; BeamInstr instr; int i; dsbufp = erts_create_tmp_dsbuf(0); while (code_ptr < end) { erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr); instr = (BeamInstr) code_ptr[0]; for (i = 0; i < NUM_SPECIFIC_OPS; i++) { if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') { code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp, i, opc[i].sz-1, code_ptr) + 1; break; } } if (i >= NUM_SPECIFIC_OPS) { erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, "unknown " HEXF "\n", instr); code_ptr++; } } dsbufp->str[dsbufp->str_len] = 0; erts_fprintf(stderr,"%s", dsbufp->str); erts_destroy_tmp_dsbuf(dsbufp); }
int erts_check_async_ready(void *varq) { ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq; int res = 1; int i; for (i = 0; i < ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ; i++) { ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(&arq->thr_q); if (!a) { res = 0; break; } #if ERTS_ASYNC_PRINT_JOB erts_fprintf(stderr, "<<= %ld\n", a->async_id); #endif erts_thr_q_append_finalize_dequeue_data(&arq->fin_deq, &a->q.fin_deq); call_async_ready(a); erts_free(ERTS_ALC_T_ASYNC, (void *) a); } erts_thr_q_finalize_dequeue(&arq->fin_deq); return res; }
static void *async_main(void* arg) { ErtsAsyncQ *aq = (ErtsAsyncQ *) arg; erts_tse_t *tse = async_thread_init(aq); ERTS_MSACC_DECLARE_CACHE(); while (1) { ErtsThrQPrepEnQ_t *prep_enq; ErtsAsync *a = async_get(&aq->thr_q, tse, &prep_enq); if (is_nil(a->port)) break; /* Time to die */ ERTS_MSACC_UPDATE_CACHE(); #if ERTS_ASYNC_PRINT_JOB erts_fprintf(stderr, "<- %ld\n", a->async_id); #endif ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT); a->async_invoke(a->async_data); ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER); async_reply(a, prep_enq); } return NULL; }
/*
 * Look up 'id' in process p's dictionary.  Returns the associated
 * value, or am_undefined when there is no dictionary or no entry.
 *
 * A hash bucket holds either a single {Key, Value} tuple (boxed), a
 * list of such tuples (hash collisions), or NIL.  Any other term means
 * the dictionary is corrupt and the emulator is taken down.
 */
Eterm erts_pd_hash_get(Process *p, Eterm id)
{
    unsigned int hval;
    Eterm tmp;
    ProcDict *pd = p->dictionary;

    if (pd == NULL)
        return am_undefined;
    hval = pd_hash_value(pd, id);
    tmp = ARRAY_GET(pd, hval);
    if (is_boxed(tmp)) { /* Tuple */
        ASSERT(is_tuple(tmp));
        if (EQ(tuple_val(tmp)[1], id)) {
            return tuple_val(tmp)[2];
        }
    } else if (is_list(tmp)) {
        /* collision chain: scan for the tuple whose key equals id */
        for (; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
            ;
        }
        if (tmp != NIL) {
            return tuple_val(TCAR(tmp))[2];
        }
    } else if (is_not_nil(tmp)) {
        /* neither tuple, list, nor NIL: dictionary is damaged */
#ifdef DEBUG
        erts_fprintf(stderr,
                     "Process dictionary for process %T is broken, trying to "
                     "display term found in line %d:\n"
                     "%T\n", p->common.id, __LINE__, tmp);
#endif
        erl_exit(1, "Damaged process dictionary found during get/1.");
    }
    return am_undefined;
}
/*
 * Store 'term' at index 'ndx' in the dictionary's bucket array,
 * creating or growing the array as needed; newly created slots are
 * initialized to NIL.  Returns the previous value at that slot.
 */
static Eterm array_put(ProcDict **ppdict, unsigned int ndx, Eterm term)
{
    unsigned int i;
    Eterm ret;

    if (*ppdict == NULL) {
        /* first insertion: allocate an array large enough for ndx */
        Uint siz = next_array_size(ndx+1);
        ProcDict *p;

        p = PD_ALLOC(PD_SZ2BYTES(siz));
        for (i = 0; i < siz; ++i)
            p->data[i] = NIL;
        p->size = siz;
        p->homeSize = p->splitPosition = p->numElements = p->used = 0;
        *ppdict = p;
    } else if (ndx >= (*ppdict)->size) {
        /* grow the existing array and NIL-fill the new tail */
        Uint osize = (*ppdict)->size;
        Uint nsize = next_array_size(ndx+1);
        *ppdict = PD_REALLOC(((void *) *ppdict),
                             PD_SZ2BYTES(osize),
                             PD_SZ2BYTES(nsize));
        for (i = osize; i < nsize; ++i)
            (*ppdict)->data[i] = NIL;
        (*ppdict)->size = nsize;
    }
    ret = (*ppdict)->data[ndx];
    (*ppdict)->data[ndx] = term;
    /* 'used' tracks the highest slot ever written, one past the index */
    if ((ndx + 1) > (*ppdict)->used)
        (*ppdict)->used = ndx + 1;
#ifdef HARDDEBUG
    HDEBUGF(("array_put: (*ppdict)->size = %d, (*ppdict)->used = %d, ndx = %d",
             (*ppdict)->size, (*ppdict)->used, ndx));
    erts_fprintf(stderr, "%T", term);
#endif /* HARDDEBUG */
    return ret;
}
/*
 * Print the locks currently held by the given thread to stderr.
 *
 * Fix: the original evaluated thr->thread_name in the "no locks"
 * branch even when thr was NULL (the `!thr ||` test proves NULL is an
 * expected input), dereferencing a null pointer in the
 * error-reporting path.  Fall back to "unknown" in that case.
 */
static void print_curr_locks(lc_thread_t *thr)
{
    lc_locked_lock_t *ll;
    if (!thr || !thr->locked.first)
        erts_fprintf(stderr,
                     "Currently no locks are locked by the %s thread.\n",
                     thr ? thr->thread_name : "unknown");
    else {
        erts_fprintf(stderr,
                     "Currently these locks are locked by the %s thread:\n",
                     thr->thread_name);
        for (ll = thr->locked.first; ll; ll = ll->next)
            raw_print_lock(" ", ll->id, ll->extra, ll->flags,
                           ll->file, ll->line, "\n");
    }
}
/*
 * Fatal error path: the thread attempted to take (with the given
 * options) a lock it already holds.  Print the offense, dump held
 * locks, and abort.
 */
static void
lock_twice(char *prefix, lc_thread_t *thr, erts_lc_lock_t *lck,
           erts_lock_options_t options)
{
    erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(thr);
    lc_abort();
}
/*
 * Apply the suggested scheduler stack size (in KWords) to the main
 * thread's stack limit via setrlimit(RLIMIT_STACK), when the platform
 * exposes the rlimit API.  On any failure — including missing OS
 * support — print a diagnostic and exit through erts_usage().
 */
static void
set_main_stack_size(void)
{
    if (erts_sched_thread_suggested_stack_size > 0) {
# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
        struct rlimit rl;
        /* suggested size is in kilowords; convert to bytes */
        int bytes = erts_sched_thread_suggested_stack_size
            * sizeof(Uint) * 1024;
        int failed = (getrlimit(RLIMIT_STACK, &rl) != 0);
        if (!failed) {
            rl.rlim_cur = bytes;
            failed = (setrlimit(RLIMIT_STACK, &rl) != 0);
        }
        if (failed) {
            erts_fprintf(stderr, "failed to set stack size for scheduler "
                         "thread to %d bytes\n", bytes);
            erts_usage();
        }
# else
        erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
        erts_usage();
# endif
    }
}
/*
 * Called when a lock-checker assertion fails.  Prints the source
 * location and failing expression, dumps the locks held by the
 * calling thread, and aborts.  The return value is never reached; it
 * only keeps the invoking assert macro expression-shaped.
 */
int
erts_lc_assert_failed(char *file, int line, char *assertion)
{
    erts_fprintf(stderr,
                 "%s:%d: Lock check assertion \"%s\" failed!\n",
                 file, line, assertion);
    print_curr_locks(get_my_locked_locks());
    lc_abort();
    return 0;
}
/*
 * Fatal error path: a lock is being unlocked with options that do not
 * match the operation it was locked with.  Print the offense, dump
 * held locks, and abort.
 */
static void
unlock_op_mismatch(lc_thread_t *thr, erts_lc_lock_t *lck,
                   erts_lock_options_t options)
{
    erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
    print_lock("", lck, " lock which mismatch previous lock operation!\n");
    print_curr_locks(thr);
    lc_abort();
}
/*
 * Fatal error path: the thread attempted to take (with the given
 * rw-op flags) a lock it already holds.  Print the offense, dump held
 * locks, and abort.
 */
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks,
           erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
/*
 * Fatal error path: a lock is being unlocked with rw-op flags that do
 * not match the operation it was locked with.  Print the offense,
 * dump held locks, and abort.
 */
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
                   Uint16 op_flags)
{
    erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
    print_lock("", lck, " lock which mismatch previous lock operation!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
/*
 * Dump the accumulated lock-order graph to "lc_graph.<pid>" in the
 * current working directory, as a list of Erlang terms (the header
 * comment written into the file documents the tuple format).
 * Returns am_ok on success, am_error if the file cannot be opened.
 */
Eterm erts_lc_dump_graph(void)
{
    const char* basename = "lc_graph.";
    char filename[40];
    lc_matrix_t* tot = &tot_lc_matrix;
    lc_thread_t* thr;
    int i, j, name_max = 0;
    FILE* ff;

    /* merge every thread's dependency matrix into the global total */
    lc_lock_threads();
    for (thr = lc_threads; thr; thr = thr->next) {
        collect_matrix(&thr->matrix);
    }
    lc_unlock_threads();

    sys_strcpy(filename, basename);
    sys_get_pid(filename + strlen(basename),
                sizeof(filename) - strlen(basename));
    ff = fopen(filename, "w");
    if (!ff)
        return am_error;

    /* widest lock name, used for column alignment below */
    for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
        int len = strlen(erts_lock_order[i].name);
        if (name_max < len)
            name_max = len;
    }
    fputs("%This file was generated by erts_debug:lc_graph()\n\n", ff);
    fputs("%{ThisLockName, ThisLockId, LockedDirectlyBeforeThis, LockedIndirectlyBeforeThis}\n", ff);
    fprintf(ff, "[{%*s, %2d}", name_max, "\"NO LOCK\"", 0);
    for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
        char* delim = "";
        fprintf(ff, ",\n {%*s, %2d, [", name_max, erts_lock_order[i].name, i);
        /* matrix bit 1: lock j was taken directly before lock i */
        for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
            if (tot->m[i][j] & 1) {
                fprintf(ff, "%s%d", delim, j);
                delim = ",";
            }
        }
        fprintf(ff, "], [");
        delim = "";
        /* matrix value 2: lock j was taken indirectly before lock i */
        for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
            if (tot->m[i][j] == 2) {
                fprintf(ff, "%s%d", delim, j);
                delim = ",";
            }
        }
        fputs("]}", ff);
    }
    fputs("].", ff);
    fclose(ff);
    erts_fprintf(stderr, "Created file '%s' in current working directory\n",
                 filename);
    return am_ok;
}
/*
 * Map a lock name to its index in the global lock order table.
 * Aborts via lc_abort() when the name is NULL/empty or not present in
 * the table; the trailing return is never reached.
 */
Sint16
erts_lc_get_lock_order_id(char *name)
{
    if (name && name[0] != '\0') {
        int ix;
        for (ix = 0; ix < ERTS_LOCK_ORDER_SIZE; ix++) {
            if (strcmp(erts_lock_order[ix].name, name) == 0)
                return ix;
        }
        erts_fprintf(stderr, "Lock name '%s' missing in lock order "
                     "(update erl_lock_check.c)\n", name);
    }
    else {
        erts_fprintf(stderr, "Missing lock name\n");
    }
    lc_abort();
    return (Sint16) -1;
}
/*
 * Fatal error path: a lock was taken in violation of the lock type
 * order.  Print the offending operation, dump held locks, and abort.
 *
 * Fix: corrected the misspelled error message ("occured" ->
 * "occurred").
 */
static void
type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
                     erts_lc_lock_t *lck)
{
    erts_fprintf(stderr, "Lock type order violation occurred when ");
    print_lock(op, lck, "!\n");
    ASSERT(l_lcks);
    print_curr_locks(l_lcks);
    lc_abort();
}
/*
 * printf-style debug helper: format into a bounded local buffer and
 * write it to stderr with a CRLF terminator.
 *
 * Fix: pass sizeof(buffer) to erts_vsnprintf instead of repeating the
 * magic constant 1024, so the bound stays correct if the buffer size
 * is ever changed.
 */
static void
my_debug_printf(char *fmt, ...)
{
    char buffer[1024];
    va_list args;

    va_start(args, fmt);
    erts_vsnprintf(buffer, sizeof(buffer), fmt, args);
    va_end(args);
    erts_fprintf(stderr, "%s\r\n", buffer);
}
/*
 * Debug helper: print "module:function(Arg0, Arg1, ...)" for the
 * function containing 'addr', or "???" when addr is not inside any
 * known function.  Argument 0 is taken from x0, the rest from reg[].
 */
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
    BeamInstr* fptr = find_function_from_pc(addr);

    if (fptr == NULL) {
        erts_fprintf(stderr, "???\n");
    } else {
        int i;
        int arity;

        addr = fptr;
        /* function header layout: [0]=module, [1]=function, [2]=arity */
        arity = addr[2];
        erts_fprintf(stderr, HEXF ": %T:%T(", addr,
                     (Eterm) addr[0], (Eterm) addr[1]);
        for (i = 0; i < arity; i++)
            erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
        erts_fprintf(stderr, ")\n");
    }
}
/*
 * Debug helper: print "module:function(Arg0, Arg1, ...)" for the
 * function containing 'addr', or "???" when addr is not inside any
 * known function.  Argument 0 is taken from x0, the rest from reg[].
 */
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
    ErtsCodeMFA* cmfa = find_function_from_pc(addr);

    if (cmfa == NULL) {
        erts_fprintf(stderr, "???\n");
    } else {
        int i;
        int arity = cmfa->arity;

        erts_fprintf(stderr, HEXF ": %T:%T(", addr,
                     cmfa->module, cmfa->function);
        for (i = 0; i < arity; i++)
            erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
        erts_fprintf(stderr, ")\n");
    }
}
/*
 * Report a fatal thread-related error to stderr and abort.  err == 0
 * means no errno value is available; otherwise strerror(err) is
 * included in the message.  Never returns.
 */
__decl_noreturn void
erts_thr_fatal_error(int err, char *what)
{
    char *errstr = err ? strerror(err) : NULL;
    const char *msg = errstr ? errstr : "";
    const char *sep = errstr ? " " : "";

    erts_fprintf(stderr, "Failed to %s: %s%s(%d)\n", what, msg, sep, err);
    abort();
}
/*
 * Disable memory tracing after a failure: clear the enabled flag,
 * close the trace socket, and report the reason on stderr.  When a
 * nonzero errno value is supplied it is appended, symbolically if
 * erl_errno_id() recognizes it, otherwise numerically.
 */
static void
disable_trace(int error, char *reason, int eno)
{
    char *mt_dis = "Memory trace disabled";

    erts_mtrace_enabled = 0;
    erts_sock_close(socket_desc);
    socket_desc = ERTS_SOCK_INVALID_SOCKET;

    if (eno == 0) {
        erts_fprintf(stderr, "%s: %s\n", mt_dis, reason);
    } else {
        char *eno_str = erl_errno_id(eno);
        if (strcmp(eno_str, "unknown") == 0)
            erts_fprintf(stderr, "%s: %s: %d\n", mt_dis, reason, eno);
        else
            erts_fprintf(stderr, "%s: %s: %s\n", mt_dis, reason, eno_str);
    }
}
/*
 * Undo one level of thread-progress blocking.  Only when the last
 * nested block is released does this clear the block flag, hand back
 * the blocker event, and wake the other managed threads.
 */
void
erts_thr_progress_unblock(void)
{
    erts_tse_t *event;
    int id, break_id, sz, wakeup;
    ErtsThrPrgrData *tpd = thr_prgr_data(NULL);

    ASSERT(tpd->is_blocking);
    if (--tpd->is_blocking)
        return; /* outer block levels remain; nothing to release yet */

    sz = intrnl->managed.no;

    wakeup = 1;
    if (!tpd->is_managed)
        /* unmanaged thread: derive a wakeup starting slot from our id */
        id = break_id = tpd->id < 0 ? 0 : tpd->id % sz;
    else {
        break_id = tpd->id;
        id = break_id + 1;
        if (id >= sz)
            id = 0;
        if (id == break_id)
            wakeup = 0; /* we are the only managed thread; nobody to wake */
        erts_atomic32_inc_nob(&intrnl->misc.data.block_count);
    }

    /* take ownership of the blocker event and clear the slot */
    event = ((erts_tse_t *)
             erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
    ASSERT(event);
    erts_atomic_set_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);

    erts_atomic32_read_bor_relb(&intrnl->misc.data.block_count,
                                ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "unblock(%d)\n", tpd->id);
#endif
    erts_atomic32_read_band_mb(&intrnl->misc.data.lflgs,
                               ~ERTS_THR_PRGR_LFLG_BLOCK);

    if (wakeup) {
        /* round-robin over all managed slots starting after break_id,
         * waking each thread that is not in the WAITING state */
        do {
            ErtsThrPrgrVal tmp;
            tmp = read_nob(&intrnl->thr[id].data.current);
            if (tmp != ERTS_THR_PRGR_VAL_WAITING)
                wakeup_managed(id);
            if (++id >= sz)
                id = 0;
        } while (id != break_id);
    }

    return_tmp_thr_prgr_data(tpd);
    erts_tse_return(event);
}