/* Arm the one-shot line-event hook (__catch_line_event) so it fires on the
 * next executed Ruby line.  Returns 0 when the hook was installed, 1 when
 * the VM is inside garbage collection (hooks must not be added mid-GC). */
int debase_start_attach()
{
    int in_gc = rb_during_gc();

    if (!in_gc)
        rb_add_event_hook(__catch_line_event, RUBY_EVENT_LINE, (VALUE) NULL);

    return in_gc ? 1 : 0;
}
/* Sampling-profiler signal handler (SIGPROF/SIGALRM style delivery).
 *
 * Counts every signal delivery in overall_signals.  When the VM is inside
 * garbage collection, unwinding the Ruby stack is unsafe, so the tick is
 * only tallied (during_gc + overall_samples); otherwise the actual sample
 * capture is deferred to stackprof_job_handler via a postponed job, which
 * the VM runs at a safe point.
 *
 * Fixes: the GC branch previously used a comma-operator expression
 * (`a++, b++;`) to hide two statements in an unbraced if, and none of the
 * unused parameters were suppressed.
 */
static void stackprof_signal_handler(int sig, siginfo_t *sinfo, void *ucontext)
{
    (void)sig;
    (void)sinfo;
    (void)ucontext;

    _stackprof.overall_signals++;

    if (rb_during_gc()) {
        /* Cannot sample mid-GC: record the missed sample and move on. */
        _stackprof.during_gc++;
        _stackprof.overall_samples++;
    } else {
        rb_postponed_job_register_one(0, stackprof_job_handler, 0);
    }
}
/*
 * Ruby 1.9 variant: walk the current thread's VM control-frame stack and
 * record up to max_depth frames into `result` through the SAVE_FRAME()
 * macro (defined elsewhere in this file; presumably it stores entries
 * derived from klass/method/self and advances `depth` -- TODO confirm).
 *
 * Returns the number of slots written.  When called while the VM is
 * collecting garbage, frame walking is unsafe, so a single sentinel
 * pseudo-frame (the address of rb_gc) is returned instead.
 */
int rb_stack_trace(void** result, int max_depth) {
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
    VALUE klass, self;
    ID method;
    int depth = 0;

    if (max_depth == 0)
        return 0;

    /* During GC: report one pseudo-frame pointing at rb_gc and stop. */
    if (rb_during_gc()) {
        result[0] = rb_gc;
        return 1;
    }

    /* depth+3 <= max_depth: SAVE_FRAME() apparently consumes up to three
     * slots per frame -- TODO confirm against the macro definition. */
    while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp) && depth+3 <= max_depth) {
        rb_iseq_t *iseq = cfp->iseq;

        /* Ruby-level method frames: attribute via the iseq metadata. */
        if (iseq && iseq->type == ISEQ_TYPE_METHOD) {
            self = 0; // maybe use cfp->self here, but iseq->self is a ISeq ruby obj
            klass = iseq->klass;
            method = iseq->defined_method_id;
            SAVE_FRAME();
        }

        if (depth+3 > max_depth)
            break;

        /* Method/C-function frames: attribute via the method entry. */
        switch (VM_FRAME_TYPE(cfp)) {
            case VM_FRAME_MAGIC_METHOD:
            case VM_FRAME_MAGIC_CFUNC:
                self = cfp->self;
#ifdef HAVE_METHOD_H
                /* Frame has no method entry: it cannot be attributed, so
                 * skip recording it (break leaves the switch only; the
                 * walk continues with the previous frame). */
                if (!cfp->me)
                    break;
                klass = cfp->me->klass;
                method = cfp->me->called_id;
#else
                klass = cfp->method_class;
                method = cfp->method_id;
#endif
                SAVE_FRAME();
                break;
        }

        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    assert(depth <= max_depth);
    return depth;
}
/* One-shot RUBY_EVENT_LINE hook installed by debase_start_attach().
 *
 * On the first line event it removes itself, then (unless the VM is inside
 * GC) calls __func_to_set_breakpoint_at() -- a marker function an attached
 * native debugger presumably sets a breakpoint on; TODO confirm with the
 * debugger integration.
 *
 * Fixes: the `data` parameter was unused but not suppressed, and the other
 * parameters were suppressed with the unusual `(void)sizeof(x)` form
 * instead of the conventional `(void)x` cast.
 *
 * NOTE(review): the leading-double-underscore name is reserved for the
 * implementation in C; kept unchanged because debase_start_attach()
 * registers this exact symbol.
 */
static void __catch_line_event(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
{
    (void)evflag;
    (void)data;
    (void)self;
    (void)mid;
    (void)klass;

    /* One-shot: deregister before doing anything else. */
    rb_remove_event_hook(__catch_line_event);

    if (rb_during_gc())
        return;

    __func_to_set_breakpoint_at();
}
/*
 * Watchdog thread body: sleep for arg->timeout_sec, then force the process
 * to exit with arg->exitcode.
 *
 * After the timeout it announces the forced exit on stderr and publishes
 * the exit code under exitcode_mutex.  Unless the VM is mid-GC, it then
 * installs the stack-trace dumper and pokes the watched thread with
 * SIGCONT so the dump runs in that thread; during GC it exits immediately
 * (dumping Ruby stacks mid-collection is unsafe).
 *
 * BUG FIX: exitcode_mutex was locked but never unlocked, so the lock was
 * held across pthread_join() (and into exit()).  If the signalled thread's
 * dump/exit path reads `exitcode` under the same mutex, that is a
 * deadlock; release the mutex as soon as the shared value is published.
 * NOTE(review): the reader side is outside this chunk -- confirm it locks
 * exitcode_mutex.
 */
void * sleep_thread_main(void *_arg)
{
    struct wait_args *arg = _arg;

    sleep(arg->timeout_sec);

    fprintf(stderr, "Process exits(ExtremeTimeout::timeout)\n");
    fflush(stderr);

    pthread_mutex_lock(&exitcode_mutex);
    exitcode = arg->exitcode;
    pthread_mutex_unlock(&exitcode_mutex);

    if (!rb_during_gc()) {
        set_stacktrace_dumper();
        /* Wake the watched thread so the dumper runs there, then wait for
         * it to finish (it is expected to terminate the process). */
        if (pthread_kill(arg->running_thread, SIGCONT) == 0) {
            pthread_join(arg->running_thread, NULL);
        }
    } else {
        exit(exitcode);
    }
    return NULL;
}
/*
 * Ruby 1.8 variant: walk the interpreter's ruby_frame linked list and
 * record up to max_depth frames into `result` through the SAVE_FRAME()
 * macro (defined elsewhere in this file; presumably it stores entries
 * derived from klass/method/self and advances `depth` -- TODO confirm).
 *
 * Returns the number of slots written; 0 when max_depth is 0 or the walk
 * segfaulted; 1 with a single rb_gc sentinel frame when called during GC.
 *
 * ruby_frame can be inconsistent when sampled from a signal handler, so
 * the walk runs under a temporary SIGSEGV handler; segv_handler presumably
 * longjmps back to saved_location so a fault turns into a clean return 0
 * -- TODO confirm the handler's implementation.
 */
int rb_stack_trace(void** result, int max_depth) {
    struct FRAME *frame = ruby_frame;
    NODE *n;
    VALUE klass, self;
    ID method;
    int depth = 0;

    if (max_depth == 0)
        return 0;

#ifdef HAVE_RB_DURING_GC
    /* During GC: report one pseudo-frame pointing at rb_gc and stop. */
    if (rb_during_gc()) {
        result[0] = rb_gc;
        return 1;
    }
#endif

    // should not be possible to get here and already have a saved signal handler
    assert(!saved_handler);

    // ruby_frame is occasionally inconsistent, so temporarily catch segfaults
    saved_handler = signal(SIGSEGV, segv_handler);

    /* A non-zero return from _setjmp means the walk below faulted and
     * segv_handler jumped back here: restore the handler and bail out. */
    if (_setjmp(saved_location)) {
        signal(SIGSEGV, saved_handler);
        saved_handler = NULL;
        return 0;
    }

    /*
    // XXX does it make sense to track allocations or not?
    if (frame->last_func == ID_ALLOCATOR) {
        frame = frame->prev;
    }

    // XXX SIGPROF can come in while ruby_frame is in an inconsistent state (rb_call0), so we ignore the top-most frame
    if (frame->last_func && frame->last_class) {
        self = frame->self;
        klass = frame->last_class;
        method = frame->last_func;
        SAVE_FRAME();
    }
    */

    for (; frame && (n = frame->node); frame = frame->prev) {
        /* Attribution data lives on the *previous* frame (the caller). */
        if (frame->prev && frame->prev->last_func) {
            /* Skip adjacent frames sharing the same node and function
             * (duplicate interpreter entries for one logical call). */
            if (frame->prev->node == n) {
                if (frame->prev->last_func == frame->last_func) continue;
            }
            /* depth+3 > max_depth: SAVE_FRAME() apparently consumes up to
             * three slots per frame -- TODO confirm against the macro. */
            if (depth+3 > max_depth) break;
            self = frame->prev->self;
            klass = frame->prev->last_class;
            method = frame->prev->last_func;
            SAVE_FRAME();
        }
    }

    /* Restore the original SIGSEGV disposition before returning. */
    signal(SIGSEGV, saved_handler);
    saved_handler = NULL;

    assert(depth <= max_depth);
    return depth;
}