/*
 * Turn on all of the logger's tracepoints: the NEWOBJ and FREEOBJ traces
 * plus the three event hooks in logger->hooks. Idempotent — does nothing
 * when tracing is already enabled. Always returns Qnil.
 */
static VALUE start_stat_tracing() {
    int idx;

    if (logger->enabled == Qtrue)
        return Qnil;

    rb_tracepoint_enable(logger->newobj_trace);
    rb_tracepoint_enable(logger->freeobj_trace);

    /* NOTE(review): assumes logger->hooks holds exactly 3 entries — confirm
     * against the struct definition elsewhere in this file. */
    for (idx = 0; idx < 3; idx++)
        rb_tracepoint_enable(logger->hooks[idx]);

    logger->enabled = Qtrue;
    return Qnil;
}
/*
 * Singleton helper: build a tracepoint via tracepoint_new_s with the
 * caller's arguments, enable it immediately, and return it.
 */
static VALUE tracepoint_trace_s(int argc, VALUE *argv, VALUE self) {
    VALUE tp;

    tp = tracepoint_new_s(argc, argv, self);
    rb_tracepoint_enable(tp);

    return tp;
}
static VALUE install() { rb_event_flag_t events = RUBY_INTERNAL_EVENT_GC_START | RUBY_INTERNAL_EVENT_GC_END_MARK | RUBY_INTERNAL_EVENT_GC_END_SWEEP; if (_oobgc.installed) return Qfalse; if (!_oobgc.tpval) { _oobgc.tpval = rb_tracepoint_new(0, events, gc_event_i, (void *)0); rb_ivar_set(mOOB, rb_intern("tpval"), _oobgc.tpval); } rb_tracepoint_enable(_oobgc.tpval); /* rb_gc_stat() requires memory allocation for symbol creation only at * first time. If rb_gc_stat() was called during GC at first time by * tracepoint, memory allocation caused crash. We call rb_gc_stat() here * for symbol creation. */ rb_gc_stat(sym_total_allocated_object); _oobgc.installed = 1; return Qtrue; }
/*
 * Create a tracepoint for the given event mask wired to tracepoint_handler,
 * enable it right away, and hand it back to the caller.
 */
static VALUE set_gc_hook(rb_event_flag_t event) {
    /* TODO: should repeated calls for the same event be deduplicated so the
     * hook is not installed more than once? */
    VALUE tp = rb_tracepoint_new(0, event, tracepoint_handler, 0);
    rb_tracepoint_enable(tp);
    return tp;
}
/*
 * StackProf.start(mode: ..., interval: ...)
 *
 * Start the profiler in one of four modes:
 *   :object -- sample every `interval` object allocations via a NEWOBJ
 *              tracepoint (default interval 1),
 *   :wall   -- SIGALRM/ITIMER_REAL timer sampling (default 1000 usec),
 *   :cpu    -- SIGPROF/ITIMER_PROF timer sampling (default 1000 usec),
 *   :custom -- no automatic sampling; caller samples manually.
 *
 * Returns Qtrue when started, Qfalse if already running. Raises ArgError
 * for an unknown mode or a non-positive timer interval.
 */
static VALUE stackprof_start(int argc, VALUE *argv, VALUE self) {
    struct sigaction sa;
    struct itimerval timer;
    VALUE opts = Qnil, mode = Qnil, interval = Qnil;

    if (_stackprof.running)
        return Qfalse;

    rb_scan_args(argc, argv, "0:", &opts);

    if (RTEST(opts)) {
        mode = rb_hash_aref(opts, sym_mode);
        interval = rb_hash_aref(opts, sym_interval);
    }
    if (!RTEST(mode)) mode = sym_wall;

    /* Lazily allocate the frame table on first start; counters persist
     * across start/stop cycles until explicitly reset. */
    if (!_stackprof.frames) {
        _stackprof.frames = st_init_numtable();
        _stackprof.overall_signals = 0;
        _stackprof.overall_samples = 0;
        _stackprof.during_gc = 0;
    }

    if (mode == sym_object) {
        if (!RTEST(interval)) interval = INT2FIX(1);

        objtracer = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, stackprof_newobj_handler, 0);
        rb_tracepoint_enable(objtracer);
    } else if (mode == sym_wall || mode == sym_cpu) {
        long interval_usec;

        if (!RTEST(interval)) interval = INT2FIX(1000);
        interval_usec = NUM2LONG(interval);
        if (interval_usec < 1)
            rb_raise(rb_eArgError, "interval must be a positive number of microseconds");

        sa.sa_sigaction = stackprof_signal_handler;
        sa.sa_flags = SA_RESTART | SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(mode == sym_wall ? SIGALRM : SIGPROF, &sa, NULL);

        /* BUG FIX: the interval was previously stored wholesale in tv_usec.
         * POSIX requires tv_usec < 1,000,000, so any interval >= 1 second
         * made setitimer() fail with EINVAL and silently disabled sampling.
         * Split the microsecond interval into seconds + microseconds. */
        timer.it_interval.tv_sec = interval_usec / 1000000;
        timer.it_interval.tv_usec = interval_usec % 1000000;
        timer.it_value = timer.it_interval;
        setitimer(mode == sym_wall ? ITIMER_REAL : ITIMER_PROF, &timer, 0);
    } else if (mode == sym_custom) {
        /* sampled manually */
        interval = Qnil;
    } else {
        rb_raise(rb_eArgError, "unknown profiler mode");
    }

    _stackprof.running = 1;
    _stackprof.mode = mode;
    _stackprof.interval = interval;

    return Qtrue;
}
/**
 * rb_enable_traces
 * Implementation of Tracer#enable_traces. Lazily registers the
 * @file_tracepoint (and, on Rubies that define RUBY_EVENT_FIBER_SWITCH, the
 * @fiber_tracepoint) and switches each one on if it is not already active.
 * Returns Qnil.
 */
static VALUE rb_enable_traces(VALUE self) {
    VALUE file_tp = register_tracepoint(self, FILE_TRACEPOINT_EVENTS,
                                        "@file_tracepoint",
                                        file_tracepoint_callback);

    if (RTEST(file_tp) && !RTEST(rb_tracepoint_enabled_p(file_tp)))
        rb_tracepoint_enable(file_tp);

#ifdef RUBY_EVENT_FIBER_SWITCH
    {
        VALUE fiber_tp = register_tracepoint(self, RUBY_EVENT_FIBER_SWITCH,
                                            "@fiber_tracepoint",
                                            fiber_tracepoint_callback);
        if (RTEST(fiber_tp) && !RTEST(rb_tracepoint_enabled_p(fiber_tp)))
            rb_tracepoint_enable(fiber_tp);
    }
#endif

    return Qnil;
}
/*
 * call-seq:
 *	trace.enable		-> true or false
 *	trace.enable { block }	-> obj
 *
 * Activates the trace
 *
 * Return true if trace was enabled.
 * Return false if trace was disabled.
 *
 *	trace.enabled?	#=> false
 *	trace.enable	#=> false (previous state)
 *			#   trace is enabled
 *	trace.enabled?	#=> true
 *	trace.enable	#=> true (previous state)
 *			#   trace is still enabled
 *
 * If a block is given, the trace will only be enabled within the scope of the
 * block.
 *
 *	trace.enabled?
 *	#=> false
 *
 *	trace.enable do
 *	    trace.enabled?
 *	    # only enabled for this block
 *	end
 *
 *	trace.enabled?
 *	#=> false
 *
 * Note: You cannot access event hooks within the block.
 *
 *	trace.enable { p tp.lineno }
 *	#=> RuntimeError: access from outside
 *
 */
static VALUE tracepoint_enable_m(VALUE tpval) {
    rb_tp_t *tp = tpptr(tpval);
    int was_tracing = tp->tracing;

    rb_tracepoint_enable(tpval);

    if (!rb_block_given_p()) {
        return was_tracing ? Qtrue : Qfalse;
    }

    /* Run the block, then restore the pre-call tracing state no matter how
     * the block exits (normal return, raise, or throw). */
    return rb_ensure(rb_yield, Qnil,
                     was_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                     tpval);
}
/*
 * Build the debugger's seven tracepoints (line, call, return, end, raw
 * call, raw return, raise) the first time this runs, cache them in the
 * file-level `tracepoints` array, and (re)enable every entry on each call.
 */
static void register_tracepoints(VALUE self) {
    int i;
    VALUE traces = tracepoints;

    UNUSED(self);

    if (NIL_P(traces)) {
        traces = rb_ary_new();

        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil, RUBY_EVENT_LINE, line_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil, RUBY_EVENT_CALL, call_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil,
                                      RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN,
                                      return_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil, RUBY_EVENT_END, end_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil,
                                      RUBY_EVENT_C_CALL | RUBY_EVENT_B_CALL |
                                          RUBY_EVENT_CLASS,
                                      raw_call_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil, RUBY_EVENT_C_RETURN,
                                      raw_return_event, 0));
        rb_ary_push(traces,
                    rb_tracepoint_new(Qnil, RUBY_EVENT_RAISE, raise_event, 0));

        tracepoints = traces;
    }

    for (i = 0; i < RARRAY_LENINT(traces); i++)
        rb_tracepoint_enable(rb_ary_entry(traces, i));
}