/*
 * Disables every tracepoint owned by the global logger and marks it as
 * disabled.  Always returns Qnil.
 *
 * NOTE(review): only hooks[0] is tested before disabling hooks[0..2] —
 * this assumes the three stat hooks are always created together; confirm
 * at the creation site.
 */
static VALUE
stop_stat_tracing(void)   /* (void): empty parens would declare unspecified args */
{
    if (logger->hooks[0] != 0) {
        rb_tracepoint_disable(logger->hooks[0]);
        rb_tracepoint_disable(logger->hooks[1]);
        rb_tracepoint_disable(logger->hooks[2]);
    }
    if (logger->newobj_trace) {
        rb_tracepoint_disable(logger->newobj_trace);
        rb_tracepoint_disable(logger->freeobj_trace);
    }
    logger->enabled = Qfalse;
    return Qnil;
}
/*
 * Stops the profiler.  Returns Qfalse if it was not running, Qtrue
 * otherwise.  Tears down whatever sampling mechanism the active mode
 * installed: the allocation tracepoint for :object mode, or the interval
 * timer plus its signal handler for :wall / :cpu modes.  :custom mode is
 * sampled manually and needs no teardown.
 */
static VALUE
stackprof_stop(VALUE self)
{
    if (!_stackprof.running) {
        return Qfalse;
    }
    _stackprof.running = 0;

    if (_stackprof.mode == sym_object) {
        rb_tracepoint_disable(objtracer);
    }
    else if (_stackprof.mode == sym_wall || _stackprof.mode == sym_cpu) {
        int is_wall = (_stackprof.mode == sym_wall);
        struct itimerval stop_timer;
        struct sigaction ignore_action;

        /* A zeroed itimerval cancels the pending interval timer. */
        memset(&stop_timer, 0, sizeof(stop_timer));
        setitimer(is_wall ? ITIMER_REAL : ITIMER_PROF, &stop_timer, 0);

        /* Ignore any timer signal that is already queued. */
        ignore_action.sa_handler = SIG_IGN;
        ignore_action.sa_flags = SA_RESTART;
        sigemptyset(&ignore_action.sa_mask);
        sigaction(is_wall ? SIGALRM : SIGPROF, &ignore_action, NULL);
    }
    else if (_stackprof.mode == sym_custom) {
        /* sampled manually */
    }
    else {
        rb_raise(rb_eArgError, "unknown profiler mode");
    }

    return Qtrue;
}
/*
 * Walks the registered tracepoints from last to first and disables each
 * one.  The receiver is unused.
 */
static void
clear_tracepoints(VALUE self)
{
    int idx = RARRAY_LENINT(tracepoints);

    UNUSED(self);
    while (idx-- > 0) {
        rb_tracepoint_disable(rb_ary_entry(tracepoints, idx));
    }
}
/**
 * rb_disable_traces
 * This is the implementation of the Tracer#disable_traces method.  It
 * disables Tracer#file_tracepoint, Tracer#fiber_tracepoint, return event
 * tracing, and line event tracing for all threads.  Returns Qnil.
 */
static VALUE
rb_disable_traces(VALUE self)
{
    VALUE file_tracepoint;
    VALUE fiber_tracepoint;
    VALUE threads;
    VALUE thread;
    long c_threads_len;   /* long: RARRAY_LEN returns long; int would truncate */
    long i;
    ID alive_q_id;
    ID list_id;

    CONST_ID(alive_q_id, "alive?");
    CONST_ID(list_id, "list");

    file_tracepoint = rb_iv_get(self, "@file_tracepoint");
    threads = rb_funcall(rb_cThread, list_id, 0);
    c_threads_len = RARRAY_LEN(threads);
    UNUSED(fiber_tracepoint);

    if (RTEST(file_tracepoint) && RTEST(rb_tracepoint_enabled_p(file_tracepoint)))
        rb_tracepoint_disable(file_tracepoint);

#ifdef RUBY_EVENT_FIBER_SWITCH
    fiber_tracepoint = rb_iv_get(self, "@fiber_tracepoint");
    if (RTEST(fiber_tracepoint) && RTEST(rb_tracepoint_enabled_p(fiber_tracepoint)))
        rb_tracepoint_disable(fiber_tracepoint);
#endif

    for (i = 0; i < c_threads_len; i++) {
        /* Fetch each element with rb_ary_entry instead of caching a raw
         * RARRAY_PTR: the rb_funcall / tracepoint calls below may trigger
         * GC (and compaction on newer Rubies), which can invalidate a
         * cached pointer into the array's storage. */
        thread = rb_ary_entry(threads, i);
        if (RTEST(rb_funcall(thread, alive_q_id, 0))) {
            disable_line_trace_for_thread(thread);
            disable_return_trace_for_thread(thread);
        }
    }

    return Qnil;
}
/*
 * call-seq:
 *	trace.disable		-> true or false
 *	trace.disable { block } -> obj
 *
 * Deactivates the trace
 *
 * Return true if trace was enabled.
 * Return false if trace was disabled.
 *
 *	trace.enabled?	#=> true
 *	trace.disable	#=> false (previous status)
 *	trace.enabled?	#=> false
 *	trace.disable	#=> false
 *
 * If a block is given, the trace will only be disabled within the scope of the
 * block.
 *
 *	trace.enabled?
 *	#=> true
 *
 *	trace.disable do
 *	    trace.enabled?
 *	    # only disabled for this block
 *	end
 *
 *	trace.enabled?
 *	#=> true
 *
 * Note: You cannot access event hooks within the block.
 *
 *	trace.disable { p tp.lineno }
 *	#=> RuntimeError: access from outside
 */
static VALUE
tracepoint_disable_m(VALUE tpval)
{
    rb_tp_t *tp = tpptr(tpval);
    int was_tracing = tp->tracing;   /* remember the status before disabling */

    rb_tracepoint_disable(tpval);

    if (!rb_block_given_p()) {
        return was_tracing ? Qtrue : Qfalse;
    }
    /* Block form: restore the previous state when the block exits,
     * even via an exception. */
    return rb_ensure(rb_yield, Qnil,
                     was_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                     tpval);
}