static void
gc_event_i(VALUE tpval, void *data)
{
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
    rb_event_flag_t flag = rb_tracearg_event_flag(tparg);

    switch (flag) {
      case RUBY_INTERNAL_EVENT_GC_START:
        /* Snapshot allocation counters at the start of a GC cycle. */
        _oobgc.allocation_limit = 0;
        _oobgc.start.total_allocated_object = rb_gc_stat(sym_total_allocated_object);
        _oobgc.start.heap_tomb_page_length = rb_gc_stat(sym_heap_tomb_page_length);
        break;
      case RUBY_INTERNAL_EVENT_GC_END_MARK:
        /* Mark phase finished; a sweep phase must follow. */
        _oobgc.sweep_needed = 1;
        break;
      case RUBY_INTERNAL_EVENT_GC_END_SWEEP:
        /* Sweep finished; estimate how many more objects can be allocated
         * before the next GC is triggered. */
        _oobgc.sweep_needed = 0;
        _oobgc.allocation_limit =
            _oobgc.start.total_allocated_object +
            rb_gc_stat(sym_heap_swept_slot) +
            (rb_gc_stat(sym_heap_tomb_page_length) * _oobgc.heap_obj_limit) -
            rb_gc_stat(sym_heap_final_slot);
        break;
    }
}
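/*
 * A minimal sketch (not from the original source) of how gc_event_i could be
 * hooked into the VM. rb_tracepoint_new() and rb_tracepoint_enable() are real
 * C-API calls; register_gc_hooks itself is a hypothetical helper. Internal
 * events cannot target a single thread, so the first argument is 0.
 */
static void
register_gc_hooks(void)
{
    VALUE tp = rb_tracepoint_new(0,
                                 RUBY_INTERNAL_EVENT_GC_START |
                                 RUBY_INTERNAL_EVENT_GC_END_MARK |
                                 RUBY_INTERNAL_EVENT_GC_END_SWEEP,
                                 gc_event_i, NULL);
    rb_tracepoint_enable(tp);
}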
/**
 * file_tracepoint_callback
 * Callback function for Tracer#file_tracepoint. It gets called on
 * RUBY_EVENT_CLASS, RUBY_EVENT_CALL, and RUBY_EVENT_B_CALL events.
 * It checks whether any breakpoint matches the file the VM program
 * counter is currently in, and turns on line event tracing for that
 * thread; if the file doesn't match, it turns line tracing off. The
 * first time it turns on line event tracing, it also turns on
 * Tracer#return_tracepoint to keep line tracing consistent when file
 * execution interleaves.
 */
static void
file_tracepoint_callback(VALUE tracepoint, void *data)
{
    VALUE self = (VALUE) data;
    rb_trace_arg_t *tracepoint_arg = rb_tracearg_from_tracepoint(tracepoint);
    VALUE tracepoint_path = rb_tracearg_path(tracepoint_arg);
    int match_found;

    if (!RB_TYPE_P(tracepoint_path, T_STRING))
        return;

    /* Ensure tracepoint_path is an absolute path. */
    tracepoint_path = rb_file_expand_path(tracepoint_path, Qnil);
    if (!RTEST(tracepoint_path)) {
        return;
    }

    match_found = match_breakpoints_files(self, tracepoint_path);

    if (match_found) {
        enable_line_trace_for_thread(self);
        enable_return_trace_for_thread(self);
    }
    else {
        disable_line_trace_for_thread(Qnil);
    }
}
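/*
 * A hedged sketch (assumed, not the gem's actual registration code) of how
 * the file tracepoint above would be created: hook the public CLASS, CALL,
 * and B_CALL events and pass the Tracer instance through as callback data.
 * register_file_tracepoint is a hypothetical helper.
 */
static void
register_file_tracepoint(VALUE self)
{
    VALUE file_tracepoint =
        rb_tracepoint_new(Qnil,
                          RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL,
                          file_tracepoint_callback, (void *)self);
    rb_tracepoint_enable(file_tracepoint);
}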
static void
tracepoint_handler(VALUE tpval, void *data)
{
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);

    /* Count every object allocation reported by the VM. */
    if (rb_tracearg_event_flag(tparg) == RUBY_INTERNAL_EVENT_NEWOBJ) {
        increment_allocations();
    }
}
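/*
 * increment_allocations() is not defined in this snippet. A minimal sketch of
 * what it could look like (assumed implementation, not the original): NEWOBJ
 * hooks fire on the allocating thread while it holds the GVL, so a plain
 * counter is sufficient.
 */
static unsigned long allocations = 0; /* hypothetical counter */

static void
increment_allocations(void)
{
    allocations++;
}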
// See Ruby source ext/objspace/object_tracing.c::freeobj_i.
// Registered together with newobj_i; see the sketch after that function.
static void
freeobj_i(VALUE tpval, void *data)
{
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
    rbkit_obj_destroyed_event *event;
    VALUE obj = rb_tracearg_object(tparg);
    rbkit_logger *arg = (rbkit_logger *)data;

    // Delete allocation info of the freed object
    delete_rbkit_allocation_info(tparg, obj, arg->str_table, arg->object_table);

    event = new_rbkit_obj_destroyed_event(FIX2ULONG(rb_obj_id(obj)));
    pack_event((rbkit_event_header *)event, arg->msgpacker);
    free(event);
    send_message(arg->sbuf);
}
static void
fiber_tracepoint_callback(VALUE tracepoint, void *data)
{
    VALUE self = (VALUE) data;
    rb_trace_arg_t *tracepoint_arg = rb_tracearg_from_tracepoint(tracepoint);
    VALUE tracepoint_lineno = rb_tracearg_lineno(tracepoint_arg);
    int c_tracepoint_lineno = NUM2INT(tracepoint_lineno);

    // Only when lineno is greater than 0 do we know this event was triggered
    // by fiber execution; in that case we unconditionally start line tracing.
    if (c_tracepoint_lineno > 0) {
        enable_line_trace_for_thread(self);
    }
}
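/*
 * A hedged sketch (assumed): fiber_tracepoint_callback reads like a handler
 * for RUBY_EVENT_FIBER_SWITCH (available since Ruby 2.4). Registration could
 * look like this, again passing the Tracer instance as callback data;
 * register_fiber_tracepoint is a hypothetical helper.
 */
static void
register_fiber_tracepoint(VALUE self)
{
    VALUE fiber_tracepoint =
        rb_tracepoint_new(Qnil, RUBY_EVENT_FIBER_SWITCH,
                          fiber_tracepoint_callback, (void *)self);
    rb_tracepoint_enable(fiber_tracepoint);
}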
// See Ruby source ext/objspace/object_tracing.c::newobj_i.
static void
newobj_i(VALUE tpval, void *data)
{
    rbkit_obj_created_event *event;
    rbkit_logger *arg = (rbkit_logger *)data;
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
    rbkit_allocation_info *info =
        new_rbkit_allocation_info(tparg, arg->str_table, arg->object_table);

    VALUE obj = rb_tracearg_object(tparg);
    VALUE klass = RBASIC_CLASS(obj);
    const char *class_name = NULL;

    // Resolve the class name only for fully-initialized, regular objects.
    if (!NIL_P(klass) &&
        BUILTIN_TYPE(obj) != T_NONE &&
        BUILTIN_TYPE(obj) != T_ZOMBIE &&
        BUILTIN_TYPE(obj) != T_ICLASS)
        class_name = rb_class2name(klass);

    event = new_rbkit_obj_created_event(FIX2ULONG(rb_obj_id(obj)), class_name, info);
    pack_event((rbkit_event_header *)event, arg->msgpacker);
    free(event);
    send_message(arg->sbuf);
}
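/*
 * A hedged sketch (assumed, mirroring how ext/objspace/object_tracing.c wires
 * up its hooks): the newobj_i/freeobj_i pair would be registered as two
 * separate tracepoints sharing one rbkit_logger as callback data.
 * register_object_trace and its logger parameter are hypothetical.
 */
static void
register_object_trace(rbkit_logger *logger)
{
    VALUE newobj_trace  = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ,
                                            newobj_i, logger);
    VALUE freeobj_trace = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_FREEOBJ,
                                            freeobj_i, logger);
    rb_tracepoint_enable(newobj_trace);
    rb_tracepoint_enable(freeobj_trace);
}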