/*
 * Trace hook: forwards :call and :c_call events to the evaluator object
 * (passed through the opaque data pointer) by invoking its
 * trace_func_callback / trace_c_func_callback methods with
 * (self, defined_class, method_id).
 */
static void
eval_trace_callback(void *data, rb_trace_arg_t *trace_arg)
{
    ID ruby_hook_id;
    ID c_hook_id;

    /* Intern the callback method names once per call via CONST_ID cache. */
    CONST_ID(ruby_hook_id, "trace_func_callback");
    CONST_ID(c_hook_id, "trace_c_func_callback");

    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
    VALUE evaluator = (VALUE)data;
    VALUE receiver = rb_tracearg_self(trace_arg);
    VALUE mid = rb_tracearg_method_id(trace_arg);
    VALUE defined_class = rb_tracearg_defined_class(trace_arg);

    if (event & RUBY_EVENT_CALL) {
        rb_funcall(evaluator, ruby_hook_id, 3, receiver, defined_class, mid);
    }
    if (event & RUBY_EVENT_C_CALL) {
        rb_funcall(evaluator, c_hook_id, 3, receiver, defined_class, mid);
    }
}
/*
 * Internal-GC-event tracepoint handler.
 *
 * Tracks GC phases in the file-scope _oobgc state:
 *  - GC_START: snapshot allocation counters and clear the allocation limit.
 *  - GC_END_MARK: note that a sweep phase is pending.
 *  - GC_END_SWEEP: clear the pending flag and recompute the allocation
 *    limit from swept slots, tomb pages, and finalized slots.
 *
 * NOTE(review): assumes this tracepoint is registered only for the three
 * GC internal events; any other flag is ignored via the default case.
 */
static void
gc_event_i(VALUE tpval, void *data)
{
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
    rb_event_flag_t flag = rb_tracearg_event_flag(tparg);

    (void)data; /* unused: no per-hook state is threaded through */

    switch (flag) {
    case RUBY_INTERNAL_EVENT_GC_START:
        _oobgc.allocation_limit = 0;
        _oobgc.start.total_allocated_object =
            rb_gc_stat(sym_total_allocated_object);
        _oobgc.start.heap_tomb_page_length =
            rb_gc_stat(sym_heap_tomb_page_length);
        break;
    case RUBY_INTERNAL_EVENT_GC_END_MARK:
        _oobgc.sweep_needed = 1;
        break;
    case RUBY_INTERNAL_EVENT_GC_END_SWEEP:
        _oobgc.sweep_needed = 0;
        /* Limit = objects allocated at GC start + capacity reclaimed by the
         * sweep (swept slots plus whole tomb pages), minus slots still
         * awaiting finalization. */
        _oobgc.allocation_limit =
            _oobgc.start.total_allocated_object +
            rb_gc_stat(sym_heap_swept_slot) +
            (rb_gc_stat(sym_heap_tomb_page_length) * _oobgc.heap_obj_limit) -
            rb_gc_stat(sym_heap_final_slot);
        break;
    default:
        /* Not one of the events this hook is registered for; ignore. */
        break;
    }
}
/*
 * NEWOBJ tracepoint handler: bumps the allocation counter each time the
 * VM reports an object allocation.
 *
 * Uses a bitmask test (&) rather than equality, matching the event-flag
 * handling style of the other handlers in this file and staying correct
 * if the flag field ever carries combined bits.
 */
static void
tracepoint_handler(VALUE tpval, void *data)
{
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);

    (void)data; /* unused: no per-hook state is threaded through */

    if (rb_tracearg_event_flag(tparg) & RUBY_INTERNAL_EVENT_NEWOBJ) {
        increment_allocations();
    }
}