Example #1
/*
 * call-seq:
 *   Kernel.each_backtrace_frame( & block )
 *
 * Iterate the current backtrace, yielding a hash of object and method frame
 * information for each frame. If no block is given, an Enumerator over a
 * stored copy of the backtrace is returned instead.
 */
VALUE rb_RPRuby_Sender_Kernel_each_backtrace_frame(	int		argc,
													VALUE*	args,
													VALUE	rb_self )	{

	rb_thread_t*			c_thread					= GET_THREAD();
	//	Get the starting frame - we're doing a backtrace, so we begin two control frames back from this C function's own frame
	rb_control_frame_t*		c_current_context_frame		= RUBY_VM_PREVIOUS_CONTROL_FRAME( RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp ) );
	
	//	c_top_of_control_frame describes the top edge of the stack trace
	//	set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t*		c_top_of_control_frame	=	RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );
	
	VALUE	rb_stored_backtrace_array	=	Qnil;
	
	//	if we were passed a stored backtrace array, use it
	if (	argc == 1
		&&	TYPE( args[ 0 ] ) == T_ARRAY )	{
		rb_stored_backtrace_array	=	args[ 0 ];
	}
				
	//	for each control frame:
    while ( c_current_context_frame < c_top_of_control_frame ) {

		VALUE	rb_frame_hash;
		//	if we are using a stored backtrace we don't need to ask for a new hash
		if ( rb_stored_backtrace_array == Qnil )	{
			rb_frame_hash	=	rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(	& c_current_context_frame );
		}
		else {
			rb_frame_hash	=	rb_ary_shift( rb_stored_backtrace_array );
		}

		if ( rb_frame_hash == Qnil )	{
			break;
		}
		
		//	if we try to iterate using an Enumerator we will lose our context
		if ( ! rb_block_given_p() )	{
			
			//	we solve this by assuming that the desired context is the moment when each_backtrace_frame is called
			//	this allows us to store the backtrace and iterate it as we want
			//	the only downside is that we have to get the entire backtrace first in order to store it
			rb_stored_backtrace_array	=	rb_RPRuby_Sender_Kernel_backtrace(	0,
																				NULL,
																				rb_self );
		
			RETURN_ENUMERATOR( rb_self, 1, & rb_stored_backtrace_array );
		}
		
		//	otherwise, yield the block
		rb_yield( rb_frame_hash );
		
		//	only move the frame if we are not using a stored backtrace
		if ( rb_stored_backtrace_array == Qnil )	{
			c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );		
		}
	}
	
	return Qnil;
}
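For context, here is a minimal sketch of how a function with this signature would be exposed to Ruby. The init entry-point name is an assumption for illustration; only rb_define_module_function and the -1 argc convention are standard Ruby C API.

#include "ruby.h"

VALUE rb_RPRuby_Sender_Kernel_each_backtrace_frame( int argc, VALUE* args, VALUE rb_self );

/* Hypothetical extension init - the entry-point name is an assumption. */
void Init_backtrace_frames(void)
{
	/* argc == -1 means the C function receives (int argc, VALUE* argv, VALUE self) */
	rb_define_module_function( rb_mKernel, "each_backtrace_frame",
	                           rb_RPRuby_Sender_Kernel_each_backtrace_frame, -1 );
}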
Example #2
static VALUE frame_count(VALUE self)
{
  rb_thread_t *th;
  GetThreadPtr(rb_thread_current(), th);

  rb_control_frame_t *cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
  rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

  int i = 1;
  while (cfp < limit_cfp) {
    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (cfp >= limit_cfp)
      return INT2FIX(i);

    // skip invalid frames
    if (!valid_frame_p(cfp, limit_cfp))
      cfp = find_valid_frame(cfp, limit_cfp);

    if (!cfp)
      break;

    i++;
  }

  return INT2FIX(i);
}
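Examples #2, #17, and #19 assume a valid_frame_p() predicate from their surrounding project. A minimal sketch of what such a check typically looks like, treating a frame as usable only when it carries both an instruction sequence and a program counter; this is an assumption, not the gem's exact code:

/* Sketch only: a "valid" frame here is a Ruby-level frame inside the stack bounds. */
static int valid_frame_p(rb_control_frame_t *cfp, rb_control_frame_t *limit_cfp)
{
  if (cfp >= limit_cfp)
    return 0;
  return cfp->iseq && cfp->pc;
}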
Example #3
static VALUE
proc_new(VALUE klass, int is_lambda)
{
    VALUE procval = Qnil;
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    rb_block_t *block;

    if ((GC_GUARDED_PTR_REF(cfp->lfp[0])) != 0 &&
	!RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {

	block = GC_GUARDED_PTR_REF(cfp->lfp[0]);
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    else {
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

	if ((GC_GUARDED_PTR_REF(cfp->lfp[0])) != 0 &&
	    !RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {

	    block = GC_GUARDED_PTR_REF(cfp->lfp[0]);

	    if (block->proc) {
		return block->proc;
	    }

	    /* TODO: check more (cfp limit, called via cfunc, etc) */
	    while (cfp->dfp != block->dfp) {
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }

	    if (is_lambda) {
		rb_warn("tried to create Proc object without a block");
	    }
	}
	else {
	    rb_raise(rb_eArgError,
		     "tried to create Proc object without a block");
	}
    }

    procval = block->proc;
    if (procval && RBASIC(procval)->klass == klass) {
	return procval;
    }

    procval = vm_make_proc(th, cfp, block, klass);

    if (is_lambda) {
	rb_proc_t *proc;
	GetProcPtr(procval, proc);
	proc->is_lambda = Qtrue;
    }
    return procval;
}
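Several of these examples (#3, #14, #15, #22) read the block pointer out of lfp[0] through GC_GUARDED_PTR_REF. For reference, the 1.9.x definitions are approximately the following; quoted from memory, so treat the exact masks as an assumption to verify against vm_core.h:

/* Approximate vm_core.h definitions: lfp[0]/dfp[0] hold a pointer tagged in its
 * low bits, so the REF macro strips the tag before the pointer is dereferenced. */
#define GC_GUARDED_PTR(p)     ((VALUE)((VALUE)(p) | 0x01))
#define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)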
Example #4
/*
 * call-seq:
 *   Kernel.backtrace( number_of_frames = nil ) -> [ { :object => object, :method => method }, ... ]
 *
 * Return array of hashes with object and method frame information for backtrace.
 * Specifying number_of_frames will cause only the last number_of_frames to be returned.
 * Kernel.backtrace returns all frames including the current context (__method__/__callee__).
 */
VALUE rb_RPRuby_Sender_Kernel_backtrace(	int		argc,
											VALUE*	args,
											VALUE	rb_self )	{

	//	Get max stack level from args if it is there
	int		c_max_stack_level	=	0;
	if ( argc )	{
		c_max_stack_level	=	FIX2INT( args[ 0 ] );
		
		//	if max_stack_level is 0 return empty array
		if ( c_max_stack_level == 0 )	{
			return rb_ary_new();
		}
		//	if max_stack_level < 0, throw error
		else if ( c_max_stack_level < 0 ) {
			rb_raise( rb_eArgError, RPRUBY_SENDER_ERROR_STACK_LEVEL_LESS_THAN_ZERO );
		}
		
	}
	
	rb_thread_t*			c_thread					= GET_THREAD();
	//	Get the starting frame - we're doing a backtrace, so we begin with the frame prior to this C function's own frame
	rb_control_frame_t*		c_current_context_frame		= RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp );
	
	//	c_top_of_control_frame describes the top edge of the stack trace
	//	set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t*		c_top_of_control_frame	=	RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );
	
	VALUE	rb_return_array	=	rb_ary_new();
	
	int	c_stack_level	=	0;
	//	for each control frame:
    while (		c_current_context_frame < c_top_of_control_frame
		   &&	(	argc == 0
				 ||	c_stack_level < c_max_stack_level ) ) {
		
		VALUE	rb_frame_hash	=	rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(	& c_current_context_frame );
	   
		if ( rb_frame_hash == Qnil )	{
			break;
		}
		
		//	push hash to array
		rb_ary_push(	rb_return_array,
						rb_frame_hash );
	   
		c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );
		c_stack_level++;
	}
	
	return rb_return_array;
}
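All of these examples walk the VM stack with the same handful of macros. Their 1.9.x vm_core.h definitions are roughly as follows (the control-frame stack grows toward lower addresses, so the "previous" caller frame sits at a higher address); approximate, so verify against the target Ruby version:

/* Approximate vm_core.h definitions of the frame-walking helpers used above. */
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)     ((cfp)-1)
#define RUBY_VM_END_CONTROL_FRAME(th) \
  ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))
#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
  (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))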
Example #5
  void
  rb_dump_stack()
  {
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
    ID func;

    printf("\n\n*********************\n");
    while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
      printf("cfp (%p):\n", cfp);
      printf("  type: 0x%x\n", VM_FRAME_TYPE(cfp));
      printf("  pc: %p\n", cfp->pc);
      printf("  iseq: %p\n", cfp->iseq);
      if (cfp->iseq) {
        printf("     type: %d\n", FIX2INT(cfp->iseq->type));
        printf("     self: %p\n", cfp->iseq->self);
        printf("     klass: %p (%s)\n", cfp->iseq->klass, cfp->iseq->klass ? rb_class2name(cfp->iseq->klass) : "");
        printf("     method: %p (%s)\n", cfp->iseq->defined_method_id, cfp->iseq->defined_method_id ? rb_id2name(cfp->iseq->defined_method_id) : "");
      }
      printf("  self: %p\n", cfp->self);
      printf("  klass: %p (%s)\n", cfp->method_class, cfp->method_class ? rb_class2name(cfp->method_class) : "");
      printf("  method: %p (%s)\n", cfp->method_id, cfp->method_id ? rb_id2name(cfp->method_id) : "");

      cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
      printf("\n");
    }
    printf("*********************\n\n");
  }
Example #6
rb_control_frame_t* RPRuby_internal_framePriorTo( rb_control_frame_t* c_control_frame )  {
  
  rb_thread_t*      c_thread          = (rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current());
  rb_control_frame_t*    c_prior_control_frame    = NULL;
  //  get the current frame pointer
  if ( c_control_frame == NULL )  {
    c_control_frame  = c_thread->cfp;
  }
  
    if ( ( c_prior_control_frame = rb_vm_get_ruby_level_next_cfp( c_thread, c_control_frame ) ) != 0) {
    
    //  not sure why we have to call this a second time after it was called at the end of rb_vm_get_ruby_level_next_cfp,
    //  but for some reason it seems to be necessary
    c_prior_control_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_prior_control_frame );

    }
  else {
    c_prior_control_frame = NULL;
  }
  
  //  if we have no prior frame, or a nil self, we've passed main - we're done
  if ( c_prior_control_frame == NULL || c_prior_control_frame->self == Qnil )  {
    return NULL;
  }

  return c_prior_control_frame;
  
}
Example #7
static inline void
vm_pop_frame(rb_thread_t *th)
{
#if COLLECT_PROFILE
    rb_control_frame_t *cfp = th->cfp;

    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	VALUE current_time = clock();
	rb_control_frame_t *cfp = th->cfp;
	cfp->prof_time_self = current_time - cfp->prof_time_self;
	(cfp+1)->prof_time_chld += cfp->prof_time_self;

	cfp->iseq->profile.count++;
	cfp->iseq->profile.time_cumu = cfp->prof_time_self;
	cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
    }
    else if (0 /* c method? */) {

    }
#endif
    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    if (VMDEBUG == 2) {
	SDR();
    }
}
Example #8
void
rb_vm_stack_to_heap(rb_thread_t * const th)
{
    rb_control_frame_t *cfp = th->cfp;
    while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
	rb_vm_make_env_object(th, cfp);
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
Example #9
static void
rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)
{
    rb_thread_t *th = trace_arg->th;

    if (trace_arg->event & RUBY_INTERNAL_EVENT_MASK) {
        if (th->trace_arg && (th->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* skip hooks because this thread doing INTERNAL_EVENT */
        }
        else {
            rb_trace_arg_t *prev_trace_arg = th->trace_arg;
            th->trace_arg = trace_arg;
            exec_hooks_unprotected(th, &th->event_hooks, trace_arg);
            exec_hooks_unprotected(th, &th->vm->event_hooks, trace_arg);
            th->trace_arg = prev_trace_arg;
        }
    }
    else {
        if (th->trace_arg == 0 && /* check reentrant */
                trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
            const VALUE errinfo = th->errinfo;
            const int outer_state = th->state;
            int state = 0;
            th->state = 0;
            th->errinfo = Qnil;

            th->vm->trace_running++;
            th->trace_arg = trace_arg;
            {
                /* thread local traces */
                state = exec_hooks_protected(th, &th->event_hooks, trace_arg);
                if (state) goto terminate;

                /* vm global traces */
                state = exec_hooks_protected(th, &th->vm->event_hooks, trace_arg);
                if (state) goto terminate;

                th->errinfo = errinfo;
            }
terminate:
            th->trace_arg = 0;
            th->vm->trace_running--;

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
                        th->tag = th->tag->prev;
                    }
                    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
                }
                TH_JUMP_TAG(th, state);
            }
            th->state = outer_state;
        }
    }
}
Example #10
  int
  rb_stack_trace(void** result, int max_depth)
  {
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);

    VALUE klass, self;
    ID method;
    int depth = 0;

    if (max_depth == 0)
      return 0;

    if (rb_during_gc()) {
      result[0] = rb_gc;
      return 1;
    }

    while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp) && depth+3 <= max_depth) {
      rb_iseq_t *iseq = cfp->iseq;

      if (iseq && iseq->type == ISEQ_TYPE_METHOD) {
        self = 0; // maybe use cfp->self here, but iseq->self is a ISeq ruby obj
        klass = iseq->klass;
        method = iseq->defined_method_id;
        SAVE_FRAME();
      }

      if (depth+3 > max_depth)
        break;

      switch (VM_FRAME_TYPE(cfp)) {
        case VM_FRAME_MAGIC_METHOD:
        case VM_FRAME_MAGIC_CFUNC:
          self = cfp->self;
#ifdef HAVE_METHOD_H
          if (!cfp->me) break;

          klass = cfp->me->klass;
          method = cfp->me->called_id;
#else
          klass = cfp->method_class;
          method = cfp->method_id;
#endif
          SAVE_FRAME();
          break;
      }

      cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    assert(depth <= max_depth);
    return depth;
  }
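Example #10 depends on a SAVE_FRAME() macro defined elsewhere in its project (perftools.rb), which is not shown here. A plausible sketch of what it needs to do, given that the loop reserves up to three result slots per frame (depth+3 <= max_depth); the slot layout is an assumption, not the real macro:

/* Sketch only: push the resolved method (and optionally class and self) into
 * result[] and advance depth; the real perftools.rb macro differs in detail. */
#define SAVE_FRAME() do {                              \
    if (method) {                                      \
      result[depth++] = (void *)method;                \
      if (klass) result[depth++] = (void *)klass;      \
      if (self)  result[depth++] = (void *)self;       \
    }                                                  \
  } while (0)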
Example #11
static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	    return 0;
	}
    }
    return cfp;
}
Example #12
rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	    return cfp;
	}
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
Example #13
/* copy from vm.c */
static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
{
    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    VALUE *bp = prev_cfp->sp + cfp->iseq->local_size + 1;

    if (cfp->iseq->type == ISEQ_TYPE_METHOD) {
	bp += 1;
    }
    return bp;
}
Example #14
static VALUE
rb_f_local_variables(void)
{
    VALUE ary = rb_ary_new();
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp =
	vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp));
    int i;

    while (cfp) {
	if (cfp->iseq) {
	    for (i = 0; i < cfp->iseq->local_table_size; i++) {
		ID lid = cfp->iseq->local_table[i];
		if (lid) {
		    const char *vname = rb_id2name(lid);
		    /* should skip temporary variable */
		    if (vname) {
			rb_ary_push(ary, ID2SYM(lid));
		    }
		}
	    }
	}
	if (cfp->lfp != cfp->dfp) {
	    /* block */
	    VALUE *dfp = GC_GUARDED_PTR_REF(cfp->dfp[0]);

	    if (vm_collect_local_variables_in_heap(th, dfp, ary)) {
		break;
	    }
	    else {
		while (cfp->dfp != dfp) {
		    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
		}
	    }
	}
	else {
	    break;
	}
    }
    return ary;
}
Example #15
static VALUE
proc_new(VALUE klass, int is_lambda)
{
    VALUE procval = Qnil;
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    rb_block_t *block;

    if ((GC_GUARDED_PTR_REF(cfp->lfp[0])) != 0 &&
	!RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {

	block = GC_GUARDED_PTR_REF(cfp->lfp[0]);
    }
    else {
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

	if ((GC_GUARDED_PTR_REF(cfp->lfp[0])) != 0 &&
	    !RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {

	    block = GC_GUARDED_PTR_REF(cfp->lfp[0]);

	    if (is_lambda) {
		rb_warn("tried to create Proc object without a block");
	    }
	}
	else {
	    rb_raise(rb_eArgError,
		     "tried to create Proc object without a block");
	}
    }

    procval = block->proc;

    if (procval) {
	if (RBASIC(procval)->klass == klass) {
	    return procval;
	}
	else {
	    VALUE newprocval = proc_dup(procval);
	    RBASIC(newprocval)->klass = klass;
	    return newprocval;
	}
    }

    procval = rb_vm_make_proc(th, block, klass);

    if (is_lambda) {
	rb_proc_t *proc;
	GetProcPtr(procval, proc);
	proc->is_lambda = Qtrue;
    }
    return procval;
}
Example #16
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	    return cfp;
	}

	if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
	    break;
	}
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
Example #17
static VALUE binding_of_caller(VALUE self, VALUE rb_level)
{
  rb_thread_t *th;
  GetThreadPtr(rb_thread_current(), th);

  rb_control_frame_t *cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
  rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);
  int level = FIX2INT(rb_level);

  // attempt to locate the nth parent control frame
  for (int i = 0; i < level; i++) {
    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (cfp >= limit_cfp)
      rb_raise(rb_eRuntimeError, "Invalid frame, gone beyond end of stack!");

    // skip invalid frames
    if (!valid_frame_p(cfp, limit_cfp))
      cfp = find_valid_frame(cfp, limit_cfp);
  }

  VALUE bindval = binding_alloc(rb_cBinding);
  rb_binding_t *bind;

  if (cfp == 0)
    rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");

  GetBindingPtr(bindval, bind);

  bind->env = rb_vm_make_env_object(th, cfp);
  bind->filename = cfp->iseq->filename;
  bind->line_no = rb_vm_get_sourceline(cfp);
  
  rb_iv_set(bindval, "@frame_type", frametype_name(cfp->flag));
  rb_iv_set(bindval, "@frame_description", cfp->iseq->name);

  return bindval;
}
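Example #17 also assumes a frametype_name() helper. A sketch of how such a helper could map cfp->flag to a readable Ruby string using the VM_FRAME_MAGIC_* constants these examples already rely on; the exact set of cases is an assumption:

/* Sketch only: classify a control frame by its magic bits. */
static VALUE frametype_name(VALUE flag)
{
  const char *name;
  switch (flag & VM_FRAME_MAGIC_MASK) {
    case VM_FRAME_MAGIC_METHOD: name = "method"; break;
    case VM_FRAME_MAGIC_BLOCK:  name = "block";  break;
    case VM_FRAME_MAGIC_CLASS:  name = "class";  break;
    case VM_FRAME_MAGIC_TOP:    name = "top";    break;
    case VM_FRAME_MAGIC_CFUNC:  name = "cfunc";  break;
    case VM_FRAME_MAGIC_PROC:   name = "proc";   break;
    case VM_FRAME_MAGIC_EVAL:   name = "eval";   break;
    case VM_FRAME_MAGIC_LAMBDA: name = "lambda"; break;
    default:                    name = "unknown"; break;
  }
  return rb_str_new2(name);
}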
Example #18
static void
raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv, VALUE obj,
		     int last_call_status)
{
    ID id;
    VALUE exc = rb_eNoMethodError;
    const char *format = 0;

    if (argc == 0 || !SYMBOL_P(argv[0])) {
	rb_raise(rb_eArgError, "no id given");
    }

    stack_check();

    id = SYM2ID(argv[0]);

    if (last_call_status & NOEX_PRIVATE) {
	format = "private method `%s' called for %s";
    }
    else if (last_call_status & NOEX_PROTECTED) {
	format = "protected method `%s' called for %s";
    }
    else if (last_call_status & NOEX_VCALL) {
	format = "undefined local variable or method `%s' for %s";
	exc = rb_eNameError;
    }
    else if (last_call_status & NOEX_SUPER) {
	format = "super: no superclass method `%s' for %s";
    }
    if (!format) {
	format = "undefined method `%s' for %s";
    }

    {
	int n = 0;
	VALUE args[3];
	args[n++] = rb_funcall(rb_const_get(exc, rb_intern("message")), '!',
			       3, rb_str_new2(format), obj, argv[0]);
	args[n++] = argv[0];
	if (exc == rb_eNoMethodError) {
	    args[n++] = rb_ary_new4(argc - 1, argv + 1);
	}
	exc = rb_class_new_instance(n, args, exc);

	if (!(last_call_status & NOEX_MISSING)) {
	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	}
	rb_exc_raise(exc);
    }
}
Example #19
static rb_control_frame_t * find_valid_frame(rb_control_frame_t * cfp, rb_control_frame_t * limit_cfp) {
  while (cfp < limit_cfp) {
    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    if (cfp >= limit_cfp)
      return NULL;

    if (valid_frame_p(cfp, limit_cfp))
      return cfp;
  }

  // beyond end of stack
  return NULL;
}
Example #20
static void
rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)
{
    rb_thread_t *th = trace_arg->th;
    if (th->trace_arg == 0 &&
	trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
	const int vm_tracing = th->vm->trace_running;
	const VALUE errinfo = th->errinfo;
	const int outer_state = th->state;
	int state = 0;
	th->state = 0;

	th->vm->trace_running++;
	th->trace_arg = trace_arg;
	{
	    rb_hook_list_t *list;

	    /* thread local traces */
	    list = &th->event_hooks;
	    if (list->events & trace_arg->event) {
		state = exec_hooks(th, list, trace_arg, TRUE);
		if (state) goto terminate;
	    }

	    /* vm global traces */
	    list = &th->vm->event_hooks;
	    if (list->events & trace_arg->event) {
		state = exec_hooks(th, list, trace_arg, !vm_tracing);
		if (state) goto terminate;
	    }
	    th->errinfo = errinfo;
	}
      terminate:
	th->trace_arg = 0;
	th->vm->trace_running--;

	if (state) {
	    if (pop_p) {
		if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
		    th->tag = th->tag->prev;
		}
		th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	    }
	    TH_JUMP_TAG(th, state);
	}
	th->state = outer_state;
    }
}
Example #21
static VALUE
send_internal(int argc, VALUE *argv, VALUE recv, int scope)
{
    VALUE vid;
    VALUE self = RUBY_VM_PREVIOUS_CONTROL_FRAME(GET_THREAD()->cfp)->self;
    rb_thread_t *th = GET_THREAD();

    if (argc == 0) {
	rb_raise(rb_eArgError, "no method name given");
    }

    vid = *argv++; argc--;
    PASS_PASSED_BLOCK_TH(th);

    return rb_call0(CLASS_OF(recv), recv, rb_to_id(vid), argc, argv, scope, self);
}
Example #22
VALUE
rb_f_block_given_p(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp;
    cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));

    if (cfp != 0 &&
	(cfp->lfp[0] & 0x02) == 0 &&
	GC_GUARDED_PTR_REF(cfp->lfp[0])) {
	return Qtrue;
    }
    else {
	return Qfalse;
    }
}
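The (cfp->lfp[0] & 0x02) test above checks the same "class special" bit that Examples #3 and #15 query via RUBY_VM_CLASS_SPECIAL_P. For reference, its 1.9.x definition is approximately:

/* Approximate vm_core.h definition. */
#define RUBY_VM_CLASS_SPECIAL_P(ptr) (((VALUE)(ptr)) & 0x02)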
Example #23
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
	/* for method_missing */
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);

    if (PROCDEBUG) {
	check_env_value(envval);
    }

    return envval;
}
Example #24
int
rb_profile_frames(int start, int limit, VALUE *buff, int *lines)
{
    int i;
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th->cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);

    for (i=0; i<limit && cfp != end_cfp;) {
	if (cfp->iseq && cfp->pc) { /* should be NORMAL_ISEQ */
	    if (start > 0) {
		start--;
		continue;
	    }

	    /* record frame info */
	    buff[i] = cfp->iseq->self;
	    if (lines) lines[i] = calc_lineno(cfp->iseq, cfp->pc);
	    i++;
	}
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    return i;
}
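A minimal usage sketch of rb_profile_frames() in the style of a sampling profiler. rb_profile_frame_full_label() belongs to the same 2.0+ debug API; the buffer size and the simple printing here are assumptions for illustration:

#include <stdio.h>
#include "ruby/debug.h"

#define MAX_SAMPLED_FRAMES 1024
static VALUE frames_buffer[MAX_SAMPLED_FRAMES];
static int   lines_buffer[MAX_SAMPLED_FRAMES];

/* Sketch only: capture the current Ruby stack and print one line per frame. */
static void sample_current_stack(void)
{
    int i;
    int num = rb_profile_frames(0, MAX_SAMPLED_FRAMES, frames_buffer, lines_buffer);

    for (i = 0; i < num; i++) {
        VALUE label = rb_profile_frame_full_label(frames_buffer[i]);
        printf("%s:%d\n", StringValueCStr(label), lines_buffer[i]);
    }
}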
Example #25
/*
 * call-seq:
 *   Kernel.backtrace_includes?( method_or_object, ... ) -> true or false
 *   Kernel.backtrace_includes?( number_of_frames, method_or_object, ... ) -> true or false
 *
 * Returns whether the specified methods, objects, or classes appear in the current backtrace context.
 * Kernel.backtrace_includes? begins with the prior frame, so asking if the backtrace includes the current method
 * will only report true if the current method is part of the earlier call chain.
 */
VALUE rb_RPRuby_Sender_Kernel_backtrace_includes(	int		argc,
													VALUE*	args,
													VALUE	rb_self )	{
	
	//	this function is also used for 
	//	* backtrace_includes_one_of?
	//	* backtrace_includes_frame?
	//	* backtrace_includes_one_of_frames?
	
	//	create tracking array
	VALUE	rb_tracking_array	=	rb_ary_new();
	
	//	populate tracking array with methods/objects
	//	optional leading flag: a leading Qnil means match any one of the args instead of all of them
	//	(a leading Qfalse requests the matching frame, a leading Qtrue requests all matching frames - see below)
	int		c_which_arg				=	0;
	BOOL	c_requires_all_items	= TRUE;
	if (	args[ 0 ] == Qnil
		||	(	argc > 1
			 &&	args[ 1 ] == Qnil ) )	{
		c_which_arg++;
		c_requires_all_items = FALSE;
	}
	BOOL	c_return_frame	=	FALSE;
	if (	args[ 0 ] == Qfalse
		||	(	argc > 1
			 &&	args[ 1 ] == Qfalse ) )	{
		c_which_arg++;
		c_return_frame = TRUE;
	}
	BOOL	c_return_all_frames	=	FALSE;
	if (	args[ 0 ] == Qtrue
		||	(	argc > 1
			 &&	args[ 1 ] == Qtrue ) )	{
		c_which_arg++;
		c_return_all_frames = TRUE;
	}
	int	c_args_offset	=	c_which_arg;
	for ( ; c_which_arg < argc ; c_which_arg++ )	{
		rb_ary_push(	rb_tracking_array,
						args[ c_which_arg ] );
	}
		
	rb_thread_t*			c_thread					= GET_THREAD();
	//	Get the starting frame - we're doing a backtrace, so we begin two control frames back from this C function's own frame
	rb_control_frame_t*		c_current_context_frame		= RUBY_VM_PREVIOUS_CONTROL_FRAME( RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp ) );
	
	//	c_top_of_control_frame describes the top edge of the stack trace
	//	set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t*		c_top_of_control_frame	=	RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );
	
	VALUE	rb_test_index_array	=	rb_ary_new();
	//	:object
	//	instance or class
	rb_ary_push(	rb_test_index_array,
					ID2SYM( rb_intern( "object" ) ) );
	//	:method				
	rb_ary_push(	rb_test_index_array,
					ID2SYM( rb_intern( "method" ) ) );
	//	:file
	rb_ary_push(	rb_test_index_array,
					ID2SYM( rb_intern( "file" ) ) );
	//	:line
	rb_ary_push(	rb_test_index_array,
					ID2SYM( rb_intern( "line" ) ) );
	
	//	only used if c_return_all_frames == TRUE
	VALUE	rb_frame_hashes_array	=	Qnil;
	if ( c_return_all_frames == TRUE )	{
		rb_frame_hashes_array		=	rb_ary_new();
	}
	
	VALUE	rb_frame_hash	=	Qnil;
	
	//	for each control frame:
    while ( c_current_context_frame < c_top_of_control_frame ) {
	   
		//	iterate each array member 
		int	c_which_member;
		for ( c_which_member = 0 ; c_which_member < RARRAY_LEN( rb_tracking_array ) ; c_which_member++ )	{
		   
			VALUE	rb_this_arg	=	args[ c_which_member + c_args_offset ];
		   
			BOOL		matched	=	FALSE;
		   
			rb_frame_hash	=	rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(	& c_current_context_frame );
			
			//	if we have a hash we are testing multiple items in a frame
			if ( TYPE( rb_this_arg ) == T_HASH )	{

				VALUE	rb_frame_test_array	=	rb_obj_clone( rb_test_index_array );
			
				//	for each element that we could test for
				int	c_which_index;
				int	c_skipped_index_count	=	0;
				for ( c_which_index = 0 ; c_which_index < RARRAY_LEN( rb_frame_test_array ) ; c_which_index++ )	{
					
					VALUE	rb_this_index	=	RARRAY_PTR( rb_frame_test_array )[ c_which_index ];
					
					//	see if our requested test hash includes the potential test element
					if ( rb_hash_lookup(	rb_this_arg,
											rb_this_index ) != Qnil )	{
						
						VALUE	rb_required_element	=	rb_hash_aref(	rb_this_arg,
																		rb_this_index );
						VALUE	rb_frame_element	=	rb_hash_aref(	rb_frame_hash,
																		rb_this_index	);
									 						
						//	if it does, we need to see if the current frame's element matches this element
						VALUE	rb_required_element_klass;
						if ( rb_required_element == rb_frame_element
							//	if we have a string, which is a filename
							||	(	TYPE( rb_required_element ) == T_STRING
								 &&	rb_funcall( rb_frame_element, rb_intern( "==" ), 1, rb_required_element ) == Qtrue )
							//	if we have a class, which is a special case for :object
							||	(	rb_this_index == ID2SYM( rb_intern( "class" ) ) 
								 &&	( rb_required_element_klass = ( ( TYPE( rb_required_element ) == T_CLASS ) ? rb_required_element : rb_funcall( rb_required_element, rb_intern( "class" ), 0 ) ) )
								 &&	rb_required_element_klass == rb_required_element ) )	{

							rb_ary_delete_at(	rb_frame_test_array,
												c_which_index );
							c_which_index--;
						}
					}
					else {
						c_skipped_index_count++;
					}

					if ( RARRAY_LEN( rb_frame_test_array ) == c_skipped_index_count )	{
						if ( c_return_frame == TRUE )	{
							return rb_frame_hash;
						}
						else if ( c_return_all_frames == TRUE )	{
							rb_ary_push(	rb_frame_hashes_array,
											rb_frame_hash );
						}
						else {
							return Qtrue;							
						}
					}					
				}
			}
			else {

				//	:object => <class:instance>
				if	(	TYPE( rb_this_arg ) == T_OBJECT )	{

					if ( rb_hash_aref(	rb_frame_hash,
										ID2SYM( rb_intern( "object" ) ) ) == rb_this_arg )	{
						matched = TRUE;
					}
				}
				//	:object => <class>
				else if	( TYPE( rb_this_arg ) == T_CLASS )	{
					
					VALUE	rb_frame_object			=	rb_hash_aref(	rb_frame_hash,
																		ID2SYM( rb_intern( "object" ) ) );
					VALUE	rb_frame_object_klass	=	TYPE( rb_frame_object ) == T_CLASS ? rb_frame_object : rb_funcall( rb_frame_object, rb_intern( "class" ), 0 );
					if ( rb_frame_object_klass == rb_this_arg )	{
						matched = TRUE;
					}
				}
				//	:method => :method
				else if	( TYPE( rb_this_arg ) == T_SYMBOL )	{
				   
					if ( rb_hash_aref(	rb_frame_hash,
										ID2SYM( rb_intern( "method" ) ) ) == rb_this_arg )	{
						matched = TRUE;
					}
				}
				//	:file => "filename"
				else if ( TYPE( rb_this_arg ) == T_STRING )	{
					VALUE	rb_filename	=	rb_hash_aref(	rb_frame_hash,
															ID2SYM( rb_intern( "file" ) ) );
					VALUE	rb_comparison	=	rb_funcall( rb_filename, rb_intern( "==" ), 1, rb_this_arg );
					if ( rb_comparison == Qtrue )	{
						matched = TRUE;
					}
				}
				//	:line => number
				else if ( TYPE( rb_this_arg ) == T_FIXNUM )	{
					if ( rb_hash_aref(	rb_frame_hash,
										ID2SYM( rb_intern( "line" ) ) ) == rb_this_arg )	{
						matched = TRUE;
					}
				}
			   
				//	if array member exists in frame, remove from array
				if ( matched )	{
					if ( c_requires_all_items == FALSE )	{
						if ( c_return_frame == TRUE )	{
							return rb_frame_hash;
						}
						else {
							return Qtrue;							
						}

					}
					else {
						
						//	delete this index
						rb_ary_delete_at(	rb_tracking_array,
											c_which_member );
						
						//	decrement the loop iterator so that the increase is offset
						//	this is necessary since we just removed an index and are iterating vs. the length of the array
						c_which_member--;
					}
				}
			}
		}
	   
		//	if array is empty, return true
		//	we check here as well as at the end so we can stop iterating the backtrace if we find all our items
		if ( RARRAY_LEN( rb_tracking_array ) == 0 )	{
			if ( c_return_frame == TRUE )	{
				return rb_frame_hash;
			}
			else if ( c_return_all_frames == TRUE )	{
				rb_ary_push(	rb_frame_hashes_array,
								rb_frame_hash );
				return rb_frame_hashes_array;
			}
			else {
				return Qtrue;							
			}
		}
		c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );		
	}
	
	if (	c_return_all_frames == TRUE
		&&	RARRAY_LEN( rb_frame_hashes_array ) > 0 ) {
		return rb_frame_hashes_array;
	}
	//	if we finish iterating frames and still have items in the array, return false
	else if ( RARRAY_LEN( rb_tracking_array ) > 0 )	{
		if ( c_return_frame == TRUE )	{
			return Qnil;
		}
		else {
			return Qfalse;
		}
	}
	//	otherwise, return true
	else if ( c_return_frame == TRUE )	{
		return rb_frame_hash;
	}
	else {
		return Qtrue;							
	}
	//	we don't get here
	return Qnil;
}
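Example #25 uses BOOL, TRUE, and FALSE. TRUE and FALSE are provided by Ruby's defines.h, but BOOL is not part of the Ruby C API, so the gem presumably defines it itself; a guess at the project-local definition:

/* Assumed project-local typedef; not part of ruby.h. */
typedef int BOOL;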
Example #26
VALUE rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(	rb_control_frame_t**		c_current_frame )	{

	const char*		c_method_name		=	NULL;
	int				c_sourcefile_line	=	0;
	
	//	create new hash for this frame
	VALUE	rb_frame_hash			=	rb_hash_new();
		
	VALUE	rb_sourcefile_name		=	Qnil;
	VALUE	rb_sourcefile_line		=	Qnil;
	VALUE	rb_method_name			=	Qnil;
	VALUE	rb_object_for_frame		=	Qnil;
	
	if ( ( *c_current_frame )->iseq != 0 ) {
		
		if ( ( *c_current_frame )->pc != 0 ) {
			
			rb_iseq_t *iseq			= ( *c_current_frame )->iseq;
			
			//	get sourcefile name and set in hash
			rb_sourcefile_name		=	iseq->filename;
			
			//	get sourcefile line and set in hash
			c_sourcefile_line		=	rb_vm_get_sourceline( *c_current_frame );
			rb_sourcefile_line		=	INT2FIX( c_sourcefile_line );
			
			//	get name of instruction sequence
			rb_method_name			=	ID2SYM( rb_intern( StringValuePtr( iseq->name ) ) );				
			rb_object_for_frame	=	( *c_current_frame )->self;
		}
	}
	else if ( RUBYVM_CFUNC_FRAME_P( *c_current_frame ) ) {
		
		//	get name of method

		#if RUBY_PATCHLEVEL >= -1
		//	For 1.9.2:
		const rb_method_entry_t*	c_method_for_frame	=	( *c_current_frame )->me;
		c_method_name				=	rb_id2name( c_method_for_frame->called_id );
		#else
		//	For 1.9.1:
		c_method_name				=	rb_id2name( ( *c_current_frame )->method_id );
		#endif
		
		rb_method_name				=	( c_method_name == NULL ? Qnil : ID2SYM( rb_intern( c_method_name ) ) );
		rb_object_for_frame	=	( *c_current_frame )->self;		
	}
	//	we have to test this case - it works for blocks but there may be other cases too
	else if (	( *c_current_frame )->block_iseq != 0
			 &&	( *c_current_frame )->pc == 0)	{
	
		//	If we got here we have a fiber
		//	There doesn't seem to be much that we can tell about a fiber's context
			
		VALUE			rb_current_fiber	=	rb_fiber_current();
		rb_fiber_t*		c_current_fiber		=	NULL;

		GetFiberPtr(	rb_current_fiber, 
						c_current_fiber);
						
		rb_context_t*	c_context			=	& c_current_fiber->cont;
		
//		rb_block_t*	c_blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP( *c_current_frame );
		 
		rb_object_for_frame	=	( *c_current_frame )->self;
		
		//	get sourcefile name and set in hash
		rb_sourcefile_name		=	Qnil;

		//	get sourcefile line and set in hash
		rb_sourcefile_line		=	Qnil;
		
		//	get name of instruction sequence
		rb_method_name			=	rb_str_new2( "<Fiber>" );		
		
		//	if we have a fiber we also include its ruby reference since we have so little other context
		rb_hash_aset(	rb_frame_hash,
						ID2SYM( rb_intern( "fiber" ) ),
						c_context->self );
		
		//	The one time that we know a fiber is in use in the Ruby base is with Enumerators
		//	For now we will handle that with a special case
		
		VALUE	rb_enumerator_class	=	rb_const_get(	rb_cObject,
														rb_intern( "Enumerator" ) );
		
		VALUE	rb_object_for_frame_klass	=	( ( TYPE( rb_object_for_frame ) == T_CLASS ) ? rb_object_for_frame : rb_funcall( rb_object_for_frame, rb_intern( "class" ), 0 ) );

		VALUE	rb_ancestors	=	rb_funcall(	rb_object_for_frame_klass,
												rb_intern( "ancestors" ),
												0 );
		
		if ( rb_ary_includes(	rb_ancestors,
								rb_enumerator_class ) )	{
			
			struct enumerator* c_enumerator		=	enumerator_ptr( rb_object_for_frame );
			
			rb_object_for_frame	=	c_enumerator->obj;
			rb_method_name		=	ID2SYM( c_enumerator->meth );			
		}
		
	}
	else if (	( *c_current_frame )->block_iseq == 0
			 &&	( *c_current_frame )->pc == 0)	{
		//	this happens after we had a fiber and we try to go up - which doesn't make sense with a fiber
		//	not sure what we want to do here, if anything
		return Qnil;
	}
	else {
		//	The third possibility is that we have an iseq frame with nil params for what we want
		//	In that case we can simply return the next frame
		*c_current_frame	=	RUBY_VM_PREVIOUS_CONTROL_FRAME( *c_current_frame );
		
		//	in theory this could crash because we are going forward a frame when we don't know what's there
		//	in practice I think we are ok, since we are only jumping forward from nil frames which should never be at the end
		//	at least - I don't think they should... we shall see.
		//	
		//	a fix would be to check the next frame, but that requires access to the thread or the limit cfp, 
		//	which requires passing more context; so for now, I'm leaving it there
		
		return rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame( c_current_frame );
	}

	//	Push values to return hash

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "object" ) ),
					rb_object_for_frame );
					
	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "file" ) ),
					rb_sourcefile_name );

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "line" ) ),
					rb_sourcefile_line );

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "method" ) ),
					rb_method_name );
	
	return rb_frame_hash;
}
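Example #26 reaches into MRI's private Enumerator and Fiber internals (enumerator_ptr, GetFiberPtr, rb_fiber_t, rb_context_t), which are normally copied by hand from enumerator.c / cont.c into the gem. A reduced sketch of just the parts the code above touches; the real structs have more fields and, on newer 1.9 releases, use typed data:

/* Reduced, hand-copied approximation of MRI 1.9's enumerator internals. */
struct enumerator {
    VALUE obj;   /* receiver being enumerated */
    ID    meth;  /* iteration method, e.g. :each */
    /* ... further fields omitted ... */
};

static struct enumerator *enumerator_ptr(VALUE obj)
{
    struct enumerator *ptr;
    Data_Get_Struct(obj, struct enumerator, ptr);
    return ptr;
}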
Example #27
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
	result = vm_exec_core(th, initial);
	if ((state = th->state) != 0) {
	    err = result;
	    th->state = 0;
	    goto exception_handler;
	}
    }
    else {
	int i;
	struct iseq_catch_table_entry *entry;
	unsigned long epc, cont_pc, cont_sp;
	VALUE catch_iseqval;
	rb_control_frame_t *cfp;
	VALUE type;

	err = th->errinfo;

      exception_handler:
	cont_pc = cont_sp = catch_iseqval = 0;

	while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
	    if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
		const rb_method_entry_t *me = th->cfp->me;
		EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
	    }
	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	}

	cfp = th->cfp;
	epc = cfp->pc - cfp->iseq->iseq_encoded;

	if (state == TAG_BREAK || state == TAG_RETURN) {
	    escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

	    if (cfp->dfp == escape_dfp) {
		if (state == TAG_RETURN) {
		    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
			SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
			SET_THROWOBJ_STATE(err, state = TAG_BREAK);
		    }
		    else {
			for (i = 0; i < cfp->iseq->catch_table_size; i++) {
			    entry = &cfp->iseq->catch_table[i];
			    if (entry->start < epc && entry->end >= epc) {
				if (entry->type == CATCH_TYPE_ENSURE) {
				    catch_iseqval = entry->iseq;
				    cont_pc = entry->cont;
				    cont_sp = entry->sp;
				    break;
				}
			    }
			}
			if (!catch_iseqval) {
			    result = GET_THROWOBJ_VAL(err);
			    th->errinfo = Qnil;
			    th->cfp += 2;
			    goto finish_vme;
			}
		    }
		    /* through */
		}
		else {
		    /* TAG_BREAK */
#if OPT_STACK_CACHING
		    initial = (GET_THROWOBJ_VAL(err));
#else
		    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
		    th->errinfo = Qnil;
		    goto vm_loop_start;
		}
	    }
	}

	if (state == TAG_RAISE) {
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_RESCUE ||
			entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}
	else if (state == TAG_RETRY) {
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == CATCH_TYPE_RETRY) {
			VALUE *escape_dfp;
			escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
			if (cfp->dfp == escape_dfp) {
			    cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			    th->errinfo = Qnil;
			    goto vm_loop_start;
			}
		    }
		}
	    }
	}
	else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
	    type = CATCH_TYPE_BREAK;

	  search_restart_point:
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];

		if (entry->start < epc && entry->end >= epc) {
		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == type) {
			cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			cfp->sp = cfp->bp + entry->sp;

			if (state != TAG_REDO) {
#if OPT_STACK_CACHING
			    initial = (GET_THROWOBJ_VAL(err));
#else
			    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
			}
			th->errinfo = Qnil;
			goto vm_loop_start;
		    }
		}
	    }
	}
	else if (state == TAG_REDO) {
	    type = CATCH_TYPE_REDO;
	    goto search_restart_point;
	}
	else if (state == TAG_NEXT) {
	    type = CATCH_TYPE_NEXT;
	    goto search_restart_point;
	}
	else {
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}

	if (catch_iseqval != 0) {
	    /* found catch table */
	    rb_iseq_t *catch_iseq;

	    /* enter catch scope */
	    GetISeqPtr(catch_iseqval, catch_iseq);
	    cfp->sp = cfp->bp + cont_sp;
	    cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

	    /* push block frame */
	    cfp->sp[0] = err;
	    vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
			  cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
			  cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

	    state = 0;
	    th->state = 0;
	    th->errinfo = Qnil;
	    goto vm_loop_start;
	}
	else {
	    /* skip frame */

	    switch (VM_FRAME_TYPE(th->cfp)) {
	      case VM_FRAME_MAGIC_METHOD:
		EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
		break;
	      case VM_FRAME_MAGIC_CLASS:
		EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
		break;
	    }

	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

	    if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
		goto exception_handler;
	    }
	    else {
		vm_pop_frame(th);
		th->errinfo = err;
		TH_POP_TAG2();
		JUMP_TAG(state);
	    }
	}
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
Example #28
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* mark ruby objects */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
	RUBY_MARK_UNLESS_NULL(th->local_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine_regs,
				 (VALUE *)(&th->machine_regs) +
				 sizeof(th->machine_regs) / sizeof(VALUE));
	}

	mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
Example #29
static int profiler_source_location(const char **p_srcfile, long *p_line)
{
#if 0
  VALUE thval = rb_thread_current();
  rb_thread_t *th = DATA_PTR(thval);
  rb_control_frame_t *cfp = th->cfp;
  rb_iseq_t *iseq;
  const char *srcfile;
  long line = -1, i, pc;

  /* find cfp */
  while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        break;
    }
    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  }
  if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
    return 0;
  }
  iseq = cfp->iseq;

  /* find sourcefile */
  srcfile = RSTRING_PTR(iseq->filename);
  if (!srcfile) {
    return 0;
  }

  /* find line */
  if (iseq->insn_info_size <= 0) {
    return 0;
  }
  pc = cfp->pc - iseq->iseq_encoded;
  for (i = 0; i < iseq->insn_info_size; i++) {
    if (iseq->insn_info_table[i].position == pc) {
      line = iseq->insn_info_table[i - 1].line_no;
      break;
    }
  }
  if (line < 0) {
    line = iseq->insn_info_table[i - 1].line_no;
  }
  if (line < 0) {
    rb_bug("pline_callback_info: should not be reached");
  }
#else
  const char *srcfile;
  long line;

  srcfile = rb_sourcefile();
  if (!srcfile) {
    return 0;
  }

  line = rb_sourceline();
  if (line < 0) {
    return 0;
  }
#endif

  *p_srcfile = srcfile;
  *p_line = line;

  return 1;
}