/* Example 1 */
/*
 * Save the currently running fiber's context so execution can later be
 * resumed here, and report which leg of the jump we are on.
 *
 * Returns Qundef on the initial (saving) pass; when a longjmp brings
 * control back into this frame it instead returns the value handed to
 * the fiber (fib->cont.value).
 *
 * NOTE(review): next_fib is unused in the visible code — confirm
 * against the full file whether it is vestigial.
 */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
	/* snapshot the running thread's state into the current fiber */
	GetFiberPtr(th->fiber, fib);
	fib->cont.saved_thread = *th;
    }
    else {
	/* create current fiber */
	fib = root_fiber_alloc(th);
	th->root_fiber = th->fiber = fib->cont.self;
    }

    /* capture the machine (C) stack alongside the VM state */
    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
	/* restored: re-read th->fiber — it may have changed while we
	 * were switched away */
	GetFiberPtr(th->fiber, fib);
	return fib->cont.value;
    }
    else {
	return Qundef;
    }
}
/* Example 2 */
/*
 *  call-seq:
 *     fiber.alive? -> true or false
 *
 *  Returns true if the fiber can still be resumed (or transferred to).
 *  After finishing execution of the fiber block this method will always
 *  return false.
 */
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    /* BUG FIX: returning the raw C comparison (0/1) as a VALUE is
     * wrong — on MRI the bit pattern 1 is the tagged Fixnum 0, so the
     * method would return an Integer instead of the documented
     * true/false.  Map explicitly to the Ruby booleans. */
    return fib->status != TERMINATED ? Qtrue : Qfalse;
}
/* Example 3 */
/*
 * Resume the fiber `fibval`, passing argc/argv as its resume value.
 * Raises FiberError if the fiber already has a recorded resume parent
 * (i.e. it is being resumed while suspended inside a resume chain).
 */
VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;

    GetFiberPtr(fibval, fib);

    /* a fiber that already has a resume parent cannot be re-entered */
    if (fib->prev != Qnil) {
	rb_raise(rb_eFiberError, "double resume");
    }

    /* is_resume = 1: record the caller as this fiber's return target */
    return fiber_switch(fibval, argc, argv, 1);
}
/* Example 4 */
/*
 * Splice `fib` into the doubly-linked fiber list, immediately after
 * the currently running fiber.
 */
static void
fiber_link_join(rb_fiber_t *fib)
{
    rb_fiber_t *curr;
    VALUE currval = rb_fiber_current();

    GetFiberPtr(currval, curr);

    /* insert fib between curr and curr->next_fiber */
    fib->prev_fiber = curr;
    fib->next_fiber = curr->next_fiber;
    curr->next_fiber->prev_fiber = fib;
    curr->next_fiber = fib;
}
/* Example 5 */
/*
 * Copy the thread state captured in `cont` back into the current
 * thread.  For a continuation the saved VM stack contents are copied
 * back into the thread's existing stack; for a fiber the thread is
 * simply pointed at the fiber's own stack.  The scalar per-thread
 * fields are restored in both cases.
 */
static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
	/* continuation */
	VALUE fib;

	th->fiber = sth->fiber;
	fib = th->fiber ? th->fiber : th->root_fiber;

	if (fib) {
	    rb_fiber_t *fcont;
	    GetFiberPtr(fib, fcont);
	    /* adopt the stack belonging to the fiber on which the
	     * continuation was originally captured */
	    th->stack_size = fcont->cont.saved_thread.stack_size;
	    th->stack = fcont->cont.saved_thread.stack;
	}
#ifdef CAPTURE_JUST_VALID_VM_STACK
	/* only the live head and tail of the VM stack were saved;
	 * copy each slice back to its original position */
	MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
	MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
	       cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
	MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
	/* fiber: switch the thread onto the fiber's own stack */
	th->stack = sth->stack;
	th->stack_size = sth->stack_size;
	th->local_storage = sth->local_storage;
	th->fiber = cont->self;
    }

    /* restore scalar per-thread fields */
    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
    th->root_lep = sth->root_lep;
    th->root_svar = sth->root_svar;
}
/* Example 6 */
/*
 * Entry point of a newly started fiber: invoke the fiber's block with
 * the arguments stored in cont->value, then terminate the fiber.
 * Never returns to its caller — rb_fiber_terminate() switches away for
 * good, so reaching rb_bug() indicates a broken fiber switch.
 */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
	int argc;
	VALUE *argv, args;
	GetProcPtr(cont->saved_thread.first_proc, proc);
	args = cont->value;
	/* multiple arguments were packed into an Array; a single
	 * argument is passed by address */
	argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
	cont->value = Qnil;
	th->errinfo = Qnil;
	th->local_lfp = proc->block.lfp;
	th->local_svar = Qnil;

	fib->status = RUNNING;
	cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
	/* BUG FIX: the original tested `if (TAG_RAISE)`, a nonzero
	 * constant, so the branch was always taken and non-raise jump
	 * tags (break/throw/...) were never converted.  Compare the
	 * captured jump state against TAG_RAISE instead. */
	if (state == TAG_RAISE) {
	    th->thrown_errinfo = th->errinfo;
	}
	else {
	    th->thrown_errinfo =
	      rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
	}
	RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
/* Example 7 */
/*
 * Determine which fiber Fiber.yield should return control to: the
 * recorded resume parent when there is one (consumed here so it is
 * only used once), otherwise the root fiber.  Raises FiberError when
 * called on the root fiber itself, which has nowhere to yield to.
 */
static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();

    GetFiberPtr(curr, fib);

    if (fib->prev != Qnil) {
	/* consume the stored resume parent */
	VALUE dest = fib->prev;
	fib->prev = Qnil;
	return dest;
    }
    else {
	rb_thread_t *th = GET_THREAD();

	if (th->root_fiber == curr) {
	    rb_raise(rb_eFiberError, "can't yield from root fiber");
	}
	return th->root_fiber;
    }
}
/* Example 8 */
/*
 * Transfer control to the fiber `fibval`, handing it argc/argv as its
 * resume value.  When is_resume is nonzero the current fiber is
 * recorded as the target's return destination (Fiber#resume
 * semantics); otherwise it is a plain transfer.  Returns the value
 * passed back when control eventually returns here.
 */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    VALUE result;

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    /* reject switches that could never complete safely */
    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eFiberError, "fiber called across threads");
    }
    if (cont->saved_thread.trap_tag != th->trap_tag) {
	rb_raise(rb_eFiberError, "fiber called across trap");
    }
    if (fib->status == TERMINATED) {
	rb_raise(rb_eFiberError, "dead fiber called");
    }

    if (is_resume) {
	/* remember where Fiber.yield should come back to */
	fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    result = fiber_store(fib);
    if (result == Qundef) {
	/* saving leg of the jump: actually switch to the target */
	cont_restore_0(&fib->cont, &result);
	rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return result;
}
/* Example 9 */
/*
 * Second stage of restoring a continuation/fiber: copy the saved
 * thread state back into the current thread, restore the machine (C)
 * stack that was captured at save time, and longjmp back into the
 * saved execution context.  Does not return.
 */
static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
	/* continuation */
	VALUE fib;

	th->fiber = sth->fiber;
	fib = th->fiber ? th->fiber : th->root_fiber;

	if (fib) {
	    rb_fiber_t *fcont;
	    GetFiberPtr(fib, fcont);
	    /* adopt the stack belonging to the fiber on which the
	     * continuation was originally captured */
	    th->stack_size = fcont->cont.saved_thread.stack_size;
	    th->stack = fcont->cont.saved_thread.stack;
	}
#ifdef CAPTURE_JUST_VALID_VM_STACK
	/* only the live head and tail of the VM stack were saved;
	 * copy each slice back to its original position */
	MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
	MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
	       cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
	MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
	/* fiber: switch the thread onto the fiber's own stack */
	th->stack = sth->stack;
	th->stack_size = sth->stack_size;
	th->local_storage = sth->local_storage;
	th->fiber = cont->self;
    }

    /* restore scalar per-thread fields */
    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

    /* restore machine stack */
#ifdef _M_AMD64
    {
	/* workaround for x64 SEH: refresh the saved jmpbuf's SEH frame
	 * pointer with the current one so unwinding stays valid */
	jmp_buf buf;
	setjmp(buf);
	((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
	    ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
	FLUSH_REGISTER_WINDOWS;
	/* overwrite the live C stack with the saved copy; safe because
	 * we longjmp away immediately afterwards */
	MEMCPY(cont->machine_stack_src, cont->machine_stack,
	       VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    /* ia64 keeps a second, register backing-store stack */
    if (cont->machine_register_stack_src) {
	MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
	       VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
/* Example 10 */
/*
 * Build a Hash describing one Ruby VM control frame for backtrace
 * purposes, with keys :object, :file, :line and :method (plus :fiber
 * when the frame belongs to a fiber).
 *
 * *c_current_frame may be advanced to the previous control frame when
 * the current one carries no usable information (the function then
 * recurses on the new frame).  Returns Qnil for the frame seen after
 * a fiber frame, where no sensible data exists.
 *
 * NOTE(review): relies on MRI-internal frame layout (iseq/pc/me/
 * block_iseq) — verify against the exact Ruby version targeted.
 */
VALUE rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(	rb_control_frame_t**		c_current_frame )	{

	const char*		c_method_name		=	NULL;
	int				c_sourcefile_line	=	0;
	
	//	create new hash for this frame
	VALUE	rb_frame_hash			=	rb_hash_new();
		
	VALUE	rb_sourcefile_name		=	Qnil;
	VALUE	rb_sourcefile_line		=	Qnil;
	VALUE	rb_method_name			=	Qnil;
	VALUE	rb_object_for_frame		=	Qnil;
	
	if ( ( *c_current_frame )->iseq != 0 ) {
		
		//	a regular Ruby (instruction-sequence) frame with a live pc
		if ( ( *c_current_frame )->pc != 0 ) {
			
			rb_iseq_t *iseq			= ( *c_current_frame )->iseq;
			
			//	get sourcefile name and set in hash
			rb_sourcefile_name		=	iseq->filename;
			
			//	get sourcefile line and set in hash
			c_sourcefile_line		=	rb_vm_get_sourceline( *c_current_frame );
			rb_sourcefile_line		=	INT2FIX( c_sourcefile_line );
			
			//	get name of instruction sequence
			rb_method_name			=	ID2SYM( rb_intern( StringValuePtr( iseq->name ) ) );				
			rb_object_for_frame	=	( *c_current_frame )->self;
		}
	}
	else if ( RUBYVM_CFUNC_FRAME_P( *c_current_frame ) ) {
		
		//	C-function frame: get name of method
		//	NOTE(review): the patchlevel test below is always true for
		//	released builds — confirm it selects the intended branch

		#if RUBY_PATCHLEVEL >= -1
		//	For 1.9.2:
		const rb_method_entry_t*	c_method_for_frame	=	( *c_current_frame )->me;
		c_method_name				=	rb_id2name( c_method_for_frame->called_id );
		#else
		//	For 1.9.1:
		c_method_name				=	rb_id2name( ( *c_current_frame )->method_id );
		#endif
		
		//	rb_id2name may return NULL for anonymous IDs
		rb_method_name				=	( c_method_name == NULL ? Qnil : ID2SYM( rb_intern( c_method_name ) ) );
		rb_object_for_frame	=	( *c_current_frame )->self;		
	}
	//	we have to test this case - it works for blocks but there may be other cases too
	else if (	( *c_current_frame )->block_iseq != 0
			 &&	( *c_current_frame )->pc == 0)	{
	
		//	If we got here we have a fiber
		//	There doesn't seem to be much that we can tell about a fiber's context
			
		VALUE			rb_current_fiber	=	rb_fiber_current();
		rb_fiber_t*		c_current_fiber		=	NULL;

		GetFiberPtr(	rb_current_fiber, 
						c_current_fiber);
						
		rb_context_t*	c_context			=	& c_current_fiber->cont;
		
//		rb_block_t*	c_blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP( *c_current_frame );
		 
		rb_object_for_frame	=	( *c_current_frame )->self;
		
		//	get sourcefile name and set in hash
		rb_sourcefile_name		=	Qnil;

		//	get sourcefile line and set in hash
		rb_sourcefile_line		=	Qnil;
		
		//	get name of instruction sequence
		rb_method_name			=	rb_str_new2( "<Fiber>" );		
		
		//	if we have a fiber we also include its ruby reference since we have so little other context
		rb_hash_aset(	rb_frame_hash,
						ID2SYM( rb_intern( "fiber" ) ),
						c_context->self );
		
		//	The one time that we know a fiber is in use in the Ruby base is with Enumerators
		//	For now we will handle that with a special case
		
		VALUE	rb_enumerator_class	=	rb_const_get(	rb_cObject,
														rb_intern( "Enumerator" ) );
		
		//	normalize to a class before asking for ancestors
		VALUE	rb_object_for_frame_klass	=	( ( TYPE( rb_object_for_frame ) == T_CLASS ) ? rb_object_for_frame : rb_funcall( rb_object_for_frame, rb_intern( "class" ), 0 ) );

		VALUE	rb_ancestors	=	rb_funcall(	rb_object_for_frame_klass,
												rb_intern( "ancestors" ),
												0 );
		
		if ( rb_ary_includes(	rb_ancestors,
								rb_enumerator_class ) )	{
			
			//	report the enumerated object and method instead of "<Fiber>"
			struct enumerator* c_enumerator		=	enumerator_ptr( rb_object_for_frame );
			
			rb_object_for_frame	=	c_enumerator->obj;
			rb_method_name		=	ID2SYM( c_enumerator->meth );			
		}
		
	}
	else if (	( *c_current_frame )->block_iseq == 0
			 &&	( *c_current_frame )->pc == 0)	{
		//	this happens after we had a fiber and we try to go up - which doesn't make sense with a fiber
		//	not sure what we want to do here, if anything
		return Qnil;
	}
	else {
		//	The third possibility is that we have an iseq frame with nil params for what we want
		//	In that case we can simply return the next frame
		*c_current_frame	=	RUBY_VM_PREVIOUS_CONTROL_FRAME( *c_current_frame );
		
		//	in theory this could crash because we are going forward a frame when we don't know what's there
		//	in practice I think we are ok, since we are only jumping forward from nil frames which should never be at the end
		//	at least - I don't think they should... we shall see.
		//	
		//	a fix would be to check the next frame, but that requires access to the thread or the limit cfp, 
		//	which requires passing more context; so for now, I'm leaving it there
		
		return rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame( c_current_frame );
	}

	//	Push values to return hash

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "object" ) ),
					rb_object_for_frame );
					
	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "file" ) ),
					rb_sourcefile_name );

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "line" ) ),
					rb_sourcefile_line );

	rb_hash_aset(	rb_frame_hash,
					ID2SYM( rb_intern( "method" ) ),
					rb_method_name );
	
	return rb_frame_hash;
}