/*
 * Invoke a Continuation object: restores the execution context captured
 * by callcc.  Does not return to the caller — cont_restore_0 longjmps
 * back into the saved context.
 *
 * contval - the Continuation (wraps an rb_context_t)
 * argc/argv - values delivered to the resumed callcc as its result
 */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    /* A continuation may only be re-entered on the thread that captured it. */
    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    /* Refuse to jump across a signal/trap handler boundary. */
    if (cont->saved_thread.trap_tag != th->trap_tag) {
	rb_raise(rb_eRuntimeError, "continuation called across trap");
    }
    if (cont->saved_thread.fiber) {
	rb_context_t *fcont;
	/* fcont is not read afterwards; presumably GetContPtr is used here
	 * for its type/validity check on the saved fiber — TODO confirm. */
	GetContPtr(cont->saved_thread.fiber, fcont);

	/* The continuation was captured inside a fiber; it may only be
	 * called while that same fiber is running. */
	if (th->fiber != cont->saved_thread.fiber) {
	    rb_raise(rb_eRuntimeError, "continuation called across fiber");
	}
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* Jumps into the saved context; control never comes back here. */
    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
/*
 * Save the currently running fiber's state (thread snapshot + machine
 * stack) and park it with setjmp.
 *
 * Returns Qundef on the initial, saving pass (the caller then performs
 * the actual switch), or the value handed to this fiber (cont->value)
 * once some other fiber longjmps back into us.
 *
 * NOTE(review): the next_cont parameter is not used in this body; the
 * switch itself appears to be done by the caller via cont_restore_0 —
 * confirm against fiber_switch.
 */
static VALUE
fiber_store(rb_context_t *next_cont)
{
    rb_thread_t *th = GET_THREAD();
    rb_context_t *cont;

    if (th->fiber) {
	/* Snapshot the whole thread structure into the current fiber. */
	GetContPtr(th->fiber, cont);
	cont->saved_thread = *th;
    }
    else {
	/* create current fiber */
	cont = fiber_alloc(rb_cFiber); /* no need to allocate vm stack */
	cont->type = ROOT_FIBER_CONTEXT;
	/* First fiber on this thread becomes its root fiber. */
	th->root_fiber = th->fiber = cont->self;
    }

    /* Copy the C (machine) stack aside so it can be restored later. */
    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
	/* restored: re-fetch the context — th->fiber may have been
	 * changed while we were suspended. */
	GetContPtr(th->fiber, cont);
	return cont->value;
    }
    else {
	/* Saving pass: tell the caller to go ahead with the switch. */
	return Qundef;
    }
}
/*
 * call-seq: fiber.alive? -> true or false
 *
 * Returns the fiber's alive flag (a Ruby VALUE stored on the context):
 * whether the fiber can still be resumed.
 */
VALUE
rb_fiber_alive_p(VALUE fib)
{
    rb_context_t *ctx;

    GetContPtr(fib, ctx);
    return ctx->alive;
}
/*
 * Transfer control to the fiber `fib`, passing argc/argv as the value it
 * receives.  Returns the value handed back when control eventually comes
 * back to the current fiber.
 *
 * is_resume - non-zero for Fiber#resume (records the current fiber as
 *             cont->prev so the target can yield back to us); zero for a
 *             plain transfer.
 */
static inline VALUE
fiber_switch(VALUE fib, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetContPtr(fib, cont);

    /* Validity checks: same thread, not across a trap handler, target
     * still alive. */
    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.trap_tag != th->trap_tag) {
	rb_raise(rb_eFiberError, "fiber called across trap");
    }
    else if (!cont->alive) {
	rb_raise(rb_eFiberError, "dead fiber called");
    }

    if (is_resume) {
	/* Remember who resumed us so Fiber.yield can return here. */
	cont->prev = rb_fiber_current();
    }

    cont->value = make_passing_arg(argc, argv);

    /* fiber_store saves the current fiber and parks it with setjmp.
     * Qundef means this is the saving pass: perform the actual jump into
     * the target via cont_restore_0, which never returns.  Any other
     * value means we were resumed later and `value` is what was passed
     * back to us. */
    if ((value = fiber_store(cont)) == Qundef) {
	cont_restore_0(cont, &value);
	rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return value;
}
/*
 * Resume the fiber `fib`, delivering argc/argv as the value of the
 * Fiber.yield (or block start) it is parked on.  Raises FiberError if
 * the fiber is already linked to a resumer (double resume).
 */
VALUE
rb_fiber_resume(VALUE fib, int argc, VALUE *argv)
{
    rb_context_t *ctx;

    GetContPtr(fib, ctx);

    /* A non-nil prev link means some fiber already resumed this one and
     * has not been yielded back to yet. */
    if (ctx->prev != Qnil) {
	rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fib, argc, argv, 1 /* is_resume */);
}
/*
 * Decide which fiber Fiber.yield should return control to: the fiber
 * that resumed us (consuming the one-shot prev link), or the thread's
 * root fiber.  Raises FiberError when yielding from the root fiber
 * itself.
 */
static VALUE
return_fiber(void)
{
    rb_context_t *ctx;
    VALUE current = rb_fiber_current();

    GetContPtr(current, ctx);

    /* Normal case: go back to whoever resumed us, clearing the link so
     * the next yield falls through to the root fiber. */
    if (ctx->prev != Qnil) {
	VALUE target = ctx->prev;
	ctx->prev = Qnil;
	return target;
    }

    {
	rb_thread_t *th = GET_THREAD();

	if (th->root_fiber == current) {
	    rb_raise(rb_eFiberError, "can't yield from root fiber");
	}
	return th->root_fiber;
    }
}
void rb_fiber_start(void) { rb_thread_t *th = GET_THREAD(); rb_context_t *cont; rb_proc_t *proc; VALUE args; int state; GetContPtr(th->fiber, cont); TH_PUSH_TAG(th); if ((state = EXEC_TAG()) == 0) { GetProcPtr(cont->saved_thread.first_proc, proc); args = cont->value; cont->value = Qnil; th->errinfo = Qnil; th->local_lfp = proc->block.lfp; th->local_svar = Qnil; cont->value = vm_invoke_proc(th, proc, proc->block.self, 1, &args, 0); } TH_POP_TAG(); if (state) { if (TAG_RAISE) { th->thrown_errinfo = th->errinfo; } else { th->thrown_errinfo = vm_make_jump_tag_but_local_jump(state, th->errinfo); } RUBY_VM_SET_INTERRUPT(th); } rb_fiber_terminate(cont); rb_bug("rb_fiber_start: unreachable"); }
/*
 * Second phase of restoring a saved context: copy the saved thread
 * state and VM stack back into the running thread, restore the machine
 * stack, then longjmp into the saved jmpbuf.  Never returns.
 */
static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
	/* continuation: rebind to the fiber (or root fiber) that was
	 * active at capture time, adopting its VM stack. */
	VALUE fib;

	th->fiber = sth->fiber;
	fib = th->fiber ? th->fiber : th->root_fiber;

	if (fib) {
	    rb_context_t *fcont;
	    GetContPtr(fib, fcont);
	    th->stack_size = fcont->saved_thread.stack_size;
	    th->stack = fcont->saved_thread.stack;
	}
#ifdef CAPTURE_JUST_VALID_VM_STACK
	/* Only the live head and tail of the VM stack were captured;
	 * copy them back into place. */
	MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
	MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
	       cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
	/* FIX: was `#elif MEMCPY(...)` — MEMCPY is not a preprocessor
	 * symbol, so the directive evaluated to false and the full
	 * VM-stack copy was silently compiled out in this configuration.
	 * It must be a plain #else with the MEMCPY as code. */
	MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
	/* fiber: the fiber owns its own VM stack and local storage. */
	th->stack = sth->stack;
	th->stack_size = sth->stack_size;
	th->local_storage = sth->local_storage;
	th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->trap_tag = sth->trap_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

    /* restore machine stack */
#ifdef _M_AMD64
    {
	/* workaround for x64 SEH: refresh the saved jmpbuf's SEH frame
	 * pointer with the current one so longjmp's unwinding checks
	 * pass. */
	jmp_buf buf;
	setjmp(buf);
	((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
	    ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
	FLUSH_REGISTER_WINDOWS;
	MEMCPY(cont->machine_stack_src, cont->machine_stack,
	       VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    /* ia64 keeps a second, register backing-store stack. */
    if (cont->machine_register_stack_src) {
	MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
	       VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}