// Transfer control to `other`, passing `param` through the fcontext switch.
// Returns the value handed back when some coroutine later resumes us.
intptr_t coroutine_context::jump( coroutine_context & other, intptr_t param, bool preserve_fpu) {
#if defined(BOOST_USE_SEGMENTED_STACKS)
    // Save our segmented-stack state and install the target's before switching.
    if ( stack_ctx_) __splitstack_getcontext( stack_ctx_->segments_ctx);
    if ( other.stack_ctx_) __splitstack_setcontext( other.stack_ctx_->segments_ctx);
#endif
    // BUG FIX: the original returned straight out of jump_fcontext, which made
    // the restore of this coroutine's segmented-stack context below unreachable
    // dead code. Capture the result, restore, then return.
    intptr_t ret = context::jump_fcontext( ctx_, other.ctx_, param, preserve_fpu);
#if defined(BOOST_USE_SEGMENTED_STACKS)
    // Reinstall our own segment chain now that control has come back here.
    if ( stack_ctx_) __splitstack_setcontext( stack_ctx_->segments_ctx);
#endif
    return ret;
}
// Switch to the scheduler stack (m->g0) and invoke pfn(g) there.
// On return from a later setcontext back into this frame, execution
// continues after getcontext with fromgogo set — in that case this
// function simply returns to its caller.
static void runtime_mcall(void (*pfn)(G*)) {
#ifndef USING_SPLIT_STACK
	// Dummy local whose address marks the current stack top for the GC
	// when split stacks are not in use.
	int i;
#endif
	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();
	if(g == m->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");
	if(g != nil) {
#ifdef USING_SPLIT_STACK
		// Record the split-stack segment chain so it can be restored
		// when this goroutine is resumed.
		__splitstack_getcontext(&g->stack_context[0]);
#else
		// Remember the current stack pointer so the GC can scan
		// this goroutine's stack while it is parked.
		g->gcnext_sp = &i;
#endif
		g->fromgogo = false;
		getcontext(&g->context);
		// NOTE(review): execution may resume here on a different OS
		// thread after a setcontext elsewhere. The globals m and g are
		// referenced by name below, but a compiler may have cached
		// their (thread-local) addresses in this stack frame; later
		// revisions of this code reload them via runtime_m()/runtime_g()
		// calls for exactly that reason — TODO confirm whether this
		// toolchain is affected.
	}
	// First pass through (or g == nil): stage pfn on the scheduler stack
	// and jump to it. pfn must not return.
	if (g == nil || !g->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&m->g0->stack_context[0]);
#endif
		m->g0->entry = (byte*)pfn;
		m->g0->param = g;
		g = m->g0;
		setcontext(&m->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
// Switch execution to `other`, carrying `param` across the fcontext jump.
// The value returned is whatever a peer coroutine later passes back when
// it resumes this context.
intptr_t coroutine_context::jump( coroutine_context & other, intptr_t param, bool preserve_fpu) {
#if defined(BOOST_USE_SEGMENTED_STACKS)
    // Snapshot our segmented-stack state, then make the target's segment
    // chain the active one before transferring control.
    __splitstack_getcontext( palloc_.sctx.segments_ctx);
    __splitstack_setcontext( other.palloc_.sctx.segments_ctx);
#endif
    intptr_t const transferred = context::jump_fcontext( & ctx_, other.ctx_, param, preserve_fpu);
#if defined(BOOST_USE_SEGMENTED_STACKS)
    // Control is back here: reactivate our own segment chain.
    __splitstack_setcontext( palloc_.sctx.segments_ctx);
#endif
    return transferred;
}
// Switch to the scheduler stack (mp->g0) and invoke pfn(gp) there.
// If we re-enter this frame via a later setcontext (fromgogo set),
// the function simply returns to its caller.
static void runtime_mcall(void (*pfn)(G*)) {
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	// Dummy local whose address marks the current stack top for the GC
	// when split stacks are not in use.
	int i;
#endif
	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();
	mp = m;
	gp = g;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");
	if(gp != nil) {
#ifdef USING_SPLIT_STACK
		// Record the split-stack segment chain so it can be restored
		// when this goroutine is resumed.
		__splitstack_getcontext(&g->stack_context[0]);
#else
		// Remember the current stack pointer so the GC can scan this
		// goroutine's stack while it is parked.
		gp->gcnext_sp = &i;
#endif
		gp->fromgogo = false;
		getcontext(&gp->context);
		// When we return from getcontext, we may be running
		// in a new thread.  That means that m and g may have
		// changed.  They are global variables so we will
		// reload them, but the addresses of m and g may be
		// cached in our local stack frame, and those
		// addresses may be wrong.  Call functions to reload
		// the values for this thread.
		mp = runtime_m();
		gp = runtime_g();
		// Deferred traceback request: service it now that we are
		// back on this goroutine's stack.
		if(gp->traceback != nil)
			gtraceback(gp);
	}
	// First pass through (or gp == nil): stage pfn on the scheduler
	// stack and jump to it.  pfn must not return.
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;
		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;
		fixcontext(&mp->g0->context);
		setcontext(&mp->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
// Transfer control from coroutine `from` to coroutine `to`.
// Returns (to the caller of swap_context) only when `from` is resumed.
friend void swap_context(fcontext_context_impl& from, fcontext_context_impl const& to, detail::default_hint) {
#if defined(HPX_GENERIC_CONTEXT_USE_SEGMENTED_STACKS)
    // BUG FIX: the original read `from.alloc_.segments_ctx_` (trailing
    // underscore) here while both restore calls below spell the member
    // `segments_ctx`; only one spelling can compile, so unify on the one
    // every other call in this file uses.
    __splitstack_getcontext(from.alloc_.segments_ctx);
    __splitstack_setcontext(to.alloc_.segments_ctx);
#endif
    // switch to other coroutine context
#if BOOST_VERSION < 105600
    boost::context::jump_fcontext(&from.ctx_, &to.ctx_, to.cb_, false);
#else
    boost::context::jump_fcontext(&from.ctx_, to.ctx_, to.cb_, false);
#endif
#if defined(HPX_GENERIC_CONTEXT_USE_SEGMENTED_STACKS)
    // Control returned: reinstall this coroutine's segment chain.
    __splitstack_setcontext(from.alloc_.segments_ctx);
#endif
}
// Resume execution of goroutine newg; never returns to the caller.
static void runtime_gogo(G* newg) {
#ifdef USING_SPLIT_STACK
	// Reinstall newg's split-stack segment chain before switching stacks.
	__splitstack_setcontext(&newg->stack_context[0]);
#endif
	g = newg;
	newg->fromgogo = true;
	setcontext(&newg->context);
	// BUG FIX: setcontext must never return; the original fell off the
	// end here, silently returning to the caller on the wrong stack.
	// Fail loudly instead, matching the later revision of this routine.
	runtime_throw("gogo setcontext returned");
}
// runtime_gogo hands the CPU to goroutine gp; it does not return.
static void runtime_gogo(G* gp) {
#ifdef USING_SPLIT_STACK
	// Activate gp's split-stack segment chain before the switch.
	__splitstack_setcontext(&gp->stack_context[0]);
#endif
	g = gp;			/* publish the new current goroutine */
	gp->fromgogo = true;	/* tell mcall's getcontext path we arrived via gogo */
	fixcontext(&gp->context);
	setcontext(&gp->context);
	// setcontext never returns; reaching this point is a fatal bug.
	runtime_throw("gogo setcontext returned");
}
// Jump from the context identified by (from_fcontext, from_stack) into
// (to_fcontext, to_stack), carrying `param` across the switch. The value
// returned is whatever the peer passes back when it resumes us.
intptr_t coroutine_context_base::jump_to(fcontext::fcontext_t& from_fcontext, fcontext::fcontext_t& to_fcontext,
    stack_context& from_stack, stack_context& to_stack, intptr_t param, bool preserve_fpu) {
#ifdef COPP_MACRO_USE_SEGMENTED_STACKS
    // Jumping within a single stack context would be meaningless here.
    assert(&from_stack != &to_stack);
    // Snapshot the outgoing segment chain, then activate the target's
    // before transferring control.
    __splitstack_getcontext(from_stack.segments_ctx);
    __splitstack_setcontext(to_stack.segments_ctx);
#endif
    return copp::fcontext::copp_jump_fcontext(&from_fcontext, to_fcontext, param, preserve_fpu);
}
// Transfer control to `other`, handing over `param`. The transfer_t that
// comes back when we are resumed carries both the resumer's updated
// fcontext handle and the pointer it passed to us.
void * coroutine_context::jump( coroutine_context & other, void * param) {
#if defined(BOOST_USE_SEGMENTED_STACKS)
    // Record our segmented-stack state, then activate the target's
    // segment chain before switching.
    __splitstack_getcontext( palloc_.sctx.segments_ctx);
    __splitstack_setcontext( other.palloc_.sctx.segments_ctx);
#endif
    data_t outbound = { this, param };
    context::detail::transfer_t transfer = context::detail::jump_fcontext( other.ctx_, & outbound);
    data_t * inbound = static_cast< data_t * >( transfer.data);
    // Store the refreshed fcontext handle of whichever coroutine resumed us.
    inbound->from->ctx_ = transfer.fctx;
    return inbound->data;
}