// Allocate a new g, with a stack big enough for stacksize bytes. G* runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize) { G *newg; newg = runtime_malloc(sizeof(G)); if(stacksize >= 0) { #if USING_SPLIT_STACK int dont_block_signals = 0; *ret_stack = __splitstack_makecontext(stacksize, &newg->stack_context[0], ret_stacksize); __splitstack_block_signals_context(&newg->stack_context[0], &dont_block_signals, nil); #else *ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0); *ret_stacksize = stacksize; newg->gcinitial_sp = *ret_stack; newg->gcstack_size = stacksize; runtime_xadd(&runtime_stacks_sys, stacksize); #endif } return newg; }
/* Create a new goroutine that will run fn(arg).
   Takes schedlock for the whole operation; the new G is handed to the
   scheduler in Gwaiting state via newprocreadylocked.  */
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G * volatile newg;	/* volatile to avoid longjmp warning */

	schedlock();

	/* First try to reuse a G (and its stack) from the free list.  */
	if((newg = gfget()) != nil) {
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		/* Rewind the recycled split-stack context back to its
		   initial segment and re-enable signals on it.  */
		sp = __splitstack_resetcontext(&newg->stack_context[0], &spsize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		/* Fixed stacks: reuse the stack recorded when the G was
		   first created.  A zero size means the G was corrupted.  */
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnext_sp = sp;
#endif
	} else {
		/* No free G available: allocate a fresh one and append it
		   to the global list of all goroutines (runtime_allg).  */
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}

	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
	/* Record the caller's PC for profiling/tracebacks.  */
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	/* Assign the next goroutine id from the global generator
	   (safe here because schedlock is held).  */
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	/* Build the ucontext that will start the goroutine in kickoff.  */
	getcontext(&newg->context);
	newg->context.uc_stack.ss_sp = sp;
#ifdef MAKECONTEXT_STACK_TOP
	/* Some platforms expect ss_sp to point at the TOP of the stack.  */
	newg->context.uc_stack.ss_sp += spsize;
#endif
	newg->context.uc_stack.ss_size = spsize;
	makecontext(&newg->context, kickoff, 0);

	/* Mark the G runnable and let the scheduler pick it up.  */
	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
// Reserve a segmented (split) stack of at least `size` bytes.
// On return ctx.size holds the size actually reserved and ctx.sp points
// to the top of the region (the stack grows downwards).
void
segmented_stack_allocator::allocate( stack_context & ctx, std::size_t size)
{
    void * base = __splitstack_makecontext( size, ctx.segments_ctx, & ctx.size);
    BOOST_ASSERT( base);

    // Stacks grow down, so the usable stack pointer is the highest
    // address of the allocated region.
    ctx.sp = static_cast< char * >( base) + ctx.size;

    // Leave signal delivery enabled while running on this stack.
    int no_block = 0;
    __splitstack_block_signals_context( ctx.segments_ctx, & no_block, 0);
}
// Reserve a split-stack context of at least `size` bytes (defaulting to
// the traits' minimum stack size).
// Throws std::bad_alloc if the underlying allocation fails.
void allocate( stack_context & ctx, std::size_t size = traits_type::minimum_size() )
{
    void * base = __splitstack_makecontext( size, ctx.segments_ctx, & ctx.size);
    if ( ! base)
        throw std::bad_alloc();

    // __splitstack_makecontext already stored the real size in ctx.size;
    // the stack grows down, so sp is the end of the region.
    ctx.sp = static_cast< char * >( base) + ctx.size;

    // Keep signals deliverable on this stack.
    int no_block = 0;
    __splitstack_block_signals_context( ctx.segments_ctx, & no_block, 0);
}
// Reserve a split stack of `size` bytes (callers must request at least
// the default stack size) and return a pointer to its top — stacks grow
// downwards, so the returned pointer is the end of the region.
// Throws std::bad_alloc (via boost::throw_exception) on failure.
void* allocate(std::size_t size) const
{
    HPX_ASSERT(default_stacksize() <= size);

    // `size` doubles as the out-parameter: it is overwritten with the
    // size actually reserved by the split-stack runtime.
    void* base = __splitstack_makecontext(size, segments_ctx_, &size);
    if (!base)
        boost::throw_exception(std::bad_alloc());

    // Leave signal handling enabled on this stack.
    int no_block = 0;
    __splitstack_block_signals_context(segments_ctx_, &no_block, 0);

    return static_cast<char *>(base) + size;
}
// Reserve a segmented stack of at least `size` bytes.
// On success ctx.sp points at the top of the stack (stacks grow down)
// and ctx.size holds the size actually reserved; on failure (release
// builds, where the assert compiles out) ctx.sp is set to NULL.
void stack_allocator_split_segment::allocate(stack_context & ctx, std::size_t size)
{
    void* base = __splitstack_makecontext(size, ctx.segments_ctx, &ctx.size);
    assert(base);          // debug builds abort on allocation failure
    if (base == NULL)
    {
        // Release-build fallback: signal failure through a null sp.
        ctx.sp = NULL;
        return;
    }

    ctx.sp = static_cast<char *>(base) + ctx.size;  // stack grows down

    // Keep signal delivery enabled on this stack.
    int no_block = 0;
    __splitstack_block_signals_context(ctx.segments_ctx, &no_block, 0);
}