/* Run the deferred functions recorded for FRAME as the function
   returns normally.  Each entry is popped off the goroutine's defer
   stack, its function cleared and invoked, and the entry freed.  */
void
__go_undefer (_Bool *frame)
{
  G *g = runtime_g ();

  for (;;)
    {
      struct __go_defer_stack *entry;
      void (*fn) (void *);

      entry = g->defer;
      if (entry == NULL || entry->__frame != frame)
	break;

      fn = entry->__pfn;
      entry->__pfn = NULL;
      if (fn != NULL)
	(*fn) (entry->__arg);

      g->defer = entry->__next;
      __go_free (entry);

      /* Since we are executing a defer function here, we know we are
	 returning from the calling function.  If the calling
	 function, or one of its callees, paniced, then the defer
	 functions would be executed by __go_panic.  */
      *frame = 1;
    }
}
/* Prepare to run Go code called back from C.  Acquires an M if this
   thread does not have one, leaves syscall mode, and for
   c-archive/c-shared builds waits for package initialization before
   the callback runs.  */
void
syscall_cgocallback ()
{
  M *mp;

  mp = runtime_m ();
  if (mp == NULL)
    {
      /* This thread has no M, so it was not started by the Go
	 runtime.  Grab an extra M and mark it to be dropped when the
	 callback is done (see syscall_cgocallbackdone).  */
      runtime_needm ();
      mp = runtime_m ();
      mp->dropextram = true;
    }

  runtime_exitsyscall ();

  if (runtime_g ()->ncgo == 0)
    {
      /* The C call to Go came from a thread not currently running
	 any Go.  In the case of -buildmode=c-archive or c-shared,
	 this call may be coming in before package initialization is
	 complete.  Wait until it is.  */
      __go_receive (NULL, runtime_main_init_done, NULL);
    }

  /* Reload the M: the calls above may have parked this goroutine, so
     it can be running on a different thread now.  */
  mp = runtime_m ();

  if (mp->needextram)
    {
      mp->needextram = 0;
      runtime_newextram ();
    }
}
// Switch to the g0 (scheduler) stack of the current M and run
// pfn(gp), where gp is the goroutine that called mcall.  pfn is
// expected not to return normally (the throw below fires if it
// does); it should continue execution via another context switch.
static void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	mp = m;
	gp = g;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stack_context[0]);
#else
		// Without split stacks, record the current stack
		// position so the GC can scan this goroutine's stack.
		gp->gcnext_sp = &i;
#endif
		// fromgogo distinguishes a direct return from
		// getcontext from a later resumption via gogo.
		gp->fromgogo = false;
		getcontext(&gp->context);

		// When we return from getcontext, we may be running
		// in a new thread.  That means that m and g may have
		// changed.  They are global variables so we will
		// reload them, but the addresses of m and g may be
		// cached in our local stack frame, and those
		// addresses may be wrong.  Call functions to reload
		// the values for this thread.
		mp = runtime_m();
		gp = runtime_g();

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(&mp->g0->context);
		setcontext(&mp->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
_Bool __go_set_defer_retaddr (void *retaddr) { G *g; g = runtime_g (); if (g->defer != NULL) g->defer->__retaddr = retaddr; return 0; }
/* Prepare to call from Go code out to C code: bump the per-M and
   per-G cgo call counters and enter syscall mode so the scheduler
   does not wait on this goroutine while it is in C.  */
void
syscall_cgocall ()
{
  M *mp;
  G *gp;

  mp = runtime_m ();
  mp->ncgocall++;
  gp = runtime_g ();
  gp->ncgo++;
  runtime_entersyscall ();
}
// Prepare the runtime to report a panic.  m->dying tracks how far a
// previous report attempt got, so nested failures degrade to
// progressively simpler output and finally a bare exit.
void
runtime_startpanic(void)
{
	M *m;

	m = runtime_m();
	if(runtime_mheap.cachealloc.size == 0) { // very early
		runtime_printf("runtime: panic before malloc heap initialized\n");
		m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(m->mcache == nil) // can happen if called from signal handler or throw
		m->mcache = runtime_allocmcache();
	switch(m->dying) {
	case 0:
		// First panic on this M: take the panic lock, freeze
		// the world, and let the caller print the report.
		m->dying = 1;
		if(runtime_g() != nil)
			runtime_g()->writebuf = nil;
		runtime_xadd(&runtime_panicking, 1);
		runtime_lock(&paniclk);
		if(runtime_debug.schedtrace > 0 || runtime_debug.scheddetail > 0)
			runtime_schedtrace(true);
		runtime_freezetheworld();
		return;
	case 1:
		// Something failed while panicing, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		m->dying = 2;
		runtime_printf("panic during panic\n");
		runtime_dopanic(0);
		runtime_exit(3);
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		m->dying = 3;
		runtime_printf("stack trace unavailable\n");
		runtime_exit(4);
	default:
		// Can't even print!  Just exit.
		runtime_exit(5);
	}
}
/* Finish a callback from C into Go: re-enter syscall mode, and if
   this thread borrowed an extra M for the callback (dropextram set by
   syscall_cgocallback) and has no cgo calls outstanding, return the
   extra M to the runtime.  */
void
syscall_cgocallbackdone ()
{
  M *m;

  runtime_entersyscall ();
  m = runtime_m ();
  if (m->dropextram && runtime_g ()->ncgo == 0)
    {
      m->dropextram = false;
      runtime_dropm ();
    }
}
/* Prepare to call from Go code out to C code.  On the first such
   call (claimed via compare-and-swap on runtime_needextram) an extra
   M is created — presumably for servicing callbacks from C; confirm
   against runtime_newextram.  Then bump the cgo call counters and
   enter syscall mode.  */
void
syscall_cgocall ()
{
  M *mp;
  G *gp;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  mp = runtime_m ();
  mp->ncgocall++;
  gp = runtime_g ();
  gp->ncgo++;
  runtime_entersyscall ();
}
/* Implement the Go defer statement: push a new entry describing the
   deferred call PFN(ARG) for FRAME onto the goroutine's defer
   stack.  */
void
__go_defer (_Bool *frame, void (*pfn) (void *), void *arg)
{
  G *g;
  struct __go_defer_stack *d;

  g = runtime_g ();
  d = ((struct __go_defer_stack *)
       __go_alloc (sizeof (struct __go_defer_stack)));
  d->__frame = frame;
  d->__panic = g->panic;
  d->__pfn = pfn;
  d->__arg = arg;
  d->__retaddr = NULL;
  d->__next = g->defer;
  g->defer = d;
}
struct __go_empty_interface __go_deferred_recover () { G *g; g = runtime_g (); if (g->defer == NULL || g->defer->__panic != g->panic) { struct __go_empty_interface ret; ret.__type_descriptor = NULL; ret.__object = NULL; return ret; } return __go_recover (); }
/* Allocate N bytes and record the allocation on the goroutine's
   cgomal list so the memory stays referenced (and is released later
   — see the g->cgomal reset elsewhere in this file).  */
void *
alloc_saved (size_t n)
{
  G *g;
  CgoMal *cm;
  void *p;

  p = __go_alloc (n);

  g = runtime_g ();
  cm = (CgoMal *) __go_alloc (sizeof (CgoMal));
  cm->next = g->cgomal;
  cm->alloc = p;
  g->cgomal = cm;

  return p;
}
// Format into buf (capacity n, including the NUL terminator) by
// diverting the runtime print machinery through the goroutine's
// write buffer.  Returns the number of bytes written, not counting
// the terminating NUL.
int32
runtime_snprintf(byte *buf, int32 n, const char *s, ...)
{
	G *g;
	va_list va;
	int32 written;

	g = runtime_g();

	// Reserve one byte for the NUL terminator.
	g->writebuf = buf;
	g->writenbuf = n-1;

	va_start(va, s);
	go_vprintf(s, va);
	va_end(va);

	// gwrite advanced g->writebuf past the formatted bytes.
	*g->writebuf = '\0';
	written = g->writebuf - buf;

	g->writenbuf = 0;
	g->writebuf = nil;
	return written;
}
// Run all remaining deferred functions for the current goroutine,
// popping and freeing each entry as it executes.
static void
__go_rundefer(void)
{
	G *g;
	Defer *d;

	g = runtime_g();
	for(;;) {
		void (*fn)(void*);

		d = g->defer;
		if(d == nil)
			break;
		// Pop and clear the entry before calling, mirroring
		// the defer machinery elsewhere in the runtime.
		g->defer = d->__next;
		fn = d->__pfn;
		d->__pfn = nil;
		if(fn != nil)
			(*fn)(d->__arg);
		runtime_freedefer(d);
	}
}
// Print the panic report — signal details (if any) and goroutine
// tracebacks per GOTRACEBACK — then release the panic lock and exit.
// If another M is panicking concurrently, the one that decrements
// runtime_panicking to a nonzero value parks forever on a private
// lock so only one report reaches the user.
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}

	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}

	if(crash)
		runtime_crash();

	runtime_exit(2);
}
// Format into buf (capacity n, including the NUL terminator) by
// diverting the runtime print machinery through the goroutine's
// writebuf slice.  Returns the number of bytes written, not counting
// the terminating NUL.
int32
runtime_snprintf(byte *buf, int32 n, const char *s, ...)
{
	G *g;
	va_list va;
	int32 len;

	g = runtime_g();

	// Point the goroutine's write slice at buf, reserving one
	// byte of capacity for the NUL terminator.
	g->writebuf.__values = buf;
	g->writebuf.__count = 0;
	g->writebuf.__capacity = n-1;

	va_start(va, s);
	go_vprintf(s, va);
	va_end(va);

	len = g->writebuf.__count;
	((byte*)g->writebuf.__values)[len] = '\0';

	// Detach the buffer again so later prints go to stderr.
	g->writebuf.__values = nil;
	g->writebuf.__count = 0;
	g->writebuf.__capacity = 0;
	return len;
}
/* Finish a call from Go out to C: decrement the goroutine's cgo call
   count, release saved cgo allocations when no calls remain, and
   leave syscall mode if still in it.  */
void
syscall_cgocalldone ()
{
  G *gp;

  gp = runtime_g ();
  __go_assert (gp != NULL);

  gp->ncgo--;
  if (gp->ncgo == 0)
    {
      /* We are going back to Go, and we are not in a recursive call.
	 Let the garbage collector clean up any unreferenced
	 memory.  */
      gp->cgomal = NULL;
    }

  /* If we are invoked because the C function called _cgo_panic, then
     _cgo_panic will already have exited syscall mode.  */
  if (gp->status == Gsyscall)
    runtime_exitsyscall ();
}
/* Unwind the stack for a panic by raising a C++-style exception
   carrying the Go exception class.  The exception header is saved on
   the goroutine so the defer machinery can rethrow it.  Never
   returns.  */
void
__go_unwind_stack ()
{
  struct _Unwind_Exception *exc;

  exc = ((struct _Unwind_Exception *)
	 __go_alloc (sizeof (struct _Unwind_Exception)));
  __builtin_memcpy (&exc->exception_class, &__go_exception_class,
		    sizeof exc->exception_class);
  exc->exception_cleanup = NULL;

  runtime_g ()->exception = exc;

#ifdef __USING_SJLJ_EXCEPTIONS__
  _Unwind_SjLj_RaiseException (exc);
#else
  _Unwind_RaiseException (exc);
#endif

  /* Raising an exception should not return.  */
  abort ();
}
// write to goroutine-local buffer if diverting output, // or else standard error. static void gwrite(const void *v, intgo n) { G* g = runtime_g(); if(g == nil || g->writebuf == nil) { // Avoid -D_FORTIFY_SOURCE problems. int rv __attribute__((unused)); rv = runtime_write(2, v, n); return; } if(g->writenbuf == 0) return; if(n > g->writenbuf) n = g->writenbuf; runtime_memmove(g->writebuf, v, n); g->writebuf += n; g->writenbuf -= n; }
/* Run the deferred functions recorded for FRAME as the function
   returns normally (without panicking).  */
void
__go_undefer (_Bool *frame)
{
  G *g;

  g = runtime_g ();
  while (g->defer != NULL && g->defer->__frame == frame)
    {
      struct __go_defer_stack *d;
      void (*pfn) (void *);
      M *m;

      d = g->defer;
      pfn = d->__pfn;
      /* Clear the function pointer before the call — presumably so
	 the entry cannot be run a second time if the deferred
	 function itself panics; confirm against __go_panic.  */
      d->__pfn = NULL;

      if (pfn != NULL)
	(*pfn) (d->__arg);

      g->defer = d->__next;

      /* This may be called by a cgo callback routine to defer the
	 call to syscall.CgocallBackDone, in which case we will not
	 have a memory context.  Don't try to free anything in that
	 case--the GC will release it later.  */
      m = runtime_m ();
      if (m != NULL && m->mcache != NULL && d->__free)
	__go_free (d);

      /* Since we are executing a defer function here, we know we are
	 returning from the calling function.  If the calling
	 function, or one of its callees, paniced, then the defer
	 functions would be executed by __go_panic.  */
      *frame = 1;
    }
}
/* Implement the Go panic statement: push ARG as a new Panic entry on
   the goroutine's panic stack, run the deferred functions, and if
   none of them recovers, print the panics and abort the process.  */
void
__go_panic (struct __go_empty_interface arg)
{
  G *g;
  Panic *n;

  g = runtime_g ();

  n = (Panic *) __go_alloc (sizeof (Panic));
  n->arg = arg;
  n->next = g->_panic;
  g->_panic = n;

  /* Run all the defer functions.  */

  while (1)
    {
      Defer *d;
      void (*pfn) (void *);

      d = g->_defer;
      if (d == NULL)
	break;

      /* Clear pfn before calling so the entry cannot run twice.  */
      pfn = (void (*) (void *)) d->pfn;
      d->pfn = 0;

      if (pfn != NULL)
	{
	  (*pfn) (d->arg);

	  if (n->recovered)
	    {
	      /* Some defer function called recover.  That means that
		 we should stop running this panic.  */

	      g->_panic = n->next;
	      __go_free (n);

	      /* Now unwind the stack by throwing an exception.  The
		 compiler has arranged to create exception handlers in
		 each function which uses a defer statement.  These
		 exception handlers will check whether the entry on
		 the top of the defer stack is from the current
		 function.  If it is, we have unwound the stack far
		 enough.  */
	      __go_unwind_stack ();

	      /* __go_unwind_stack should not return.  */
	      abort ();
	    }

	  /* Because we executed that defer function by a panic, and
	     it did not call recover, we know that we are not
	     returning from the calling function--we are panicing
	     through it.  */
	  *d->frame = 0;
	}

      g->_defer = d->next;

      /* This may be called by a cgo callback routine to defer the
	 call to syscall.CgocallBackDone, in which case we will not
	 have a memory context.  Don't try to free anything in that
	 case--the GC will release it later.  */
      if (runtime_m () != NULL)
	runtime_freedefer (d);
    }

  /* The panic was not recovered.  */

  runtime_startpanic ();
  __printpanics (g->_panic);
  runtime_dopanic (0);
}
/*
 * generic single channel send/recv
 * if the bool pointer is nil,
 * then the full exchange will
 * occur. if pres is not nil,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g->param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 *
 * NOTE(review): the comment above refers to an older `pres`
 * out-parameter; in this version the `block` flag plays that role
 * (block==false means "do not sleep, report failure instead").
 */
static bool
chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
{
	SudoG *sg;
	SudoG mysg;
	G* gp;
	int64 t0;
	G* g;

	g = runtime_g();

	if(raceenabled)
		runtime_racereadobjectpc(ep, t->__element_type, runtime_getcallerpc(&t), chansend);

	if(c == nil) {
		// Send on a nil channel: blocks forever (or fails
		// immediately when non-blocking).
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan send (nil chan)");
		return false;  // not reached
	}

	if(runtime_gcwaiting())
		runtime_gosched();

	if(debug) {
		runtime_printf("chansend: chan=%p\n", c);
	}

	// Record the start time when block profiling is on.
	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(raceenabled)
		runtime_racereadpc(c, pc, chansend);
	if(c->closed)
		goto closed;

	if(c->dataqsiz > 0)
		goto asynch;

	// Synchronous (unbuffered) channel: hand the value directly
	// to a waiting receiver, if any.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		gp = sg->g;
		gp->param = sg;
		if(sg->elem != nil)
			runtime_memmove(sg->elem, ep, c->elemsize);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	// No receiver waiting: queue ourselves on sendq and sleep.
	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->sendq, &mysg);
	runtime_parkunlock(c, "chan send");

	if(g->param == nil) {
		// Woken with no partner: the channel was closed while
		// we slept (see file comment above).
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chansend: spurious wakeup");
		goto closed;
	}

	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);

	return true;

asynch:
	// Buffered channel.
	if(c->closed)
		goto closed;

	if(c->qcount >= c->dataqsiz) {
		// Buffer full: block (or bail out) until space frees
		// up, then retry from asynch.
		if(!block) {
			runtime_unlock(c);
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->sendq, &mysg);
		runtime_parkunlock(c, "chan send");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_racerelease(chanbuf(c, c->sendx));

	// Copy the value into the ring buffer.
	runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
	if(++c->sendx == c->dataqsiz)
		c->sendx = 0;
	c->qcount++;

	// Wake one waiting receiver, if any.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	runtime_unlock(c);
	runtime_panicstring("send on closed channel");
	return false;  // not reached
}
// Receive from channel c into ep (which may be nil to discard the
// value).  Returns false only for a failed non-blocking receive;
// *received reports whether a value (vs. the zero value of a closed
// channel) was obtained.
static bool
chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
{
	SudoG *sg;
	SudoG mysg;
	G *gp;
	int64 t0;
	G *g;

	if(runtime_gcwaiting())
		runtime_gosched();

	// raceenabled: don't need to check ep, as it is always on the stack.

	if(debug)
		runtime_printf("chanrecv: chan=%p\n", c);

	g = runtime_g();

	if(c == nil) {
		// Receive on a nil channel: blocks forever (or fails
		// immediately when non-blocking).
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan receive (nil chan)");
		return false;  // not reached
	}

	// Record the start time when block profiling is on.
	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(c->dataqsiz > 0)
		goto asynch;

	if(c->closed)
		goto closed;

	// Synchronous (unbuffered) channel: take the value directly
	// from a waiting sender, if any.
	sg = dequeue(&c->sendq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		if(ep != nil)
			runtime_memmove(ep, sg->elem, c->elemsize);
		gp = sg->g;
		gp->param = sg;
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);

		if(received != nil)
			*received = true;
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	// No sender waiting: queue ourselves on recvq and sleep.
	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->recvq, &mysg);
	runtime_parkunlock(c, "chan receive");

	if(g->param == nil) {
		// Woken with no partner: the channel was closed while
		// we slept.
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chanrecv: spurious wakeup");
		goto closed;
	}

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

asynch:
	// Buffered channel.
	if(c->qcount <= 0) {
		if(c->closed)
			goto closed;

		// Buffer empty: block (or bail out) until a value
		// arrives, then retry from asynch.
		if(!block) {
			runtime_unlock(c);
			if(received != nil)
				*received = false;
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->recvq, &mysg);
		runtime_parkunlock(c, "chan receive");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_raceacquire(chanbuf(c, c->recvx));

	// Copy the value out of the ring buffer and clear the slot.
	if(ep != nil)
		runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
	runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
	if(++c->recvx == c->dataqsiz)
		c->recvx = 0;
	c->qcount--;

	// Wake one waiting sender, if any.
	sg = dequeue(&c->sendq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	// Receive on a closed channel: yield the zero value.
	if(ep != nil)
		runtime_memclr(ep, c->elemsize);
	if(received != nil)
		*received = false;
	if(raceenabled)
		runtime_raceacquire(c);
	runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;
}
/* Called from an exception landing pad while unwinding the stack
   after a panic.  Decides whether unwinding stops in the frame FRAME
   (because a defer there called recover), runs local defer handlers
   for foreign exceptions, or rethrows the exception to keep
   unwinding.  */
void
__go_check_defer (_Bool *frame)
{
  G *g;
  struct _Unwind_Exception *hdr;

  g = runtime_g ();

  if (g == NULL)
    {
      /* Some other language has thrown an exception.  We know there
	 are no defer handlers, so there is nothing to do.  */
    }
  else if (g->isforeign)
    {
      Panic *n;
      _Bool recovered;

      /* Some other language has thrown an exception.  We need to run
	 the local defer handlers.  If they call recover, we stop
	 unwinding the stack here.  */

      n = (Panic *) __go_alloc (sizeof (Panic));

      n->arg.__type_descriptor = NULL;
      n->arg.__object = NULL;
      n->recovered = 0;
      n->isforeign = 1;
      n->next = g->_panic;
      g->_panic = n;

      while (1)
	{
	  Defer *d;
	  void (*pfn) (void *);

	  d = g->_defer;
	  if (d == NULL || d->frame != frame || d->pfn == 0)
	    break;

	  pfn = (void (*) (void *)) d->pfn;
	  g->_defer = d->next;

	  (*pfn) (d->arg);

	  if (runtime_m () != NULL)
	    runtime_freedefer (d);

	  if (n->recovered)
	    {
	      /* The recover function caught the panic thrown by some
		 other language.  */
	      break;
	    }
	}

      recovered = n->recovered;
      g->_panic = n->next;
      __go_free (n);

      if (recovered)
	{
	  /* Just return and continue executing Go code.  */
	  *frame = 1;
	  return;
	}

      /* We are panicing through this function.  */
      *frame = 0;
    }
  else if (g->_defer != NULL
	   && g->_defer->pfn == 0
	   && g->_defer->frame == frame)
    {
      Defer *d;

      /* This is the defer function which called recover.  Simply
	 return to stop the stack unwind, and let the Go code continue
	 to execute.  */
      d = g->_defer;
      g->_defer = d->next;

      if (runtime_m () != NULL)
	runtime_freedefer (d);

      /* We are returning from this function.  */
      *frame = 1;

      return;
    }

  /* This is some other defer function.  It was already run by the
     call to panic, or just above.  Rethrow the exception.  */

  hdr = (struct _Unwind_Exception *) g->exception;

#ifdef __USING_SJLJ_EXCEPTIONS__
  _Unwind_SjLj_Resume_or_Rethrow (hdr);
#else
#if defined(_LIBUNWIND_STD_ABI)
  _Unwind_RaiseException (hdr);
#else
  _Unwind_Resume_or_Rethrow (hdr);
#endif
#endif

  /* Rethrowing the exception should not return.  */
  abort();
}
/* Stack-unwinding personality routine for Go frames.  Parses the
   frame's LSDA, finds the landing pad for the current IP, and
   installs it; for Go exceptions the exception header is recorded on
   the goroutine first.

   NOTE(review): this chunk opens mid-#if — an alternate ARM EABI
   declaration for this function precedes it outside this view; the
   stray #endif after the parameter list closes that conditional.  */
_Unwind_Reason_Code
PERSONALITY_FUNCTION (int version,
		      _Unwind_Action actions,
		      _Unwind_Exception_Class exception_class,
		      struct _Unwind_Exception *ue_header,
		      struct _Unwind_Context *context)
#endif
{
  lsda_header_info info;
  const unsigned char *language_specific_data, *p, *action_record;
  _Unwind_Ptr landing_pad, ip;
  int ip_before_insn = 0;
  _Bool is_foreign;
  G *g;

#ifdef __ARM_EABI_UNWINDER__
  _Unwind_Action actions;

  /* Translate the ARM EABI unwinder state into the generic
     _Unwind_Action flags used below.  */
  switch (state & _US_ACTION_MASK)
    {
    case _US_VIRTUAL_UNWIND_FRAME:
      actions = _UA_SEARCH_PHASE;
      break;

    case _US_UNWIND_FRAME_STARTING:
      actions = _UA_CLEANUP_PHASE;
      if (!(state & _US_FORCE_UNWIND)
	  && ue_header->barrier_cache.sp == _Unwind_GetGR(context, 13))
	actions |= _UA_HANDLER_FRAME;
      break;

    case _US_UNWIND_FRAME_RESUME:
      CONTINUE_UNWINDING;
      break;

    default:
      abort();
    }
  actions |= state & _US_FORCE_UNWIND;

  is_foreign = 0;

  /* The dwarf unwinder assumes the context structure holds things
     like the function and LSDA pointers.  The ARM implementation
     caches these in the exception header (UCB).  To avoid rewriting
     everything we make the virtual IP register point at the UCB.  */
  ip = (_Unwind_Ptr) ue_header;
  _Unwind_SetGR (context, 12, ip);
#else
  if (version != 1)
    return _URC_FATAL_PHASE1_ERROR;

  /* A foreign exception is one whose class does not match Go's.  */
  is_foreign = exception_class != __go_exception_class;
#endif

  language_specific_data = (const unsigned char *)
    _Unwind_GetLanguageSpecificData (context);

  /* If no LSDA, then there are no handlers or cleanups.  */
  if (! language_specific_data)
    CONTINUE_UNWINDING;

  /* Parse the LSDA header.  */
  p = parse_lsda_header (context, language_specific_data, &info);
#ifdef HAVE_GETIPINFO
  ip = _Unwind_GetIPInfo (context, &ip_before_insn);
#else
  ip = _Unwind_GetIP (context);
#endif
  if (! ip_before_insn)
    --ip;
  landing_pad = 0;
  action_record = NULL;

#ifdef __USING_SJLJ_EXCEPTIONS__
  /* The given "IP" is an index into the call-site table, with two
     exceptions -- -1 means no-action, and 0 means terminate.  But
     since we're using uleb128 values, we've not got random access to
     the array.  */
  if ((int) ip <= 0)
    return _URC_CONTINUE_UNWIND;
  else
    {
      _uleb128_t cs_lp, cs_action;
      do
	{
	  p = read_uleb128 (p, &cs_lp);
	  p = read_uleb128 (p, &cs_action);
	}
      while (--ip);

      /* Can never have null landing pad for sjlj -- that would have
	 been indicated by a -1 call site index.  */
      landing_pad = (_Unwind_Ptr)cs_lp + 1;
      if (cs_action)
	action_record = info.action_table + cs_action - 1;
      goto found_something;
    }
#else
  /* Search the call-site table for the action associated with this
     IP.  */
  while (p < info.action_table)
    {
      _Unwind_Ptr cs_start, cs_len, cs_lp;
      _uleb128_t cs_action;

      /* Note that all call-site encodings are "absolute"
	 displacements.  */
      p = read_encoded_value (0, info.call_site_encoding, p, &cs_start);
      p = read_encoded_value (0, info.call_site_encoding, p, &cs_len);
      p = read_encoded_value (0, info.call_site_encoding, p, &cs_lp);
      p = read_uleb128 (p, &cs_action);

      /* The table is sorted, so if we've passed the ip, stop.  */
      if (ip < info.Start + cs_start)
	p = info.action_table;
      else if (ip < info.Start + cs_start + cs_len)
	{
	  if (cs_lp)
	    landing_pad = info.LPStart + cs_lp;
	  if (cs_action)
	    action_record = info.action_table + cs_action - 1;
	  goto found_something;
	}
    }
#endif

  /* IP is not in table.  No associated cleanups.  */
  CONTINUE_UNWINDING;

 found_something:
  if (landing_pad == 0)
    {
      /* IP is present, but has a null landing pad.
	 No handler to be run.  */
      CONTINUE_UNWINDING;
    }

  if (actions & _UA_SEARCH_PHASE)
    {
      if (action_record == 0)
	{
	  /* This indicates a cleanup rather than an exception
	     handler.  */
	  CONTINUE_UNWINDING;
	}

      return _URC_HANDLER_FOUND;
    }

  /* It's possible for g to be NULL here for an exception thrown by a
     language other than Go.  */
  g = runtime_g ();
  if (g == NULL)
    {
      if (!is_foreign)
	abort ();
    }
  else
    {
      g->exception = ue_header;
      g->isforeign = is_foreign;
    }

  /* Install the landing pad, passing the exception header in the
     EH return-data registers.  */
  _Unwind_SetGR (context, __builtin_eh_return_data_regno (0),
		 (_Unwind_Ptr) ue_header);
  _Unwind_SetGR (context, __builtin_eh_return_data_regno (1), 0);
  _Unwind_SetIP (context, landing_pad);
  return _URC_INSTALL_CONTEXT;
}
/* Dispatch signal SIG according to the runtime's signal table:
   forward it to the os/signal package (SigNotify), exit (SigKill),
   ignore it, or print a trace and exit (SigThrow).  */
static void
sig_handler (int sig)
{
  int i;

  if (runtime_m () == NULL)
    {
      /* No M: this thread is not known to the Go runtime.  */
      runtime_badsignal (sig);
      return;
    }

#ifdef SIGPROF
  if (sig == SIGPROF)
    {
      runtime_sigprof ();
      return;
    }
#endif

  /* The table is terminated by an entry with sig == -1.  */
  for (i = 0; runtime_sigtab[i].sig != -1; ++i)
    {
      SigTab *t;

      t = &runtime_sigtab[i];

      if (t->sig != sig)
	continue;

      if ((t->flags & SigNotify) != 0)
	{
	  /* Queue the signal for os/signal; done if delivered.  */
	  if (__go_sigsend (sig))
	    return;
	}
      if ((t->flags & SigKill) != 0)
	runtime_exit (2);
      if ((t->flags & SigThrow) == 0)
	return;

      runtime_startpanic ();

      {
	const char *name = NULL;

#ifdef HAVE_STRSIGNAL
	name = strsignal (sig);
#endif

	if (name == NULL)
	  runtime_printf ("Signal %d\n", sig);
	else
	  runtime_printf ("%s\n", name);
      }

      runtime_printf ("\n");

      if (runtime_gotraceback ())
	{
	  G *g;

	  g = runtime_g ();
	  runtime_traceback (g);
	  runtime_tracebackothers (g);

	  /* The gc library calls runtime_dumpregs here, and provides
	     a function that prints the registers saved in context in
	     a readable form.  */
	}

      runtime_exit (2);
    }

  __builtin_unreachable ();
}
  /* NOTE(review): fragment — the assertion and closing brace below
     belong to a function whose beginning is outside this chunk.  */
  __go_assert (i == 0);
}

#ifdef SA_SIGINFO

/* Signal dispatch for signals which panic, on systems which support
   SA_SIGINFO.  This is called on the thread stack, and as such it is
   permitted to split the stack.  */

static void
sig_panic_info_handler (int sig, siginfo_t *info,
			void *context __attribute__ ((unused)))
{
  G *g;

  g = runtime_g ();
  if (g == NULL || info->si_code == SI_USER)
    {
      /* No goroutine, or the signal was sent by a user process
	 rather than raised by a fault: use the ordinary handler.  */
      sig_handler (sig);
      return;
    }

  /* Record the fault details on the goroutine for the panic
     report.  */
  g->sig = sig;
  g->sigcode0 = info->si_code;
  g->sigcode1 = (uintptr_t) info->si_addr;

  /* It would be nice to set g->sigpc here as the gc library does,
     but I don't know how to get it portably.  */

  sig_panic_leadin (sig);
  /* NOTE(review): the remainder of this function (including its
     closing brace) lies beyond this chunk.  */
/*
 * generic single channel send/recv
 * if the bool pointer is nil,
 * then the full exchange will
 * occur. if pres is not nil,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g->param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 *
 * NOTE(review): in this older version runtime_gcwaiting is tested as
 * a value (no call parentheses), unlike the chansend variant that
 * calls runtime_gcwaiting() — presumably it is a flag here; confirm
 * against this runtime's declarations.
 */
void
runtime_chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
{
	SudoG *sg;
	SudoG mysg;
	G* gp;
	int64 t0;
	G* g;

	g = runtime_g();

	if(c == nil) {
		// Send on a nil channel: blocks forever (or reports
		// failure via *pres when non-blocking).
		USED(t);
		if(pres != nil) {
			*pres = false;
			return;
		}
		runtime_park(nil, nil, "chan send (nil chan)");
		return;  // not reached
	}

	if(runtime_gcwaiting)
		runtime_gosched();

	if(debug) {
		runtime_printf("chansend: chan=%p\n", c);
	}

	// Record the start time when block profiling is on.
	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	// TODO(dvyukov): add similar instrumentation to select.
	if(raceenabled)
		runtime_racereadpc(c, pc);
	if(c->closed)
		goto closed;

	if(c->dataqsiz > 0)
		goto asynch;

	// Synchronous (unbuffered) channel: hand the value directly
	// to a waiting receiver, if any.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		gp = sg->g;
		gp->param = sg;
		if(sg->elem != nil)
			runtime_memmove(sg->elem, ep, c->elemsize);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);

		if(pres != nil)
			*pres = true;
		return;
	}

	if(pres != nil) {
		runtime_unlock(c);
		*pres = false;
		return;
	}

	// No receiver waiting: queue ourselves on sendq and sleep.
	mysg.elem = ep;
	mysg.g = g;
	mysg.selgen = NOSELGEN;
	g->param = nil;
	enqueue(&c->sendq, &mysg);
	runtime_park(runtime_unlock, c, "chan send");

	if(g->param == nil) {
		// Woken with no partner: the channel was closed while
		// we slept (see file comment above).
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chansend: spurious wakeup");
		goto closed;
	}

	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);

	return;

asynch:
	// Buffered channel.
	if(c->closed)
		goto closed;

	if(c->qcount >= c->dataqsiz) {
		// Buffer full: block (or bail out via *pres) until
		// space frees up, then retry from asynch.
		if(pres != nil) {
			runtime_unlock(c);
			*pres = false;
			return;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selgen = NOSELGEN;
		enqueue(&c->sendq, &mysg);
		runtime_park(runtime_unlock, c, "chan send");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_racerelease(chanbuf(c, c->sendx));

	// Copy the value into the ring buffer.
	runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
	if(++c->sendx == c->dataqsiz)
		c->sendx = 0;
	c->qcount++;

	// Wake one waiting receiver, if any.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);
	if(pres != nil)
		*pres = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return;

closed:
	runtime_unlock(c);
	runtime_panicstring("send on closed channel");
}