void
runtime_goroutinetrailer(G *g)
{
	if(g != nil && g->gopc != 0 && g->goid != 1) {
		struct __go_string fn;
		struct __go_string file;
		int line;

		if(__go_file_line(g->gopc - 1, &fn, &file, &line)) {
			runtime_printf("created by %s\n", fn.__data);
			runtime_printf("\t%s:%d\n", file.__data, line);
		}
	}
}
static void
__printpanics (struct __go_panic_stack *p)
{
  if (p->__next != NULL)
    {
      __printpanics (p->__next);
      runtime_printf ("\t");
    }
  runtime_printf ("panic: ");
  runtime_printany (p->__arg);
  if (p->__was_recovered)
    runtime_printf (" [recovered]");
  runtime_printf ("\n");
}
static void
__printpanics (Panic *p)
{
  if (p->next != NULL)
    {
      __printpanics (p->next);
      runtime_printf ("\t");
    }
  runtime_printf ("panic: ");
  runtime_printany (p->arg);
  if (p->recovered)
    runtime_printf (" [recovered]");
  runtime_printf ("\n");
}
void
runtime_printtrace (Location *locbuf, int32 c, bool current)
{
  int32 i;

  for (i = 0; i < c; ++i)
    {
      if (runtime_showframe (locbuf[i].function, current))
	{
	  runtime_printf ("%S\n", locbuf[i].function);
	  runtime_printf ("\t%S:%D\n", locbuf[i].filename,
			  (int64) locbuf[i].lineno);
	}
    }
}
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass.  Returns this span.
MSpan*
runtime_MCache_Refill(MCache *c, int32 sizeclass)
{
	MCacheList *l;
	MSpan *s;

	runtime_m()->locks++;

	// Return the current cached span to the central lists.
	s = c->alloc[sizeclass];
	if(s->freelist != nil)
		runtime_throw("refill on a nonempty span");
	if(s != &emptymspan)
		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);

	// Push any explicitly freed objects to the central lists.
	// Not required, but it seems like a good time to do it.
	l = &c->free[sizeclass];
	if(l->nlist > 0) {
		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
		l->list = nil;
		l->nlist = 0;
	}

	// Get a new cached span from the central lists.
	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
	if(s == nil)
		runtime_throw("out of memory");
	if(s->freelist == nil) {
		runtime_printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
		runtime_throw("empty span");
	}
	c->alloc[sizeclass] = s;
	runtime_m()->locks--;
	return s;
}
void*
runtime_FixAlloc_Alloc(FixAlloc *f)
{
	void *v;

	if(f->size == 0) {
		runtime_printf("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n");
		runtime_throw("runtime: internal error");
	}

	if(f->list) {
		v = f->list;
		f->list = *(void**)f->list;
		f->inuse += f->size;
		return v;
	}
	if(f->nchunk < f->size) {
		f->sys += FixAllocChunk;
		f->chunk = f->alloc(FixAllocChunk);
		if(f->chunk == nil)
			runtime_throw("out of memory (FixAlloc)");
		f->nchunk = FixAllocChunk;
	}
	v = f->chunk;
	if(f->first)
		f->first(f->arg, v);
	f->chunk += f->size;
	f->nchunk -= f->size;
	f->inuse += f->size;
	return v;
}
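// The matching free path for FixAlloc is a two-line free-list push.
// A minimal sketch of that counterpart, assuming the same FixAlloc
// fields used above (list, inuse, size); the real version may differ
// in bookkeeping details.
void
runtime_FixAlloc_Free(FixAlloc *f, void *p)
{
	f->inuse -= f->size;
	*(void**)p = f->list;	// push the object back onto the free list
	f->list = p;
}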
void
__go_assert_fail (const char *file, unsigned int lineno)
{
  /* FIXME: Eventually we should dump a stack trace here.  */
  runtime_printf ("%s:%U: libgo assertion failure\n", file, (uint64) lineno);
  abort ();
}
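/* Callers normally reach __go_assert_fail through an __go_assert macro,
   as in sig_panic_leadin below.  A plausible definition, given here as
   an illustrative sketch only (the real header may differ):  */
#ifdef NDEBUG
#define __go_assert(EXPR) ((void) 0)
#else
#define __go_assert(EXPR) \
  ((void) ((EXPR) ? 0 : (__go_assert_fail (__FILE__, __LINE__), 0)))
#endif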
void
runtime_goroutineheader(G *g)
{
	const char *status;

	switch(g->status) {
	case Gidle:
		status = "idle";
		break;
	case Grunnable:
		status = "runnable";
		break;
	case Grunning:
		status = "running";
		break;
	case Gsyscall:
		status = "syscall";
		break;
	case Gwaiting:
		if(g->waitreason)
			status = g->waitreason;
		else
			status = "waiting";
		break;
	case Gmoribund:
		status = "moribund";
		break;
	default:
		status = "???";
		break;
	}
	runtime_printf("goroutine %d [%s]:\n", g->goid, status);
}
static Hchan*
makechan(ChanType *t, int64 hint)
{
	Hchan *c;
	uintptr n;
	const Type *elem;

	elem = t->__element_type;

	// compiler checks this but be safe.
	if(elem->__size >= (1<<16))
		runtime_throw("makechan: invalid channel element type");

	if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
		runtime_panicstring("makechan: size out of range");

	n = sizeof(*c);
	n = ROUND(n, elem->__align);

	// allocate memory in one call (n, not sizeof(*c), so that the
	// rounded header size computed above is actually used and the
	// buffer after the header is aligned for the element type)
	c = (Hchan*)runtime_mallocgc(n + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
	c->elemsize = elem->__size;
	c->elemtype = elem;
	c->dataqsiz = hint;

	if(debug)
		runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
			c, (int64)elem->__size, (int64)c->dataqsiz);

	return c;
}
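// ROUND pads the Hchan header to the element's alignment so the buffer
// that follows the header is suitably aligned.  A minimal sketch of the
// macro for power-of-two alignments (assumption: the real runtime.h may
// define it differently):

// Round x up to a multiple of n, where n is a power of two.
// E.g. ROUND(100, 16) == 112, so a 16-byte-aligned element placed
// right after a 100-byte header starts on a 16-byte boundary.
#define ROUND(x, n) (((x) + (n) - 1) & ~(uintptr)((n) - 1))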
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime_printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id, g->idlem->idleg->goid, g->goid);
			runtime_throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime_sched.ghead == nil)
		runtime_sched.ghead = g;
	else
		runtime_sched.gtail->schedlink = g;
	runtime_sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime_sched.gwait++ == 0)
		runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift);
}
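// Sketch of the matching dequeue side, mirroring gput above (an
// assumption, not necessarily the exact runtime code).  Sched must be
// locked.  It pops from ghead and reverses the gwait accounting.
static G*
gget(void)
{
	G *g;

	g = runtime_sched.ghead;
	if(g) {
		runtime_sched.ghead = g->schedlink;
		if(runtime_sched.ghead == nil)
			runtime_sched.gtail = nil;
		// decrement gwait.
		// if it transitions to zero, clear atomic gwaiting bit.
		if(--runtime_sched.gwait == 0)
			runtime_xadd(&runtime_sched.atomic, -1<<gwaitingShift);
	} else if(m->idleg != nil) {
		// No queued work: fall back to this m's idle goroutine.
		g = m->idleg;
		m->idleg = nil;
	}
	return g;
}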
void
runtime_panicstring(const char *s)
{
	Eface err;

	if(runtime_m()->mallocing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during malloc");
	}
	if(runtime_m()->gcing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during gc");
	}
	runtime_newErrorCString(s, &err);
	runtime_panic(err);
}
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}
	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}

	if(crash)
		runtime_crash();

	runtime_exit(2);
}
void
runtime_netpollinit(void)
{
	kq = runtime_kqueue();
	if(kq < 0) {
		runtime_printf("netpollinit: kqueue failed with %d\n", -kq);
		runtime_throw("netpollinit: kqueue failed");
	}
	runtime_closeonexec(kq);
}
void
runtime_tracebackothers(G * volatile me)
{
	G * volatile g;
	Traceback traceback;

	traceback.gp = me;
	for(g = runtime_allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		runtime_printf("\n");
		runtime_goroutineheader(g);

		// Our only mechanism for doing a stack trace is
		// _Unwind_Backtrace.  And that only works for the
		// current thread, not for other random goroutines.
		// So we need to switch context to the goroutine, get
		// the backtrace, and then switch back.

		// This means that if g is running or in a syscall, we
		// can't reliably print a stack trace.  FIXME.
		if(g->status == Gsyscall || g->status == Grunning) {
			runtime_printf("no stack trace available\n");
			runtime_goroutinetrailer(g);
			continue;
		}

		g->traceback = &traceback;

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&me->stack_context[0]);
#endif
		getcontext(&me->context);

		if(g->traceback != nil) {
			runtime_gogo(g);
		}

		// Pass false for current: these are other goroutines'
		// frames, matching runtime_printtrace's signature above.
		runtime_printtrace(traceback.pcbuf, traceback.c, false);
		runtime_goroutinetrailer(g);
	}
}
void
runtime_startpanic(void)
{
	M *m;

	m = runtime_m();
	if(runtime_mheap.cachealloc.size == 0) { // very early
		runtime_printf("runtime: panic before malloc heap initialized\n");
		m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(m->mcache == nil) // can happen if called from signal handler or throw
		m->mcache = runtime_allocmcache();
	switch(m->dying) {
	case 0:
		m->dying = 1;
		if(runtime_g() != nil)
			runtime_g()->writebuf = nil;
		runtime_xadd(&runtime_panicking, 1);
		runtime_lock(&paniclk);
		if(runtime_debug.schedtrace > 0 || runtime_debug.scheddetail > 0)
			runtime_schedtrace(true);
		runtime_freezetheworld();
		return;
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		m->dying = 2;
		runtime_printf("panic during panic\n");
		runtime_dopanic(0);
		runtime_exit(3);
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		m->dying = 3;
		runtime_printf("stack trace unavailable\n");
		runtime_exit(4);
	default:
		// Can't even print!  Just exit.
		runtime_exit(5);
	}
}
void
runtime_tracebackothers(G *me)
{
	G *g;

	for(g = runtime_allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		runtime_printf("\n");
		runtime_goroutineheader(g);
		// runtime_traceback(g->sched.pc, g->sched.sp, 0, g);
	}
}
void
runtime_throw(const char *s)
{
	M *mp;

	mp = runtime_m();
	if(mp->throwing == 0)
		mp->throwing = 1;
	runtime_startpanic();
	runtime_printf("fatal error: %s\n", s);
	runtime_dopanic(0);
	*(int32*)0 = 0;	// not reached
	runtime_exit(1);	// even more not reached
}
void
runtime_netpollinit(void)
{
	epfd = runtime_epollcreate1(EPOLL_CLOEXEC);
	if(epfd >= 0)
		return;
	epfd = runtime_epollcreate(1024);
	if(epfd >= 0) {
		runtime_closeonexec(epfd);
		return;
	}
	runtime_printf("netpollinit: failed to create descriptor (%d)\n", -epfd);
	runtime_throw("netpollinit: failed to create descriptor");
}
// If any procs are sleeping on addr, wake up at most cnt.
void
runtime_futexwakeup(uint32 *addr, uint32 cnt)
{
	int64 ret;

	ret = syscall(__NR_futex, addr, FUTEX_WAKE, cnt, nil, nil, 0);
	if(ret >= 0)
		return;

	// I don't know that futex wakeup can return
	// EAGAIN or EINTR, but if it does, it would be
	// safe to loop and call futex again.
	runtime_printf("futexwakeup addr=%p returned %D\n", addr, ret);
	*(int32*)0x1006 = 0x1006;
}
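// futexwakeup pairs with a sleep primitive that parks on the same
// address with FUTEX_WAIT.  A minimal sketch of that side, assuming the
// same syscall-based convention as above (the real runtime handles
// timeouts and errno cases more carefully):
void
runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
{
	Timespec ts, *tsp;

	if(ns < 0)
		tsp = nil;	// no timeout: sleep until woken
	else {
		ts.tv_sec = ns / 1000000000LL;
		ts.tv_nsec = ns % 1000000000LL;
		tsp = &ts;
	}
	// The kernel atomically checks *addr == val and sleeps until a
	// futexwakeup on addr.  EAGAIN (value changed), EINTR, and
	// timeouts just return; callers loop and re-check the condition.
	syscall(__NR_futex, addr, FUTEX_WAIT, val, tsp, nil, 0);
}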
static void
sig_panic_leadin (int sig)
{
  int i;
  sigset_t clear;

  if (runtime_m ()->mallocing)
    {
      runtime_printf ("caught signal while mallocing: %d\n", sig);
      runtime_throw ("caught signal while mallocing");
    }

  /* The signal handler blocked signals; unblock them.  */
  i = sigfillset (&clear);
  __go_assert (i == 0);
  i = sigprocmask (SIG_UNBLOCK, &clear, NULL);
  __go_assert (i == 0);
}
// Mark g ready to run.  Sched is already locked.
// G might be running already and about to stop.
// The sched lock protects g->status from changing underfoot.
static void
readylocked(G *g)
{
	if(g->m){
		// Running on another machine.
		// Ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	// Mark runnable.
	if(g->status == Grunnable || g->status == Grunning) {
		runtime_printf("goroutine %d has status %d\n", g->goid, g->status);
		runtime_throw("bad g->status in ready");
	}
	g->status = Grunnable;

	gput(g);
	matchmg();
}
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
G*
runtime_netpoll(bool block)
{
	static int32 lasterr;
	Kevent events[64], *ev;
	Timespec ts, *tp;
	int32 n, i, mode;
	G *gp;

	if(kq == -1)
		return nil;
	tp = nil;
	if(!block) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		tp = &ts;
	}
	gp = nil;
retry:
	n = runtime_kevent(kq, nil, 0, events, nelem(events), tp);
	if(n < 0) {
		if(n != -EINTR && n != lasterr) {
			lasterr = n;
			runtime_printf("runtime: kevent on fd %d failed with %d\n", kq, -n);
		}
		goto retry;
	}
	for(i = 0; i < n; i++) {
		ev = &events[i];
		mode = 0;
		if(ev->filter == EVFILT_READ)
			mode += 'r';
		if(ev->filter == EVFILT_WRITE)
			mode += 'w';
		if(mode)
			runtime_netpollready(&gp, (PollDesc*)ev->udata, mode);
	}
	if(block && gp == nil)
		goto retry;
	return gp;
}
// polls for ready network connections
// returns list of goroutines that become runnable
G*
runtime_netpoll(bool block)
{
	static int32 lasterr;
	EpollEvent events[128], *ev;
	int32 n, i, waitms, mode;
	G *gp;

	if(epfd == -1)
		return nil;
	waitms = -1;
	if(!block)
		waitms = 0;
retry:
	n = runtime_epollwait(epfd, events, nelem(events), waitms);
	if(n < 0) {
		if(n != -EINTR && n != lasterr) {
			lasterr = n;
			runtime_printf("runtime: epollwait on fd %d failed with %d\n", epfd, -n);
		}
		goto retry;
	}
	gp = nil;
	for(i = 0; i < n; i++) {
		ev = &events[i];
		if(ev->events == 0)
			continue;
		mode = 0;
		if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
			mode += 'r';
		if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
			mode += 'w';
		if(mode)
			runtime_netpollready(&gp, (void*)ev->data.ptr, mode);
	}
	if(block && gp == nil)
		goto retry;
	return gp;
}
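// For context: descriptors enter the epoll set through a registration
// hook.  A sketch of that hook, assuming the EpollEvent and epollctl
// wrappers that go with the epoll calls used above (the real
// runtime_netpollopen may differ).  The fd is registered edge-triggered
// for both directions, which is why runtime_netpoll above checks both
// read- and write-style event bits on every wakeup.
int32
runtime_netpollopen(uintptr fd, PollDesc *pd)
{
	EpollEvent ev;
	int32 res;

	ev.events = EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET;	// edge-triggered, both directions
	ev.data.ptr = (void*)pd;	// recovered in runtime_netpoll via ev->data.ptr
	res = runtime_epollctl(epfd, EPOLL_CTL_ADD, (int32)fd, &ev);
	return -res;
}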
void
runtime_InitSizes(void)
{
	int32 align, sizeclass, size, nextsize, n;
	uint32 i;
	uintptr allocsize, npages;

	// Initialize the runtime_class_to_size table (and choose class sizes in the process).
	runtime_class_to_size[0] = 0;
	sizeclass = 1;	// 0 means no class
	align = 8;
	for(size = align; size <= MaxSmallSize; size += align) {
		if((size&(size-1)) == 0) {	// bump alignment once in a while
			if(size >= 2048)
				align = 256;
			else if(size >= 128)
				align = size / 8;
			else if(size >= 16)
				align = 16;	// required for x86 SSE instructions, if we want to use them
		}
		if((align&(align-1)) != 0)
			runtime_throw("InitSizes - bug");

		// Make the allocnpages big enough that
		// the leftover is less than 1/8 of the total,
		// so wasted space is at most 12.5%.
		allocsize = PageSize;
		while(allocsize%size > allocsize/8)
			allocsize += PageSize;
		npages = allocsize >> PageShift;

		// If the previous sizeclass chose the same
		// allocation size and fit the same number of
		// objects into the page, we might as well
		// use just this size instead of having two
		// different sizes.
		if(sizeclass > 1
		&& (int32)npages == runtime_class_to_allocnpages[sizeclass-1]
		&& allocsize/size == allocsize/runtime_class_to_size[sizeclass-1]) {
			runtime_class_to_size[sizeclass-1] = size;
			continue;
		}

		runtime_class_to_allocnpages[sizeclass] = npages;
		runtime_class_to_size[sizeclass] = size;
		sizeclass++;
	}
	if(sizeclass != NumSizeClasses) {
		// runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
		runtime_throw("InitSizes - bad NumSizeClasses");
	}

	// Initialize the size_to_class tables.
	nextsize = 0;
	for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
		for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize += 8)
			size_to_class8[nextsize/8] = sizeclass;
		if(nextsize >= 1024)
			for(; nextsize <= runtime_class_to_size[sizeclass]; nextsize += 128)
				size_to_class128[(nextsize-1024)/128] = sizeclass;
	}

	// Double-check SizeToClass.
	if(0) {
		for(n = 0; n < MaxSmallSize; n++) {
			sizeclass = runtime_SizeToClass(n);
			if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
				// runtime_printf("incorrect SizeToClass");
				goto dump;
			}
			if(sizeclass > 1 && runtime_class_to_size[sizeclass-1] >= n) {
				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
				// runtime_printf("SizeToClass too big");
				goto dump;
			}
		}
	}

	// Copy out for statistics table.
	for(i = 0; i < nelem(runtime_class_to_size); i++)
		mstats.by_size[i].size = runtime_class_to_size[i];

	// Initialize the runtime_class_to_transfercount table.
	for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
		n = 64*1024 / runtime_class_to_size[sizeclass];
		if(n < 2)
			n = 2;
		if(n > 32)
			n = 32;
		runtime_class_to_transfercount[sizeclass] = n;
	}
	return;

dump:
	if(1){
		runtime_printf("NumSizeClasses=%d\n", NumSizeClasses);
		runtime_printf("runtime_class_to_size:");
		for(sizeclass = 0; sizeclass < NumSizeClasses; sizeclass++)
			runtime_printf(" %d", runtime_class_to_size[sizeclass]);
		runtime_printf("\n\n");
		runtime_printf("size_to_class8:");
		for(i = 0; i < nelem(size_to_class8); i++)
			runtime_printf(" %d=>%d(%d)\n", i*8, size_to_class8[i],
				runtime_class_to_size[size_to_class8[i]]);
		runtime_printf("\n");
		runtime_printf("size_to_class128:");
		for(i = 0; i < nelem(size_to_class128); i++)
			runtime_printf(" %d=>%d(%d)\n", i*128, size_to_class128[i],
				runtime_class_to_size[size_to_class128[i]]);
		runtime_printf("\n");
	}
	runtime_throw("InitSizes failed");
}
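// The two tables built above give an O(1) size-to-class lookup: 8-byte
// granularity below 1024 bytes, 128-byte granularity above.  A sketch
// of the lookup they imply (the real runtime_SizeToClass may differ in
// details):
int32
runtime_SizeToClass(int32 size)
{
	if(size > MaxSmallSize)
		runtime_throw("SizeToClass - invalid size");
	if(size > 1024-8)
		return size_to_class128[(size-1024+127) >> 7];	// round up, then index
	return size_to_class8[(size+7) >> 3];
}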
void
runtime_gc(int32 force __attribute__ ((unused)))
{
	int64 t0, t1;
	char *p;
	Finalizer *fp;

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks.  To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock.  The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || m->locks > 0 /* || runtime_panicking */)
		return;

	if(gcpercent == -2) {	// first time through
		p = runtime_getenv("GOGC");
		if(p == nil || p[0] == '\0')
			gcpercent = 100;
		else if(runtime_strcmp(p, "off") == 0)
			gcpercent = -1;
		else
			gcpercent = runtime_atoi(p);
	}
	if(gcpercent < 0)
		return;

	pthread_mutex_lock(&finqlock);
	pthread_mutex_lock(&gcsema);
	m->locks++;	// disable gc during the mallocs in newproc
	t0 = runtime_nanotime();
	runtime_stoptheworld();
	if(force || mstats.heap_alloc >= mstats.next_gc) {
		__go_cachestats();
		mark();
		sweep();
		__go_stealcache();
		mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
	}
	t1 = runtime_nanotime();
	mstats.numgc++;
	mstats.pause_ns += t1 - t0;
	if(mstats.debuggc)
		runtime_printf("pause %llu\n", (unsigned long long)t1-t0);
	pthread_mutex_unlock(&gcsema);
	runtime_starttheworld();

	// finqlock is still held.
	fp = finq;
	if(fp != nil) {
		// kick off or wake up goroutine to run queued finalizers
		if(!finstarted) {
			__go_go(runfinq, nil);
			finstarted = 1;
		} else if(fingwait) {
			fingwait = 0;
			pthread_cond_signal(&finqcond);
		}
	}
	m->locks--;
	pthread_mutex_unlock(&finqlock);
}
static void
scanblock(byte *b, int64 n)
{
	int32 off;
	void *obj;
	uintptr size;
	uint32 *refp, ref;
	void **vp;
	int64 i;
	BlockList *w;

	w = bl;
	w->obj = b;
	w->size = n;
	w++;

	while(w > bl) {
		w--;
		b = w->obj;
		n = w->size;

		if(Debug > 1)
			runtime_printf("scanblock %p %lld\n", b, (long long) n);
		off = (uint32)(uintptr)b & (PtrSize-1);
		if(off) {
			b += PtrSize - off;
			n -= PtrSize - off;
		}

		vp = (void**)b;
		n /= PtrSize;
		for(i = 0; i < n; i++) {
			obj = vp[i];
			if(obj == nil)
				continue;
			if(runtime_mheap.closure_min != nil
			&& runtime_mheap.closure_min <= (byte*)obj
			&& (byte*)obj < runtime_mheap.closure_max) {
				if((((uintptr)obj) & 63) != 0)
					continue;

				// Looks like a Native Client closure.
				// Actual pointer is pointed at by address in first instruction.
				// Embedded pointer starts at byte 2.
				// If it is f4f4f4f4 then that space hasn't been
				// used for a closure yet (f4 is the HLT instruction).
				// See nacl/386/closure.c for more.
				void **pp;
				pp = *(void***)((byte*)obj+2);
				if(pp == (void**)0xf4f4f4f4)	// HLT... - not a closure after all
					continue;
				obj = *pp;
			}
			if(runtime_mheap.min <= (byte*)obj && (byte*)obj < runtime_mheap.max) {
				if(runtime_mlookup(obj, (byte**)&obj, &size, nil, &refp)) {
					ref = *refp;
					switch(ref & ~RefFlags) {
					case RefNone:
						if(Debug > 1)
							runtime_printf("found at %p: ", &vp[i]);
						*refp = RefSome | (ref & RefFlags);
						if(!(ref & RefNoPointers)) {
							if(w >= ebl)
								runtime_throw("scanblock: garbage collection stack overflow");
							w->obj = obj;
							w->size = size;
							w++;
						}
						break;
					}
				}
			}
		}
	}
}
/*
 * generic single channel send/recv.
 * if block is true, the full exchange will occur;
 * if block is false, the protocol will not sleep
 * but will return if it could not complete.
 *
 * sleep can wake up with g->param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
static bool
chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
{
	SudoG *sg;
	SudoG mysg;
	G* gp;
	int64 t0;
	G* g;

	g = runtime_g();

	if(raceenabled)
		runtime_racereadobjectpc(ep, t->__element_type, runtime_getcallerpc(&t), chansend);

	if(c == nil) {
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan send (nil chan)");
		return false;	// not reached
	}

	if(runtime_gcwaiting())
		runtime_gosched();

	if(debug) {
		runtime_printf("chansend: chan=%p\n", c);
	}

	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(raceenabled)
		runtime_racereadpc(c, pc, chansend);
	if(c->closed)
		goto closed;

	if(c->dataqsiz > 0)
		goto asynch;

	sg = dequeue(&c->recvq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		gp = sg->g;
		gp->param = sg;
		if(sg->elem != nil)
			runtime_memmove(sg->elem, ep, c->elemsize);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->sendq, &mysg);
	runtime_parkunlock(c, "chan send");

	if(g->param == nil) {
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chansend: spurious wakeup");
		goto closed;
	}

	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);

	return true;

asynch:
	if(c->closed)
		goto closed;

	if(c->qcount >= c->dataqsiz) {
		if(!block) {
			runtime_unlock(c);
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->sendq, &mysg);
		runtime_parkunlock(c, "chan send");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_racerelease(chanbuf(c, c->sendx));

	runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
	if(++c->sendx == c->dataqsiz)
		c->sendx = 0;
	c->qcount++;

	sg = dequeue(&c->recvq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	runtime_unlock(c);
	runtime_panicstring("send on closed channel");
	return false;	// not reached
}
static bool
chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
{
	SudoG *sg;
	SudoG mysg;
	G *gp;
	int64 t0;
	G *g;

	if(runtime_gcwaiting())
		runtime_gosched();

	// raceenabled: don't need to check ep, as it is always on the stack.

	if(debug)
		runtime_printf("chanrecv: chan=%p\n", c);

	g = runtime_g();

	if(c == nil) {
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan receive (nil chan)");
		return false;	// not reached
	}

	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(c->dataqsiz > 0)
		goto asynch;

	if(c->closed)
		goto closed;

	sg = dequeue(&c->sendq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		if(ep != nil)
			runtime_memmove(ep, sg->elem, c->elemsize);
		gp = sg->g;
		gp->param = sg;
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);

		if(received != nil)
			*received = true;
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->recvq, &mysg);
	runtime_parkunlock(c, "chan receive");

	if(g->param == nil) {
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chanrecv: spurious wakeup");
		goto closed;
	}

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

asynch:
	if(c->qcount <= 0) {
		if(c->closed)
			goto closed;

		if(!block) {
			runtime_unlock(c);
			if(received != nil)
				*received = false;
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->recvq, &mysg);
		runtime_parkunlock(c, "chan receive");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_raceacquire(chanbuf(c, c->recvx));

	if(ep != nil)
		runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
	runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
	if(++c->recvx == c->dataqsiz)
		c->recvx = 0;
	c->qcount--;

	sg = dequeue(&c->sendq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	if(ep != nil)
		runtime_memclr(ep, c->elemsize);
	if(received != nil)
		*received = false;
	if(raceenabled)
		runtime_raceacquire(c);
	runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;
}
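// Both paths above park on c->sendq/c->recvq, FIFO queues of SudoG wait
// records.  A minimal sketch of the queue helpers they call, assuming a
// WaitQ with first/last pointers (the real helpers may differ): dequeue
// uses selectdone so that a select wins the right to exactly one wakeup.
static void
enqueue(WaitQ *q, SudoG *sgp)
{
	sgp->link = nil;
	if(q->first == nil) {
		q->first = sgp;
		q->last = sgp;
		return;
	}
	q->last->link = sgp;
	q->last = sgp;
}

static SudoG*
dequeue(WaitQ *q)
{
	SudoG *sgp;

loop:
	sgp = q->first;
	if(sgp == nil)
		return nil;
	q->first = sgp->link;

	// If sgp participates in a select and another case already won,
	// skip it; the CAS claims the exclusive right to signal it.
	if(sgp->selectdone != nil) {
		if(*sgp->selectdone != 0 || !runtime_cas(sgp->selectdone, 0, 1))
			goto loop;
	}

	return sgp;
}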
static void
sig_handler (int sig)
{
  int i;

  if (runtime_m () == NULL)
    {
      runtime_badsignal (sig);
      return;
    }

#ifdef SIGPROF
  if (sig == SIGPROF)
    {
      runtime_sigprof ();
      return;
    }
#endif

  for (i = 0; runtime_sigtab[i].sig != -1; ++i)
    {
      SigTab *t;

      t = &runtime_sigtab[i];

      if (t->sig != sig)
	continue;

      if ((t->flags & SigNotify) != 0)
	{
	  if (__go_sigsend (sig))
	    return;
	}
      if ((t->flags & SigKill) != 0)
	runtime_exit (2);
      if ((t->flags & SigThrow) == 0)
	return;

      runtime_startpanic ();

      {
	const char *name = NULL;

#ifdef HAVE_STRSIGNAL
	name = strsignal (sig);
#endif

	if (name == NULL)
	  runtime_printf ("Signal %d\n", sig);
	else
	  runtime_printf ("%s\n", name);
      }

      runtime_printf ("\n");

      if (runtime_gotraceback ())
	{
	  G *g;

	  g = runtime_g ();
	  runtime_traceback (g);
	  runtime_tracebackothers (g);

	  /* The gc library calls runtime_dumpregs here, and provides
	     a function that prints the registers saved in context in
	     a readable form.  */
	}

      runtime_exit (2);
    }

  __builtin_unreachable ();
}
G*
runtime_netpoll(bool block)
{
	fd_set *prfds, *pwfds, *pefds, *ptfds;
	bool allocatedfds;
	struct timeval timeout;
	struct timeval *pt;
	int max, c, i;
	G *gp;
	int32 mode;
	byte b;
	struct stat st;

retry:
	runtime_lock(&selectlock);

	max = allocated;

	if(max == 0) {
		runtime_unlock(&selectlock);
		return nil;
	}

	if(inuse) {
		prfds = runtime_SysAlloc(4 * sizeof fds, &mstats.other_sys);
		pwfds = prfds + 1;
		pefds = pwfds + 1;
		ptfds = pefds + 1;
		allocatedfds = true;
	} else {
		prfds = &grfds;
		pwfds = &gwfds;
		pefds = &gefds;
		ptfds = &gtfds;
		inuse = true;
		allocatedfds = false;
	}

	__builtin_memcpy(prfds, &fds, sizeof fds);

	runtime_unlock(&selectlock);

	__builtin_memcpy(pwfds, prfds, sizeof fds);
	FD_CLR(rdwake, pwfds);
	__builtin_memcpy(pefds, pwfds, sizeof fds);

	__builtin_memcpy(ptfds, pwfds, sizeof fds);

	__builtin_memset(&timeout, 0, sizeof timeout);
	pt = &timeout;
	if(block)
		pt = nil;

	c = select(max, prfds, pwfds, pefds, pt);
	if(c < 0) {
		if(errno == EBADF) {
			// Some file descriptor has been closed.
			// Check each one, and treat each closed
			// descriptor as ready for read/write.
			c = 0;
			FD_ZERO(prfds);
			FD_ZERO(pwfds);
			FD_ZERO(pefds);
			for(i = 0; i < max; i++) {
				if(FD_ISSET(i, ptfds)
				&& fstat(i, &st) < 0
				&& errno == EBADF) {
					FD_SET(i, prfds);
					FD_SET(i, pwfds);
					c += 2;
				}
			}
		} else {
			if(errno != EINTR)
				runtime_printf("runtime: select failed with %d\n", errno);
			goto retry;
		}
	}

	gp = nil;
	for(i = 0; i < max && c > 0; i++) {
		mode = 0;
		if(FD_ISSET(i, prfds)) {
			mode += 'r';
			--c;
		}
		if(FD_ISSET(i, pwfds)) {
			mode += 'w';
			--c;
		}
		if(FD_ISSET(i, pefds)) {
			mode = 'r' + 'w';
			--c;
		}
		if(i == rdwake) {
			while(read(rdwake, &b, sizeof b) > 0)
				;
			continue;
		}
		if(mode) {
			PollDesc *pd;

			runtime_lock(&selectlock);
			pd = data[i];
			runtime_unlock(&selectlock);
			if(pd != nil)
				runtime_netpollready(&gp, pd, mode);
		}
	}

	if(block && gp == nil)
		goto retry;

	if(allocatedfds) {
		runtime_SysFree(prfds, 4 * sizeof fds, &mstats.other_sys);
	} else {
		runtime_lock(&selectlock);
		inuse = false;
		runtime_unlock(&selectlock);
	}

	return gp;
}