void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	if(runtime_atomicload((uint32*)&n->key) != 0)
		return;

	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}

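// A minimal standalone sketch (not part of the runtime sources here) of the
// Linux futex calls that runtime_futexsleep/runtime_futexwakeup wrap:
// FUTEX_WAIT blocks only while *addr still equals the expected value, and
// FUTEX_WAKE wakes sleepers.  Assumes Linux; the helper names are hypothetical.
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static long futex_wait(uint32_t *addr, uint32_t expect)
{
	/* Returns immediately if *addr != expect, otherwise sleeps until woken. */
	return syscall(SYS_futex, addr, FUTEX_WAIT, expect, NULL, NULL, 0);
}

static long futex_wake(uint32_t *addr, int nwake)
{
	/* Wakes up to nwake threads sleeping on addr. */
	return syscall(SYS_futex, addr, FUTEX_WAKE, nwake, NULL, NULL, 0);
}

int main(void)
{
	uint32_t key = 1;

	/* The value already changed from 0, so the wait returns at once (EAGAIN). */
	futex_wait(&key, 0);
	futex_wake(&key, 1);
	return 0;
}
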
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass.  Returns this span.
MSpan*
runtime_MCache_Refill(MCache *c, int32 sizeclass)
{
	MCacheList *l;
	MSpan *s;

	runtime_m()->locks++;
	// Return the current cached span to the central lists.
	s = c->alloc[sizeclass];
	if(s->freelist != nil)
		runtime_throw("refill on a nonempty span");
	if(s != &emptymspan)
		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);

	// Push any explicitly freed objects to the central lists.
	// Not required, but it seems like a good time to do it.
	l = &c->free[sizeclass];
	if(l->nlist > 0) {
		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
		l->list = nil;
		l->nlist = 0;
	}

	// Get a new cached span from the central lists.
	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
	if(s == nil)
		runtime_throw("out of memory");
	if(s->freelist == nil) {
		runtime_printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
		runtime_throw("empty span");
	}
	c->alloc[sizeclass] = s;
	runtime_m()->locks--;
	return s;
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}

// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
//	GOTRACEBACK=0       suppress all tracebacks
//	GOTRACEBACK=1       default behavior - show tracebacks but exclude runtime frames
//	GOTRACEBACK=2       show tracebacks including runtime frames
//	GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
int32
runtime_gotraceback(bool *crash)
{
	const byte *p;
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);
	if(x == ~(uint32)0) {
		p = runtime_getenv("GOTRACEBACK");
		if(p == nil)
			p = (const byte*)"";
		if(p[0] == '\0')
			x = 1<<1;
		else if(runtime_strcmp((const char *)p, "crash") == 0)
			x = (2<<1) | 1;
		else
			x = runtime_atoi(p)<<1;
		runtime_atomicstore(&traceback_cache, x);
	}
	if(crash != nil)
		*crash = x&1;
	return x>>1;
}

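// A minimal standalone sketch (not runtime code) of the encoding used by
// traceback_cache above: the crash flag lives in bit 0 and the traceback
// level in the remaining bits.  The helper names here are hypothetical.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_traceback(int32_t level, bool crash)
{
	return ((uint32_t)level << 1) | (crash ? 1u : 0u);
}

int main(void)
{
	uint32_t x = encode_traceback(2, true);	/* corresponds to GOTRACEBACK=crash */

	/* Decode the same way runtime_gotraceback does: level = x>>1, crash = x&1. */
	printf("level=%u crash=%u\n", (unsigned)(x >> 1), (unsigned)(x & 1));
	return 0;
}
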
void
syscall_cgocallback ()
{
  M *mp;

  mp = runtime_m ();
  if (mp == NULL)
    {
      runtime_needm ();
      mp = runtime_m ();
      mp->dropextram = true;
    }

  runtime_exitsyscall (0);

  if (runtime_m ()->ncgo == 0)
    {
      /* The C call to Go came from a thread not currently running any
	 Go.  In the case of -buildmode=c-archive or c-shared, this
	 call may be coming in before package initialization is
	 complete.  Wait until it is.  */
      chanrecv1 (NULL, runtime_main_init_done, NULL);
    }

  mp = runtime_m ();
  if (mp->needextram)
    {
      mp->needextram = 0;
      runtime_newextram ();
    }
}

void
runtime_notesleep(Note *n)
{
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	while(runtime_atomicload((uint32*)&n->key) == 0)
		runtime_futexsleep((uint32*)&n->key, 0, -1);
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}

void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}

void
runtime_panicstring(const char *s)
{
	Eface err;

	if(runtime_m()->mallocing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during malloc");
	}
	if(runtime_m()->gcing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during gc");
	}
	runtime_newErrorCString(s, &err);
	runtime_panic(err);
}

void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	mp = m;
	gp = g;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {
#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stack_context[0]);
#else
		gp->gcnext_sp = &i;
#endif
		gp->fromgogo = false;
		getcontext(&gp->context);

		// When we return from getcontext, we may be running
		// in a new thread.  That means that m and g may have
		// changed.  They are global variables so we will
		// reload them, but the addresses of m and g may be
		// cached in our local stack frame, and those
		// addresses may be wrong.  Call functions to reload
		// the values for this thread.
		mp = runtime_m();
		gp = runtime_g();

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(&mp->g0->context);
		setcontext(&mp->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}

void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}

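// A standalone sketch (not runtime code) of the pointer-tagging trick the
// semaphore-based lock above relies on: the low bit of l->key is the LOCKED
// flag, and the remaining bits hold the head of the waiter list.  This works
// because M structures are pointer-aligned, so the low bit is always free.
// Types and names here are illustrative only.
#include <stdint.h>
#include <stdio.h>

#define LOCKED ((uintptr_t)1)

typedef struct Waiter Waiter;
struct Waiter {
	Waiter *next;
};

int main(void)
{
	Waiter w = { 0 };

	/* Pack: waiter pointer plus the LOCKED flag in a single word. */
	uintptr_t key = (uintptr_t)&w | LOCKED;

	/* Unpack: mask the flag off to recover the pointer. */
	Waiter *head = (Waiter *)(key & ~LOCKED);

	printf("locked=%d head==&w: %d\n", (int)(key & LOCKED), head == &w);
	return 0;
}
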
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}

	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}

	if(crash)
		runtime_crash();

	runtime_exit(2);
}

int
main (int argc, char **argv)
{
  runtime_initsig (0);
  runtime_args (argc, (byte **) argv);
  runtime_osinit ();
  runtime_schedinit ();
  __go_go (mainstart, NULL);
  runtime_mstart (runtime_m ());
  abort ();
}

bool
runtime_showframe(String s, bool current)
{
	static int32 traceback = -1;

	if(current && runtime_m()->throwing > 0)
		return 1;
	if(traceback < 0)
		traceback = runtime_gotraceback(nil);
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}

void
syscall_cgocall ()
{
  M* m;
  G* g;

  m = runtime_m ();
  ++m->ncgocall;
  g = runtime_g ();
  ++g->ncgo;
  runtime_entersyscall ();
}

// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
void
runtime_lock(Lock *l)
{
	uint32 i, v, wait, spin;

	if(runtime_m()->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	v = runtime_xchg(&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex.  If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i=0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_osyield();
		}

		// Sleep.
		v = runtime_xchg(&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		wait = MUTEX_SLEEPING;
		runtime_futexsleep(&l->key, MUTEX_SLEEPING, -1);
	}
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}

void
syscall_cgocallbackdone ()
{
  M *mp;

  runtime_entersyscall (0);
  mp = runtime_m ();
  if (mp->dropextram && mp->ncgo == 0)
    {
      mp->dropextram = false;
      runtime_dropm ();
    }
}

// Free the given defer.
// The defer cannot be used after this call.
void
runtime_freedefer(Defer *d)
{
	P *p;

	if(d->__special)
		return;
	p = runtime_m()->p;
	d->__next = p->deferpool;
	p->deferpool = d;
	// No need to wipe out pointers in argp/pc/fn/args,
	// because we empty the pool before GC.
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
void
runtime_minit(void)
{
	M* m;
	sigset_t sigs;

	// Initialize signal handling.
	m = runtime_m();
	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
	if (sigemptyset(&sigs) != 0)
		runtime_throw("sigemptyset");
	pthread_sigmask(SIG_SETMASK, &sigs, nil);
}

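// A standalone sketch (not runtime code) of the per-thread setup that
// runtime_minit performs: install an alternate signal stack and clear the
// thread's signal mask.  The helper name and stack size are illustrative.
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>

static void setup_thread_signals(void)
{
	stack_t ss;
	sigset_t sigs;

	/* Alternate stack so signal handlers can run even if the goroutine
	   stack is exhausted. */
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) != 0)
		abort();

	/* Start with no signals blocked on this thread. */
	if (sigemptyset(&sigs) != 0)
		abort();
	pthread_sigmask(SIG_SETMASK, &sigs, NULL);
}

int main(void)
{
	setup_thread_signals();
	return 0;
}
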
uint32
runtime_fastrand1(void)
{
	M *m;
	uint32 x;

	m = runtime_m();
	x = m->fastrand;
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}

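// A standalone sketch (not runtime code) of the generator step used above:
// double the 32-bit state and, if the new top bit is set, XOR in a feedback
// constant.  The seed and helper name here are illustrative only.
#include <stdint.h>
#include <stdio.h>

static uint32_t fastrand_step(uint32_t x)
{
	x += x;				/* shift left by one (wraps mod 2^32) */
	if (x & 0x80000000u)
		x ^= 0x88888eefu;	/* conditional feedback XOR */
	return x;
}

int main(void)
{
	uint32_t x = 0x49f6428au;	/* arbitrary nonzero seed */

	for (int i = 0; i < 4; i++) {
		x = fastrand_step(x);
		printf("%#x\n", (unsigned)x);
	}
	return 0;
}
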
void
runtime_unlock(Lock *l)
{
	uint32 v;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	v = runtime_xchg(&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime_throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime_futexwakeup(&l->key, 1);
}

void
runtime_throw(const char *s)
{
	M *mp;

	mp = runtime_m();
	if(mp->throwing == 0)
		mp->throwing = 1;
	runtime_startpanic();
	runtime_printf("fatal error: %s\n", s);
	runtime_dopanic(0);
	*(int32*)0 = 0;	// not reached
	runtime_exit(1);	// even more not reached
}

void
syscall_cgocall ()
{
  M* m;
  G* g;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  m = runtime_m ();
  ++m->ncgocall;
  g = runtime_g ();
  ++g->ncgo;
  runtime_entersyscall ();
}

void
syscall_cgocall ()
{
  M* m;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  runtime_lockOSThread ();

  m = runtime_m ();
  ++m->ncgocall;
  ++m->ncgo;
  runtime_entersyscall (0);
}

void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	runtime_semasleep(-1);
}

void
runtime_resetcpuprofiler(int32 hz)
{
	struct itimerval it;

	runtime_memclr((byte*)&it, sizeof it);
	if(hz == 0) {
		runtime_setitimer(ITIMER_PROF, &it, nil);
	} else {
		it.it_interval.tv_sec = 0;
		it.it_interval.tv_usec = 1000000 / hz;
		it.it_value = it.it_interval;
		runtime_setitimer(ITIMER_PROF, &it, nil);
	}
	runtime_m()->profilehz = hz;
}

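// A standalone sketch (not runtime code) of the setitimer(2) pattern used
// above: ITIMER_PROF delivers SIGPROF roughly hz times per second of CPU
// time consumed by the process.  The handler and hz value are illustrative.
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

static void on_sigprof(int sig)
{
	(void)sig;
	ticks++;
}

int main(void)
{
	struct itimerval it = {0};
	int hz = 100;

	signal(SIGPROF, on_sigprof);
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 1000000 / hz;
	it.it_value = it.it_interval;
	setitimer(ITIMER_PROF, &it, NULL);

	/* Burn some CPU so ITIMER_PROF fires, then report. */
	for (volatile long i = 0; i < 200000000L; i++)
		;
	printf("SIGPROF ticks: %d\n", (int)ticks);
	return 0;
}
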
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
Defer*
runtime_newdefer()
{
	Defer *d;
	P *p;

	d = nil;
	p = runtime_m()->p;
	d = p->deferpool;
	if(d)
		p->deferpool = d->__next;
	if(d == nil) {
		// deferpool is empty
		d = runtime_malloc(sizeof(Defer));
	}
	return d;
}

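// A minimal standalone sketch (not runtime code) of the free-list pooling
// pattern behind runtime_newdefer/runtime_freedefer above: pop a node from a
// singly linked pool on allocation, push it back on free, and fall back to
// the allocator only when the pool is empty.  Types and names are
// illustrative only.
#include <stdlib.h>

typedef struct Node Node;
struct Node {
	Node *next;
	/* payload would go here */
};

static Node *pool;

static Node *pool_get(void)
{
	Node *n = pool;

	if (n != NULL)
		pool = n->next;
	else
		n = malloc(sizeof(Node));	/* pool empty: fall back to malloc */
	return n;
}

static void pool_put(Node *n)
{
	n->next = pool;
	pool = n;
}

int main(void)
{
	Node *a = pool_get();
	pool_put(a);			/* recycled into the pool ... */
	Node *b = pool_get();		/* ... and handed out again */
	return b == a ? 0 : 1;
}
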
void *
alloc_saved (size_t n)
{
  void *ret;
  M *m;
  CgoMal *c;

  ret = __go_alloc (n);

  m = runtime_m ();
  c = (CgoMal *) __go_alloc (sizeof (CgoMal));
  c->next = m->cgomal;
  c->alloc = ret;
  m->cgomal = c;

  return ret;
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}

static void
sig_panic_leadin (int sig)
{
  int i;
  sigset_t clear;

  if (runtime_m ()->mallocing)
    {
      runtime_printf ("caught signal while mallocing: %d\n", sig);
      runtime_throw ("caught signal while mallocing");
    }

  /* The signal handler blocked signals; unblock them.  */
  i = sigfillset (&clear);
  __go_assert (i == 0);
  i = sigprocmask (SIG_UNBLOCK, &clear, NULL);
  __go_assert (i == 0);
}